diff --git a/.github/workflows/README.md b/.github/workflows/README.md index edc1ef3272..d342f5e75c 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1,37 +1,49 @@ # GitHub Actions CI -## Actions - - `custom-actions/lint-packager`: - Lints Shaka Packager. You must pass `fetch-depth: 2` to `actions/checkout` - in order to provide enough history for the linter to tell which files have - changed. - - `custom-actions/build-packager`: - Builds Shaka Packager. Leaves build artifacts in the "artifacts" folder. - Requires OS-dependent and build-dependent inputs. - - `custom-actions/test-packager`: - Tests Shaka Packager. Requires OS-dependent and build-dependent inputs. - - `custom-actions/build-docs`: - Builds Shaka Packager docs. +## Reusable workflows + - `build.yaml`: + Build and test all combinations of OS & build settings. Also builds docs on + Linux. + + - `build-docs.yaml`: + Build Packager docs. Runs only on Linux. + + - `docker-image.yaml`: + Build the official Docker image. + + - `lint.yaml`: + Lint Shaka Packager. + + - `test-linux-distros.yaml`: + Test the build on all Linux distros via docker. + +## Composed workflows + - On PR (`pr.yaml`), invoke: + - `lint.yaml` + - `build.yaml` + - `build-docs.yaml` + - `docker-image.yaml` + - `test-linux-distros.yaml` + + - On release tag (`github-release.yaml`): + - Create a draft release + - Invoke: + - `lint.yaml` + - `build.yaml` + - `test-linux-distros.yaml` + - Publish the release with binaries from `build.yaml` attached -## Workflows - - On PR: - - `build_and_test.yaml`: - Builds and tests all combinations of OS & build settings. Also builds - docs. - - On release tag: - - `github_release.yaml`: - Creates a draft release on GitHub, builds and tests all combinations of OS - & build settings, builds docs on all OSes, attaches static release binaries - to the draft release, then fully publishes the release. 
- On release published: - - `docker_hub_release.yaml`: - Builds a Docker image to match the published GitHub release, then pushes it - to Docker Hub. - - `npm_release.yaml`: - Builds an NPM package to match the published GitHub release, then pushes it - to NPM. - - `update_docs.yaml`: - Builds updated docs and pushes them to the gh-pages branch. + - `docker-hub-release.yaml`, publishes the official Docker image + - `npm-release.yaml`, publishes the official NPM package + - `update-docs.yaml`: + - Invoke `build-docs.yaml` + - Push the output to the `gh-pages` branch + +## Common workflows from shaka-project + - `sync-labels.yaml` + - `update-issues.yaml` + - `validate-pr-title.yaml` ## Required Repo Secrets - `DOCKERHUB_CI_USERNAME`: The username of the Docker Hub CI account @@ -47,3 +59,8 @@ - `NPM_PACKAGE_NAME`: Not a true "secret", but stored here to avoid someone pushing bogus packages to NPM during CI testing from a fork - In a fork, set to a private name which differs from the production one + +## Optional Repo Secrets + - `ENABLE_DEBUG`: Set to non-empty to enable debugging via SSH after a failure + - `ENABLE_SELF_HOSTED`: Set to non-empty to enable self-hosted runners in the + build matrix diff --git a/.github/workflows/build-docs.yaml b/.github/workflows/build-docs.yaml new file mode 100644 index 0000000000..2b7238486f --- /dev/null +++ b/.github/workflows/build-docs.yaml @@ -0,0 +1,63 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# A reusable workflow to build Packager docs. Leaves docs output in the +# "gh-pages" folder. Only runs in Linux due to the dependency on doxygen, +# which we install with apt. +name: Build Docs + +# Runs when called from another workflow. +on: + workflow_call: + inputs: + ref: + required: true + type: string + +jobs: + docs: + name: Build docs + runs-on: ubuntu-latest + + steps: + - name: Install dependencies + run: | + sudo apt install -y doxygen + python3 -m pip install \ + sphinxcontrib.plantuml \ + recommonmark \ + cloud_sptheme \ + breathe + + - name: Checkout code + uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + + - name: Generate docs + run: | + mkdir -p gh-pages + mkdir -p build + + # Doxygen must run before Sphinx. Sphinx will refer to + # Doxygen-generated output when it builds its own docs. + doxygen docs/Doxyfile + + # Now build the Sphinx-based docs. + make -C docs/ html + + # Now move the generated outputs. 
+ cp -a build/sphinx/html gh-pages/html + cp -a build/doxygen/html gh-pages/docs + cp docs/index.html gh-pages/index.html diff --git a/.github/workflows/build-matrix.json b/.github/workflows/build-matrix.json new file mode 100644 index 0000000000..3c792ada79 --- /dev/null +++ b/.github/workflows/build-matrix.json @@ -0,0 +1,33 @@ +{ + "comment1": "runners hosted by GitHub, always enabled", + "hosted": [ + { + "os": "ubuntu-latest", + "os_name": "linux", + "target_arch": "x64", + "exe_ext": "" + }, + { + "os": "macos-latest", + "os_name": "osx", + "target_arch": "x64", + "exe_ext": "" + }, + { + "os": "windows-latest", + "os_name": "win", + "target_arch": "x64", + "exe_ext": ".exe" + } + ], + + "comment2": "runners hosted by the owner, enabled by the ENABLE_SELF_HOSTED secret being set on the repo", + "selfHosted": [ + { + "os": "self-hosted-linux-arm64", + "os_name": "linux", + "target_arch": "arm64", + "exe_ext": "" + } + ] +} diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 0000000000..7231f1f77d --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,209 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A reusable workflow to build and test Packager on every supported OS and +# architecture. +name: Build + +# Runs when called from another workflow. 
+on: + workflow_call: + inputs: + ref: + required: true + type: string + secrets: + # The GITHUB_TOKEN name is reserved, but not passed through implicitly. + # So we call our secret parameter simply TOKEN. + TOKEN: + required: false + + # These below are not actual secrets, but secrets are the only place to + # keep repo-specific configs that make this project friendlier to forks + # and easier to debug. + + # If non-empty, start a debug SSH server on failures. + ENABLE_DEBUG: + required: false + # If non-empty, enable self-hosted runners in the build matrix. + ENABLE_SELF_HOSTED: + required: false + +# By default, run all commands in a bash shell. On Windows, the default would +# otherwise be powershell. +defaults: + run: + shell: bash + +jobs: + # Configure the build matrix based on inputs. The list of objects in the + # build matrix contents can't be changed by conditionals, but it can be + # computed by another job and deserialized. This uses + # secrets.ENABLE_SELF_HOSTED to determine the build matrix, based on the + # metadata in build-matrix.json. + matrix_config: + runs-on: ubuntu-latest + outputs: + INCLUDE: ${{ steps.configure.outputs.INCLUDE }} + OS: ${{ steps.configure.outputs.OS }} + ENABLE_DEBUG: ${{ steps.configure.outputs.ENABLE_DEBUG }} + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + + - name: Configure Build Matrix + id: configure + shell: node {0} + run: | + const enableDebug = "${{ secrets.ENABLE_DEBUG }}" != ''; + const enableSelfHosted = "${{ secrets.ENABLE_SELF_HOSTED }}" != ''; + + // Use ENABLE_SELF_HOSTED to decide what the build matrix below + // should include. + const {hosted, selfHosted} = require("${{ github.workspace }}/.github/workflows/build-matrix.json"); + const include = enableSelfHosted ? hosted.concat(selfHosted) : hosted; + const os = include.map((config) => config.os); + + // Output JSON objects consumed by the build matrix below. 
+ console.log(`::set-output name=INCLUDE::${ JSON.stringify(include) }`); + console.log(`::set-output name=OS::${ JSON.stringify(os) }`); + + // Output the debug flag as 'true' or '' (a bare boolean would print "false", which is still non-empty) so the `!= ''` check below works. + console.log(`::set-output name=ENABLE_DEBUG::${ enableDebug ? 'true' : '' }`); + + // Log the outputs, for the sake of debugging this script. + console.log({enableDebug, include, os}); + + build: + needs: matrix_config + strategy: + fail-fast: false + matrix: + include: ${{ fromJSON(needs.matrix_config.outputs.INCLUDE) }} + os: ${{ fromJSON(needs.matrix_config.outputs.OS) }} + build_type: ["Debug", "Release"] + lib_type: ["static", "shared"] + + name: ${{ matrix.os_name }} ${{ matrix.target_arch }} ${{ matrix.build_type }} ${{ matrix.lib_type }} + runs-on: ${{ matrix.os }} + + steps: + - name: Configure git to preserve line endings + # Otherwise, tests fail on Windows because "golden" test outputs will not + # have the correct line endings. + run: git config --global core.autocrlf false + + - name: Checkout code + uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + submodules: true + + - name: Install Linux deps + if: runner.os == 'Linux' + run: sudo apt install -y libc-ares-dev + + - name: Generate build files + run: | + mkdir -p build/ + + if [[ "${{ matrix.lib_type }}" == "shared" ]]; then + LIBPACKAGER_SHARED="ON" + else + LIBPACKAGER_SHARED="OFF" + fi + + cmake \ + -DCMAKE_BUILD_TYPE="${{ matrix.build_type }}" \ + -DLIBPACKAGER_SHARED="$LIBPACKAGER_SHARED" \ + -S . \ + -B build/ + + - name: Build + # This is a universal build command, which will call make on Linux and + # Visual Studio on Windows. Note that the VS generator is what cmake + # calls a "multi-configuration" generator, and so the desired build + # type must be specified for Windows. 
+ run: cmake --build build/ --config "${{ matrix.build_type }}" + + - name: Test + run: cd build; ctest -C "${{ matrix.build_type }}" -V + + # TODO(joeyparrish): Prepare artifacts when build system is complete again +# - name: Prepare artifacts (static release only) +# run: | +# BUILD_CONFIG="${{ matrix.build_type }}-${{ matrix.lib_type }}" +# if [[ "$BUILD_CONFIG" != "Release-static" ]]; then +# echo "Skipping artifacts for $BUILD_CONFIG." +# exit 0 +# fi +# if [[ "${{ runner.os }}" == "Linux" ]]; then +# echo "::group::Check for static executables" +# ( +# cd build/Release +# # Prove that we built static executables on Linux. First, check that +# # the executables exist, and fail if they do not. Then check "ldd", +# # which will fail if the executable is not dynamically linked. If +# # "ldd" succeeds, we fail the workflow. Finally, we call "true" so +# # that the last executed statement will be a success, and the step +# # won't be failed if we get that far. +# ls packager mpd_generator >/dev/null || exit 1 +# ldd packager 2>&1 && exit 1 +# ldd mpd_generator 2>&1 && exit 1 +# true +# ) +# echo "::endgroup::" +# fi +# echo "::group::Prepare artifacts folder" +# mkdir artifacts +# ARTIFACTS="$GITHUB_WORKSPACE/artifacts" +# cd build/Release +# echo "::endgroup::" +# echo "::group::Strip executables" +# strip packager${{ matrix.exe_ext }} +# strip mpd_generator${{ matrix.exe_ext }} +# echo "::endgroup::" +# SUFFIX="-${{ matrix.os_name }}-${{ matrix.target_arch }}" +# EXE_SUFFIX="$SUFFIX${{ matrix.exe_ext}}" +# echo "::group::Copy packager" +# cp packager${{ matrix.exe_ext }} $ARTIFACTS/packager$EXE_SUFFIX +# echo "::endgroup::" +# echo "::group::Copy mpd_generator" +# cp mpd_generator${{ matrix.exe_ext }} $ARTIFACTS/mpd_generator$EXE_SUFFIX +# echo "::endgroup::" +# # The pssh-box bundle is OS and architecture independent. So only do +# # it on this one OS and architecture, and give it a more generic +# # filename. 
+# if [[ '${{ matrix.os_name }}' == 'linux' && '${{ matrix.target_arch }}' == 'x64' ]]; then +# echo "::group::Tar pssh-box" +# tar -czf $ARTIFACTS/pssh-box.py.tar.gz pyproto pssh-box.py +# echo "::endgroup::" +# fi + + # TODO(joeyparrish): Attach artifacts when build system is complete again +# - name: Attach artifacts to release +# if: matrix.build_type == 'Release' && matrix.lib_type == 'static' +# uses: dwenegar/upload-release-assets@v1 +# env: +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +# with: +# release_id: ${{ needs.draft_release.outputs.release_id }} +# assets_path: artifacts + + - name: Debug + uses: mxschmitt/action-tmate@v3.6 + with: + limit-access-to-actor: true + if: failure() && needs.matrix_config.outputs.ENABLE_DEBUG != '' diff --git a/.github/workflows/build_and_test.yaml b/.github/workflows/build_and_test.yaml deleted file mode 100644 index 208ba0b355..0000000000 --- a/.github/workflows/build_and_test.yaml +++ /dev/null @@ -1,137 +0,0 @@ -name: Build and Test PR - -# Builds and tests on all combinations of OS, build type, and library type. -# Also builds the docs. -# -# Runs when a pull request is opened or updated. -# -# Can also be run manually for debugging purposes. -on: - pull_request: - types: [opened, synchronize, reopened] - workflow_dispatch: - inputs: - ref: - description: "The ref to build and test." - required: False - -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ github.event.inputs.ref || github.ref }} - # This makes the merge base available for the C++ linter, so that it - # can tell which files have changed. - fetch-depth: 2 - - - name: Lint - uses: ./src/.github/workflows/custom-actions/lint-packager - - build_and_test: - # Doesn't really "need" it, but let's not waste time on an expensive matrix - # build step just to cancel it because of a linter error. 
- needs: lint - strategy: - fail-fast: false - matrix: - # NOTE: macos-10.15 is required for now, to work around issues with our - # build system. See related comments in - # .github/workflows/custom-actions/build-packager/action.yaml - os: ["ubuntu-latest", "macos-10.15", "windows-latest", "self-hosted-linux-arm64"] - build_type: ["Debug", "Release"] - lib_type: ["static", "shared"] - include: - - os: ubuntu-latest - os_name: linux - target_arch: x64 - exe_ext: "" - build_type_suffix: "" - - os: macos-10.15 - os_name: osx - target_arch: x64 - exe_ext: "" - build_type_suffix: "" - - os: windows-latest - os_name: win - target_arch: x64 - exe_ext: ".exe" - # 64-bit outputs on Windows go to a different folder name. - build_type_suffix: "_x64" - - os: self-hosted-linux-arm64 - os_name: linux - target_arch: arm64 - exe_ext: "" - build_type_suffix: "" - - name: Build and test ${{ matrix.os_name }} ${{ matrix.target_arch }} ${{ matrix.build_type }} ${{ matrix.lib_type }} - runs-on: ${{ matrix.os }} - - steps: - - name: Configure git to preserve line endings - # Otherwise, tests fail on Windows because "golden" test outputs will not - # have the correct line endings. 
- run: git config --global core.autocrlf false - - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ github.event.inputs.ref || github.ref }} - - - name: Build docs (Linux only) - if: runner.os == 'Linux' - uses: ./src/.github/workflows/custom-actions/build-docs - - - name: Build Packager - uses: ./src/.github/workflows/custom-actions/build-packager - with: - os_name: ${{ matrix.os_name }} - target_arch: ${{ matrix.target_arch }} - lib_type: ${{ matrix.lib_type }} - build_type: ${{ matrix.build_type }} - build_type_suffix: ${{ matrix.build_type_suffix }} - exe_ext: ${{ matrix.exe_ext }} - - - name: Test Packager - uses: ./src/.github/workflows/custom-actions/test-packager - with: - lib_type: ${{ matrix.lib_type }} - build_type: ${{ matrix.build_type }} - build_type_suffix: ${{ matrix.build_type_suffix }} - exe_ext: ${{ matrix.exe_ext }} - - test_supported_linux_distros: - # Doesn't really "need" it, but let's not waste time on a series of docker - # builds just to cancel it because of a linter error. - needs: lint - name: Test builds on all supported Linux distros (using docker) - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ github.event.inputs.ref || github.ref }} - - - name: Install depot tools - shell: bash - run: | - git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git - touch depot_tools/.disable_auto_update - echo "${GITHUB_WORKSPACE}/depot_tools" >> $GITHUB_PATH - - - name: Setup gclient - shell: bash - run: | - gclient config https://github.com/shaka-project/shaka-packager.git --name=src --unmanaged - # NOTE: the docker tests will do gclient runhooks, so skip hooks here. 
- gclient sync --nohooks - - - name: Test all distros - shell: bash - run: ./src/packager/testing/dockers/test_dockers.sh diff --git a/.github/workflows/custom-actions/build-docs/action.yaml b/.github/workflows/custom-actions/build-docs/action.yaml deleted file mode 100644 index 3b68b588eb..0000000000 --- a/.github/workflows/custom-actions/build-docs/action.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: Build Shaka Packager Docs - -description: | - A reusable action to build Shaka Packager docs. - Leaves docs output in the "gh-pages" folder. - Only runs in Linux due to the dependency on doxygen, which we install with - apt. - -runs: - using: composite - steps: - - name: Install dependencies - shell: bash - run: | - echo "::group::Install dependencies" - sudo apt install -y doxygen - python3 -m pip install \ - sphinxcontrib.plantuml \ - recommonmark \ - cloud_sptheme \ - breathe - echo "::endgroup::" - - - name: Generate docs - shell: bash - run: | - echo "::group::Prepare output folders" - mkdir -p gh-pages - cd src - mkdir -p out - echo "::endgroup::" - echo "::group::Build Doxygen docs" - # Doxygen must run before Sphinx. Sphinx will refer to - # Doxygen-generated output when it builds its own docs. - doxygen docs/Doxyfile - echo "::endgroup::" - echo "::group::Build Sphinx docs" - # Now build the Sphinx-based docs. - make -C docs/ html - echo "::endgroup::" - echo "::group::Move ouputs" - # Now move the generated outputs. - cp -a out/sphinx/html ../gh-pages/html - cp -a out/doxygen/html ../gh-pages/docs - cp docs/index.html ../gh-pages/index.html - echo "::endgroup::" diff --git a/.github/workflows/custom-actions/build-packager/action.yaml b/.github/workflows/custom-actions/build-packager/action.yaml deleted file mode 100644 index 0556ae86b1..0000000000 --- a/.github/workflows/custom-actions/build-packager/action.yaml +++ /dev/null @@ -1,182 +0,0 @@ -name: Build Shaka Packager - -description: | - A reusable action to build Shaka Packager. 
- Leaves build artifacts in the "artifacts" folder. - -inputs: - os_name: - description: The name of the OS (one word). Appended to artifact filenames. - required: true - target_arch: - description: The CPU architecture to target. We support x64, arm64. - required: true - lib_type: - description: A library type, either "static" or "shared". - required: true - build_type: - description: A build type, either "Debug" or "Release". - required: true - build_type_suffix: - description: A suffix to append to the build type in the output path. - required: false - default: "" - exe_ext: - description: The extension on executable files. - required: false - default: "" - -runs: - using: composite - steps: - - name: Select Xcode 10.3 and SDK 10.14 (macOS only) - # NOTE: macOS 11 doesn't work with our (old) version of Chromium build, - # and the latest Chromium build doesn't work with Packager's build - # system. To work around this, we need an older SDK version, and to - # get that, we need an older XCode version. XCode 10.3 has SDK 10.14, - # which works. 
- shell: bash - run: | - if [[ "${{ runner.os }}" == "macOS" ]]; then - echo "::group::Select Xcode 10.3" - sudo xcode-select -s /Applications/Xcode_10.3.app/Contents/Developer - echo "::endgroup::" - fi - - - name: Install c-ares (Linux only) - shell: bash - run: | - if [[ "${{ runner.os }}" == "Linux" ]]; then - echo "::group::Install c-ares" - sudo apt install -y libc-ares-dev - echo "::endgroup::" - fi - - - name: Force Python 2 to support ancient build system (non-Linux only) - if: runner.os != 'Linux' - uses: actions/setup-python@v2 - with: - python-version: '2.x' - - - name: Force Python 2 to support ancient build system (Linux only) - if: runner.os == 'Linux' - shell: bash - run: | - echo "::group::Install python2" - sudo apt install -y python2 - sudo ln -sf python2 /usr/bin/python - echo "::endgroup::" - - - name: Install depot tools - shell: bash - run: | - echo "::group::Install depot_tools" - git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git - touch depot_tools/.disable_auto_update - echo "${GITHUB_WORKSPACE}/depot_tools" >> $GITHUB_PATH - # Bypass VPYTHON included by depot_tools. Prefer the system installation. - echo "VPYTHON_BYPASS=manually managed python not supported by chrome operations" >> $GITHUB_ENV - echo "::endgroup::" - - - name: Build ninja (arm only) - shell: bash - run: | - # NOTE: There is no prebuilt copy of ninja for the "aarch64" - # architecture (as reported by "uname -p" on arm64). So we must build - # our own, as recommended by depot_tools when it fails to fetch a - # prebuilt copy for us. - # NOTE 2: It turns out that $GITHUB_PATH operates like a stack. - # Appending to that file places the new path at the beginning of $PATH - # for the next step, so this step must come _after_ installing - # depot_tools. 
- if [[ "${{ inputs.target_arch }}" == "arm64" ]]; then - echo "::group::Build ninja (arm-only)" - git clone https://github.com/ninja-build/ninja.git -b v1.8.2 - # The --bootstrap option compiles ninja as well as configures it. - # This is the exact command prescribed by depot_tools when it fails to - # fetch a ninja binary for your platform. - (cd ninja && ./configure.py --bootstrap) - echo "${GITHUB_WORKSPACE}/ninja" >> $GITHUB_PATH - echo "::endgroup::" - fi - - - name: Configure gclient - shell: bash - run: | - echo "::group::Configure gclient" - gclient config https://github.com/shaka-project/shaka-packager.git --name=src --unmanaged - echo "::endgroup::" - - - name: Sync gclient - env: - GYP_DEFINES: "target_arch=${{ inputs.target_arch }} libpackager_type=${{ inputs.lib_type }}_library" - GYP_MSVS_VERSION: "2019" - GYP_MSVS_OVERRIDE_PATH: "C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise" - shell: bash - run: | - echo "::group::Sync gclient" - BUILD_CONFIG="${{ inputs.build_type }}-${{ inputs.lib_type }}" - if [[ "$BUILD_CONFIG" == "Release-static" && "${{ runner.os }}" == "Linux" ]]; then - # For static release builds, set these two additional flags for fully static binaries. - export GYP_DEFINES="$GYP_DEFINES disable_fatal_linker_warnings=1 static_link_binaries=1" - fi - gclient sync - echo "::endgroup::" - - - name: Build - shell: bash - run: | - echo "::group::Build" - ninja -C src/out/${{ inputs.build_type }}${{ inputs.build_type_suffix }} - echo "::endgroup::" - - - name: Prepare artifacts (static release only) - shell: bash - run: | - BUILD_CONFIG="${{ inputs.build_type }}-${{ inputs.lib_type }}" - if [[ "$BUILD_CONFIG" != "Release-static" ]]; then - echo "Skipping artifacts for $BUILD_CONFIG." - exit 0 - fi - if [[ "${{ runner.os }}" == "Linux" ]]; then - echo "::group::Check for static executables" - ( - cd src/out/Release${{ inputs.build_type_suffix }} - # Prove that we built static executables on Linux. 
First, check that - # the executables exist, and fail if they do not. Then check "ldd", - # which will fail if the executable is not dynamically linked. If - # "ldd" succeeds, we fail the workflow. Finally, we call "true" so - # that the last executed statement will be a success, and the step - # won't be failed if we get that far. - ls packager mpd_generator >/dev/null || exit 1 - ldd packager 2>&1 && exit 1 - ldd mpd_generator 2>&1 && exit 1 - true - ) - echo "::endgroup::" - fi - echo "::group::Prepare artifacts folder" - mkdir artifacts - ARTIFACTS="$GITHUB_WORKSPACE/artifacts" - cd src/out/Release${{ inputs.build_type_suffix }} - echo "::endgroup::" - echo "::group::Strip executables" - strip packager${{ inputs.exe_ext }} - strip mpd_generator${{ inputs.exe_ext }} - echo "::endgroup::" - SUFFIX="-${{ inputs.os_name }}-${{ inputs.target_arch }}" - EXE_SUFFIX="$SUFFIX${{ inputs.exe_ext}}" - echo "::group::Copy packager" - cp packager${{ inputs.exe_ext }} $ARTIFACTS/packager$EXE_SUFFIX - echo "::endgroup::" - echo "::group::Copy mpd_generator" - cp mpd_generator${{ inputs.exe_ext }} $ARTIFACTS/mpd_generator$EXE_SUFFIX - echo "::endgroup::" - # The pssh-box bundle is OS and architecture independent. So only do - # it on this one OS and architecture, and give it a more generic - # filename. - if [[ '${{ inputs.os_name }}' == 'linux' && '${{ inputs.target_arch }}' == 'x64' ]]; then - echo "::group::Tar pssh-box" - tar -czf $ARTIFACTS/pssh-box.py.tar.gz pyproto pssh-box.py - echo "::endgroup::" - fi diff --git a/.github/workflows/custom-actions/lint-packager/action.yaml b/.github/workflows/custom-actions/lint-packager/action.yaml deleted file mode 100644 index a2969da8b6..0000000000 --- a/.github/workflows/custom-actions/lint-packager/action.yaml +++ /dev/null @@ -1,36 +0,0 @@ -name: Lint Shaka Packager - -description: | - A reusable action to lint Shaka Packager source. 
- When checking out source, you must use 'fetch-depth: 2' in actions/checkout, - or else the linter won't have another revision to compare to. - -runs: - using: composite - steps: - - name: Lint - shell: bash - run: | - cd src/ - echo "::group::Installing git-clang-format" - wget https://raw.githubusercontent.com/llvm-mirror/clang/master/tools/clang-format/git-clang-format - sudo install -m 755 git-clang-format /usr/local/bin/git-clang-format - rm git-clang-format - echo "::endgroup::" - echo "::group::Installing pylint" - python3 -m pip install --upgrade pylint==2.8.3 - echo "::endgroup::" - echo "::group::Check clang-format for C++ sources" - # NOTE: --binary forces use of global clang-format (which works) instead - # of depot_tools clang-format (which doesn't). - # NOTE: Must use base.sha instead of base.ref, since we don't have - # access to the branch name that base.ref would give us. - # NOTE: Must also use fetch-depth: 2 in actions/checkout to have access - # to the base ref for comparison. - packager/tools/git/check_formatting.py \ - --binary /usr/bin/clang-format \ - ${{ github.event.pull_request.base.sha || 'HEAD^' }} - echo "::endgroup::" - echo "::group::Check pylint for Python sources" - packager/tools/git/check_pylint.py - echo "::endgroup::" diff --git a/.github/workflows/custom-actions/test-packager/action.yaml b/.github/workflows/custom-actions/test-packager/action.yaml deleted file mode 100644 index c8a7407377..0000000000 --- a/.github/workflows/custom-actions/test-packager/action.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: Test Shaka Packager - -description: | - A reusable action to test Shaka Packager. - Should be run after building Shaka Packager. - -inputs: - lib_type: - description: A library type, either "static" or "shared". - required: true - build_type: - description: A build type, either "Debug" or "Release". - required: true - build_type_suffix: - description: A suffix to append to the build type in the output path. 
- required: false - default: "" - exe_ext: - description: The extension on executable files. - required: false - default: "" - -runs: - using: composite - steps: - - name: Test - shell: bash - run: | - echo "::group::Prepare test environment" - # NOTE: Some of these tests must be run from the "src" directory. - cd src/ - OUTDIR=out/${{ inputs.build_type }}${{ inputs.build_type_suffix }} - if [[ '${{ runner.os }}' == 'macOS' ]]; then - export DYLD_FALLBACK_LIBRARY_PATH=$OUTDIR - fi - echo "::endgroup::" - for i in $OUTDIR/*test${{ inputs.exe_ext }}; do - echo "::group::Test $i" - "$i" || exit 1 - echo "::endgroup::" - done - echo "::group::Test $OUTDIR/packager_test.py" - python3 $OUTDIR/packager_test.py \ - -v --libpackager_type=${{ inputs.lib_type }}_library - echo "::endgroup::" diff --git a/.github/workflows/docker_hub_release.yaml b/.github/workflows/docker-hub-release.yaml similarity index 69% rename from .github/workflows/docker_hub_release.yaml rename to .github/workflows/docker-hub-release.yaml index 4a2b1130a3..33ce37ea2f 100644 --- a/.github/workflows/docker_hub_release.yaml +++ b/.github/workflows/docker-hub-release.yaml @@ -1,3 +1,17 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + name: Docker Hub Release # Runs when a new release is published on GitHub. 
@@ -30,8 +44,8 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v2
         with:
-          path: src
           ref: ${{ env.TARGET_REF }}
+          submodules: true

       - name: Log in to Docker Hub
         uses: docker/login-action@v1
@@ -43,5 +57,4 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           push: true
-          context: src/
           tags: ${{ secrets.DOCKERHUB_PACKAGE_NAME }}:latest,${{ secrets.DOCKERHUB_PACKAGE_NAME }}:${{ env.TARGET_REF }}
diff --git a/.github/workflows/docker-image.yaml b/.github/workflows/docker-image.yaml
new file mode 100644
index 0000000000..a8b0c27aed
--- /dev/null
+++ b/.github/workflows/docker-image.yaml
@@ -0,0 +1,45 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A workflow to build the official docker image.
+name: Official Docker image

+# Runs when called from another workflow.
+on:
+  workflow_call:
+    inputs:
+      ref:
+        required: true
+        type: string
+
+# By default, run all commands in a bash shell. On Windows, the default would
+# otherwise be powershell.
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  official_docker_image:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+        with:
+          # Use the required workflow_call input directly; the previous
+          # expression (github.event.inputs.ref || github.ref) ignored the
+          # declared input and resolved against the caller's event instead.
+          ref: ${{ inputs.ref }}
+          submodules: true
+
+      - name: Build
+        shell: bash
+        run: docker build .
diff --git a/.github/workflows/github-release.yaml b/.github/workflows/github-release.yaml new file mode 100644 index 0000000000..0f757809b5 --- /dev/null +++ b/.github/workflows/github-release.yaml @@ -0,0 +1,141 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: GitHub Release + +# Runs when a new tag is created that looks like a version number. +# +# 1. Creates a draft release on GitHub with the latest release notes +# 2. On all combinations of OS, build type, and library type: +# a. builds Packager +# b. builds the docs +# c. runs all tests +# d. attaches build artifacts to the release +# 3. Fully publishes the release on GitHub +# +# Publishing the release then triggers additional workflows for NPM, Docker +# Hub, and GitHub Pages. +# +# Can also be run manually for debugging purposes. +on: + push: + tags: + - "v*.*" + # For manual debugging: + workflow_dispatch: + inputs: + tag: + description: "An existing tag to release." + required: True + +jobs: + # TODO(joeyparrish): Switch to release-please + setup: + name: Setup + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.compute_tag.outputs.tag }} + steps: + - name: Compute tag + id: compute_tag + # We could be building from a workflow dispatch (manual run) + # or from a pushed tag. If triggered from a pushed tag, we would like + # to strip refs/tags/ off of the incoming ref and just use the tag + # name. 
Subsequent jobs can refer to the "tag" output of this job to
+        # determine the correct tag name in all cases.
+        run: |
+          # Strip refs/tags/ from the input to get the tag name, then store
+          # that in output.
+          echo "::set-output name=tag::${{ github.event.inputs.tag || github.ref }}" \
+            | sed -e 's@refs/tags/@@'
+
+  # TODO(joeyparrish): Switch to release-please
+  draft_release:
+    name: Create GitHub release
+    needs: setup
+    runs-on: ubuntu-latest
+    outputs:
+      release_id: ${{ steps.draft_release.outputs.id }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+        with:
+          ref: ${{ needs.setup.outputs.tag }}
+
+      - name: Check changelog version
+        # This check prevents releases without appropriate changelog updates.
+        run: |
+          VERSION=$(packager/tools/extract_from_changelog.py --version)
+          if [[ "$VERSION" != "${{ needs.setup.outputs.tag }}" ]]; then
+            echo ""
+            echo ""
+            echo "***** ***** *****"
+            echo ""
+            echo "Version mismatch!"
+            echo "Workflow is targeting ${{ needs.setup.outputs.tag }},"
+            echo "but CHANGELOG.md contains $VERSION!"
+            exit 1
+          fi
+
+      - name: Extract release notes
+        # NOTE: With checkout now at the workspace root (no "path: src"),
+        # the notes must land in the workspace so that the "Draft release"
+        # step's relative body_path can find them. "../RELEASE_NOTES.md"
+        # would write outside the workspace.
+        run: |
+          packager/tools/extract_from_changelog.py --release_notes \
+            | tee RELEASE_NOTES.md
+
+      - name: Draft release
+        id: draft_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ needs.setup.outputs.tag }}
+          release_name: ${{ needs.setup.outputs.tag }}
+          body_path: RELEASE_NOTES.md
+          draft: true
+
+  lint:
+    needs: setup
+    name: Lint
+    uses: ./.github/workflows/lint.yaml
+    with:
+      ref: ${{ needs.setup.outputs.tag }}
+
+  build_and_test:
+    needs: [setup, lint, draft_release]
+    name: Build and test
+    uses: ./.github/workflows/build.yaml
+    with:
+      ref: ${{ needs.setup.outputs.tag }}
+
+  test_supported_linux_distros:
+    # Doesn't really "need" it, but let's not waste time on a series of docker
+    # builds just to cancel it because of a linter error.
+ needs: lint + name: Test Linux distros + uses: ./.github/workflows/test-linux-distros.yaml + with: + ref: ${{ needs.setup.outputs.tag }} + + # TODO(joeyparrish): Switch to release-please + publish_release: + name: Publish GitHub release + needs: [draft_release, build_and_test] + runs-on: ubuntu-latest + steps: + - name: Publish release + uses: eregon/publish-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + release_id: ${{ needs.draft_release.outputs.release_id }} diff --git a/.github/workflows/github_release.yaml b/.github/workflows/github_release.yaml deleted file mode 100644 index 0d018670c6..0000000000 --- a/.github/workflows/github_release.yaml +++ /dev/null @@ -1,228 +0,0 @@ -name: GitHub Release - -# Runs when a new tag is created that looks like a version number. -# -# 1. Creates a draft release on GitHub with the latest release notes -# 2. On all combinations of OS, build type, and library type: -# a. builds Packager -# b. builds the docs -# c. runs all tests -# d. attaches build artifacts to the release -# 3. Fully publishes the release on GitHub -# -# Publishing the release then triggers additional workflows for NPM, Docker -# Hub, and GitHub Pages. -# -# Can also be run manually for debugging purposes. -on: - push: - tags: - - "v*.*" - # For manual debugging: - workflow_dispatch: - inputs: - tag: - description: "An existing tag to release." - required: True - -jobs: - setup: - name: Setup - runs-on: ubuntu-latest - outputs: - tag: ${{ steps.compute_tag.outputs.tag }} - steps: - - name: Compute tag - id: compute_tag - # We could be building from a workflow dispatch (manual run) - # or from a pushed tag. If triggered from a pushed tag, we would like - # to strip refs/tags/ off of the incoming ref and just use the tag - # name. Subsequent jobs can refer to the "tag" output of this job to - # determine the correct tag name in all cases. - run: | - # Strip refs/tags/ from the input to get the tag name, then store - # that in output. 
- echo "::set-output name=tag::${{ github.event.inputs.tag || github.ref }}" \ - | sed -e 's@refs/tags/@@' - - draft_release: - name: Create GitHub release - needs: setup - runs-on: ubuntu-latest - outputs: - release_id: ${{ steps.draft_release.outputs.id }} - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ needs.setup.outputs.tag }} - - - name: Check changelog version - # This check prevents releases without appropriate changelog updates. - run: | - cd src - VERSION=$(packager/tools/extract_from_changelog.py --version) - if [[ "$VERSION" != "${{ needs.setup.outputs.tag }}" ]]; then - echo "" - echo "" - echo "***** ***** *****" - echo "" - echo "Version mismatch!" - echo "Workflow is targetting ${{ needs.setup.outputs.tag }}," - echo "but CHANGELOG.md contains $VERSION!" - exit 1 - fi - - - name: Extract release notes - run: | - cd src - packager/tools/extract_from_changelog.py --release_notes \ - | tee ../RELEASE_NOTES.md - - - name: Draft release - id: draft_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ needs.setup.outputs.tag }} - release_name: ${{ needs.setup.outputs.tag }} - body_path: RELEASE_NOTES.md - draft: true - - lint: - needs: setup - name: Lint - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ needs.setup.outputs.tag }} - # This makes the merge base available for the C++ linter, so that it - # can tell which files have changed. 
- fetch-depth: 2 - - - name: Lint - uses: ./src/.github/workflows/custom-actions/lint-packager - - build_and_test: - needs: [setup, lint, draft_release] - strategy: - matrix: - os: ["ubuntu-latest", "macos-latest", "windows-latest", "self-hosted-linux-arm64"] - build_type: ["Debug", "Release"] - lib_type: ["static", "shared"] - include: - - os: ubuntu-latest - os_name: linux - target_arch: x64 - exe_ext: "" - build_type_suffix: "" - - os: macos-latest - os_name: osx - target_arch: x64 - exe_ext: "" - build_type_suffix: "" - - os: windows-latest - os_name: win - target_arch: x64 - exe_ext: ".exe" - # 64-bit outputs on Windows go to a different folder name. - build_type_suffix: "_x64" - - os: self-hosted-linux-arm64 - os_name: linux - target_arch: arm64 - exe_ext: "" - build_type_suffix: "" - - name: Build and test ${{ matrix.os_name }} ${{ matrix.target_arch }} ${{ matrix.build_type }} ${{ matrix.lib_type }} - runs-on: ${{ matrix.os }} - - steps: - - name: Configure git to preserve line endings - # Otherwise, tests fail on Windows because "golden" test outputs will not - # have the correct line endings. 
- run: git config --global core.autocrlf false - - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ needs.setup.outputs.tag }} - - - name: Build docs (Linux only) - if: runner.os == 'Linux' - uses: ./src/.github/workflows/custom-actions/build-docs - - - name: Build Packager - uses: ./src/.github/workflows/custom-actions/build-packager - with: - os_name: ${{ matrix.os_name }} - target_arch: ${{ matrix.target_arch }} - lib_type: ${{ matrix.lib_type }} - build_type: ${{ matrix.build_type }} - build_type_suffix: ${{ matrix.build_type_suffix }} - exe_ext: ${{ matrix.exe_ext }} - - - name: Test Packager - uses: ./src/.github/workflows/custom-actions/test-packager - with: - lib_type: ${{ matrix.lib_type }} - build_type: ${{ matrix.build_type }} - build_type_suffix: ${{ matrix.build_type_suffix }} - exe_ext: ${{ matrix.exe_ext }} - - - name: Attach artifacts to release - if: matrix.build_type == 'Release' && matrix.lib_type == 'static' - uses: dwenegar/upload-release-assets@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - release_id: ${{ needs.draft_release.outputs.release_id }} - assets_path: artifacts - - test_supported_linux_distros: - # Doesn't really "need" it, but let's not waste time on a series of docker - # builds just to cancel it because of a linter error. 
- needs: lint - name: Test builds on all supported Linux distros (using docker) - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - path: src - ref: ${{ github.event.inputs.ref || github.ref }} - - - name: Install depot tools - shell: bash - run: | - git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git - touch depot_tools/.disable_auto_update - echo "${GITHUB_WORKSPACE}/depot_tools" >> $GITHUB_PATH - - - name: Setup gclient - shell: bash - run: | - gclient config https://github.com/shaka-project/shaka-packager.git --name=src --unmanaged - # NOTE: the docker tests will do gclient runhooks, so skip hooks here. - gclient sync --nohooks - - - name: Test all distros - shell: bash - run: ./src/packager/testing/dockers/test_dockers.sh - - publish_release: - name: Publish GitHub release - needs: [draft_release, build_and_test] - runs-on: ubuntu-latest - steps: - - name: Publish release - uses: eregon/publish-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - release_id: ${{ needs.draft_release.outputs.release_id }} diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000000..205ccafd66 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,62 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A workflow to lint Shaka Packager. +name: Lint + +# Runs when called from another workflow. 
+on: + workflow_call: + inputs: + ref: + required: true + type: string + +# By default, run all commands in a bash shell. On Windows, the default would +# otherwise be powershell. +defaults: + run: + shell: bash + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + # We must use 'fetch-depth: 2', or else the linter won't have another + # revision to compare to. + fetch-depth: 2 + + - name: Lint + shell: bash + run: | + wget https://raw.githubusercontent.com/llvm-mirror/clang/master/tools/clang-format/git-clang-format + sudo install -m 755 git-clang-format /usr/local/bin/git-clang-format + rm git-clang-format + + python3 -m pip install --upgrade pylint==2.8.3 + + # NOTE: Must use base.sha instead of base.ref, since we don't have + # access to the branch name that base.ref would give us. + # NOTE: Must also use fetch-depth: 2 in actions/checkout to have access + # to the base ref for comparison. 
+ packager/tools/git/check_formatting.py \ + --binary /usr/bin/clang-format \ + ${{ github.event.pull_request.base.sha || 'HEAD^' }} + + packager/tools/git/check_pylint.py diff --git a/.github/workflows/npm_release.yaml b/.github/workflows/npm-release.yaml similarity index 94% rename from .github/workflows/npm_release.yaml rename to .github/workflows/npm-release.yaml index 9c9d2591fd..91797d515b 100644 --- a/.github/workflows/npm_release.yaml +++ b/.github/workflows/npm-release.yaml @@ -30,7 +30,6 @@ jobs: - name: Checkout code uses: actions/checkout@v2 with: - path: src ref: ${{ env.TARGET_REF }} - name: Setup NodeJS @@ -40,7 +39,7 @@ jobs: - name: Set package name and version run: | - cd src/npm + cd npm sed package.json -i \ -e 's/"name": ""/"name": "${{ secrets.NPM_PACKAGE_NAME }}"/' \ -e 's/"version": ""/"version": "${{ env.TARGET_REF }}"/' @@ -49,6 +48,6 @@ jobs: uses: JS-DevTools/npm-publish@v1 with: token: ${{ secrets.NPM_CI_TOKEN }} - package: src/npm/package.json + package: npm/package.json check-version: false access: public diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 0000000000..5e8eacb30e --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,67 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Build and Test PR + +# Builds and tests on all combinations of OS, build type, and library type. +# Also builds the docs. +# +# Runs when a pull request is opened or updated. 
+# +# Can also be run manually for debugging purposes. +on: + pull_request: + types: [opened, synchronize, reopened] + workflow_dispatch: + inputs: + ref: + description: "The ref to build and test." + required: False + +jobs: + lint: + name: Lint + uses: ./.github/workflows/lint.yaml + with: + ref: ${{ github.event.inputs.ref || github.ref }} + + build_and_test: + needs: lint + name: Build and test + uses: ./.github/workflows/build.yaml + with: + ref: ${{ github.event.inputs.ref || github.ref }} + + build_docs: + needs: lint + name: Build docs + uses: ./.github/workflows/build-docs.yaml + with: + ref: ${{ github.event.inputs.ref || github.ref }} + + official_docker_image: + needs: lint + name: Official Docker image + uses: ./.github/workflows/docker-image.yaml + with: + ref: ${{ github.event.inputs.ref || github.ref }} + + test_supported_linux_distros: + # Doesn't really "need" it, but let's not waste time on a series of docker + # builds just to cancel it because of a linter error. + needs: lint + name: Test Linux distros + uses: ./.github/workflows/test-linux-distros.yaml + with: + ref: ${{ github.event.inputs.ref || github.ref }} diff --git a/.github/workflows/test-linux-distros.yaml b/.github/workflows/test-linux-distros.yaml new file mode 100644 index 0000000000..8e6a69c053 --- /dev/null +++ b/.github/workflows/test-linux-distros.yaml @@ -0,0 +1,80 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# A workflow to test building in various Linux distros. +name: Test Linux Distros + +# Runs when called from another workflow. +on: + workflow_call: + inputs: + ref: + required: true + type: string + +# By default, run all commands in a bash shell. On Windows, the default would +# otherwise be powershell. +defaults: + run: + shell: bash + +jobs: + # Configure the build matrix based on files in the repo. + matrix_config: + name: Matrix config + runs-on: ubuntu-latest + outputs: + MATRIX: ${{ steps.configure.outputs.MATRIX }} + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + + - name: Configure Build Matrix + id: configure + shell: node {0} + run: | + const fs = require('fs'); + const files = fs.readdirSync('packager/testing/dockers/'); + const matrix = files.map((file) => { + return { os_name: file.replace('_Dockerfile', '') }; + }); + + // Output a JSON object consumed by the build matrix below. + console.log(`::set-output name=MATRIX::${ JSON.stringify(matrix) }`); + + // Log the outputs, for the sake of debugging this script. + console.log({matrix}); + + # Build each dockerfile in parallel in a different CI job. + build: + needs: matrix_config + strategy: + # Let other matrix entries complete, so we have all results on failure + # instead of just the first failure. 
+ fail-fast: false + matrix: + include: ${{ fromJSON(needs.matrix_config.outputs.MATRIX) }} + + name: ${{ matrix.os_name }} + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ inputs.ref }} + submodules: true + + - name: Build in Docker + run: ./packager/testing/test_dockers.sh "${{ matrix.os_name }}" diff --git a/.github/workflows/update_docs.yaml b/.github/workflows/update-docs.yaml similarity index 94% rename from .github/workflows/update_docs.yaml rename to .github/workflows/update-docs.yaml index 5b9466ba60..4a8efb9288 100644 --- a/.github/workflows/update_docs.yaml +++ b/.github/workflows/update-docs.yaml @@ -31,7 +31,6 @@ jobs: - name: Checkout code uses: actions/checkout@v2 with: - path: src ref: ${{ steps.ref.outputs.ref }} - name: Set up Python @@ -40,7 +39,7 @@ jobs: python-version: 3.8 - name: Build docs - uses: ./src/.github/workflows/custom-actions/build-docs + uses: ./.github/workflows/custom-actions/build-docs - name: Deploy to gh-pages branch uses: peaceiris/actions-gh-pages@v3 diff --git a/.gitignore b/.gitignore index 66bc422882..735f9ca8fc 100644 --- a/.gitignore +++ b/.gitignore @@ -5,33 +5,17 @@ */.vs/* *~ .DS_store +.cache .cproject .project .pydevproject .idea .repo .settings -/out* -/packager/base/ -/packager/build/ -/packager/buildtools/third_party/libc++/trunk/ -/packager/buildtools/third_party/libc++abi/trunk/ +build/ /packager/docs/ -/packager/testing/gmock/ -/packager/testing/gtest/ -/packager/third_party/binutils/ -/packager/third_party/boringssl/src/ -/packager/third_party/curl/source/ -/packager/third_party/gflags/src/ -/packager/third_party/gold/ /packager/third_party/icu/ /packager/third_party/libpng/src/ /packager/third_party/libwebm/src/ -/packager/third_party/llvm-build/ /packager/third_party/modp_b64/ -/packager/third_party/tcmalloc/ -/packager/third_party/yasm/source/patched-yasm/ /packager/third_party/zlib/ -/packager/tools/clang/ -/packager/tools/gyp/ -/packager/tools/valgrind/ diff --git 
a/.gitmodules b/.gitmodules index e69de29bb2..d6f9d061cc 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,18 @@ +[submodule "packager/testing/googletest"] + path = packager/third_party/googletest/source + url = https://github.com/google/googletest +[submodule "packager/third_party/abseil-cpp"] + path = packager/third_party/abseil-cpp/source + url = https://github.com/abseil/abseil-cpp +[submodule "packager/third_party/curl"] + path = packager/third_party/curl/source + url = https://github.com/curl/curl +[submodule "packager/third_party/glog"] + path = packager/third_party/glog/source + url = https://github.com/google/glog +[submodule "packager/third_party/json"] + path = packager/third_party/json + url = https://github.com/nlohmann/json +[submodule "packager/third_party/mbedtls"] + path = packager/third_party/mbedtls/source + url = https://github.com/Mbed-TLS/mbedtls diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000000..ec0314de6f --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,45 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# Root-level CMake build file. + +# Project name. May not contain spaces. Versioning is managed elsewhere. +cmake_policy(SET CMP0048 NEW) +project(shaka-packager VERSION "") + +# Minimum CMake version. +# We could require as low as 3.10, but glog requires 3.16. +cmake_minimum_required(VERSION 3.16) + +# The only build option for Shaka Packager is whether to build a shared +# libpackager library. By default, don't. +option(LIBPACKAGER_SHARED "Build libpackager as a shared library" OFF) + +# No in-source builds allowed. +set(CMAKE_DISABLE_SOURCE_CHANGES ON) +set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) + +# Minimum C++ version. +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Minimum GCC version, if using GCC. 
+if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + # Require at least GCC 9. Before GCC 9, C++17 filesystem libs don't link. + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9) + message(FATAL_ERROR "GCC version must be at least 9! (Found ${CMAKE_CXX_COMPILER_VERSION})") + endif() +endif() + +# Global include paths. +# Project root, to reference packager/foo/bar/... +include_directories(.) + +# Enable CMake's test infrastructure. +enable_testing() + +# Subdirectories with their own CMakeLists.txt +add_subdirectory(packager) diff --git a/DEPS b/DEPS deleted file mode 100644 index 96ca6f18bb..0000000000 --- a/DEPS +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd -# -# Packager dependencies. - -vars = { - "chromium_git": "https://chromium.googlesource.com", - "github": "https://github.com", -} - -deps = { - "src/packager/base": - Var("chromium_git") + "/chromium/src/base@a34eabec0d807cf03dc8cfc1a6240156ac2bbd01", #409071 - - "src/packager/build": - Var("chromium_git") + "/chromium/src/build@f0243d787961584ac95a86e7dae897b9b60ea674", #409966 - - "src/packager/testing/gmock": - Var("chromium_git") + "/external/googlemock@0421b6f358139f02e102c9c332ce19a33faf75be", #566 - - "src/packager/testing/gtest": - Var("chromium_git") + "/external/github.com/google/googletest@6f8a66431cb592dad629028a50b3dd418a408c87", - - # Make sure the version matches the one in - # src/packager/third_party/boringssl, which contains perl generated files. 
- "src/packager/third_party/boringssl/src": - Var("github") + "/google/boringssl@76918d016414bf1d71a86d28239566fbcf8aacf0", - - "src/packager/third_party/curl/source": - Var("github") + "/curl/curl@62c07b5743490ce373910f469abc8cdc759bec2b", #7.57.0 - - "src/packager/third_party/gflags/src": - Var("chromium_git") + "/external/github.com/gflags/gflags@03bebcb065c83beff83d50ae025a55a4bf94dfca", - - # Required by libxml. - "src/packager/third_party/icu": - Var("chromium_git") + "/chromium/deps/icu@ef5c735307d0f86c7622f69620994c9468beba99", - - "src/packager/third_party/libpng/src": - Var("github") + "/glennrp/libpng@a40189cf881e9f0db80511c382292a5604c3c3d1", - - "src/packager/third_party/libwebm/src": - Var("chromium_git") + "/webm/libwebm@d6af52a1e688fade2e2d22b6d9b0c82f10d38e0b", - - "src/packager/third_party/modp_b64": - Var("chromium_git") + "/chromium/src/third_party/modp_b64@aae60754fa997799e8037f5e8ca1f56d58df763d", #405651 - - "src/packager/third_party/tcmalloc/chromium": - Var("chromium_git") + "/chromium/src/third_party/tcmalloc/chromium@58a93bea442dbdcb921e9f63e9d8b0009eea8fdb", #374449 - - "src/packager/third_party/zlib": - Var("chromium_git") + "/chromium/src/third_party/zlib@830b5c25b5fbe37e032ea09dd011d57042dd94df", #408157 - - "src/packager/tools/gyp": - Var("chromium_git") + "/external/gyp@caa60026e223fc501e8b337fd5086ece4028b1c6", -} - -deps_os = { - "win": { - # Required by boringssl. - "src/packager/third_party/yasm/source/patched-yasm": - Var("chromium_git") + "/chromium/deps/yasm/patched-yasm.git@7da28c6c7c6a1387217352ce02b31754deb54d2a", - }, -} - -hooks = [ - { - # When using CC=clang CXX=clang++, there is a binutils version check that - # does not work correctly in common.gypi. Since we are stuck with a very - # old version of chromium/src/build, there is nothing to do but patch it to - # remove the check. Thankfully, this version number does not control - # anything critical in the build settings as far as we can tell. 
- 'name': 'patch-binutils-version-check', - 'pattern': '.', - 'action': ['sed', '-e', 's/ + +#include "absl/base/macros.h" + +namespace shaka { + +/// A macro to disable copying and assignment. Usage: +/// class Foo { +/// private: +/// DISALLOW_COPY_AND_ASSIGN(Foo); +/// } +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete; + +/// ABSL_ARRAYSIZE works just like the arraysize macro we used to use from +/// Chromium. To ease porting, define arraysize() as ABSL_ARRAYSIZE(). +#define arraysize(a) ABSL_ARRAYSIZE(a) + +/// A macro to declare that you intentionally did not use a parameter. Useful +/// when implementing abstract interfaces. +#define UNUSED(x) (void)(x) + +/// A macro to declare that you intentionally did not implement a method. +/// You can use the insertion operator to add specific logs to this. +#define NOTIMPLEMENTED() LOG(ERROR) << "NOTIMPLEMENTED: " + +} // namespace shaka + +#endif // PACKAGER_COMMON_H_ diff --git a/packager/file/CMakeLists.txt b/packager/file/CMakeLists.txt new file mode 100644 index 0000000000..c5e3a946ae --- /dev/null +++ b/packager/file/CMakeLists.txt @@ -0,0 +1,46 @@ +# Copyright 2022 Google Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +add_library(file STATIC + callback_file.cc + file.cc + file_util.cc + http_file.cc + io_cache.cc + local_file.cc + memory_file.cc + thread_pool.cc + threaded_io_file.cc + udp_file.cc + udp_options.cc) +target_link_libraries(file + absl::base + absl::flags + absl::str_format + absl::strings + absl::synchronization + absl::time + libcurl + glog + kv_pairs + status + version) + +add_executable(file_unittest + callback_file_unittest.cc + file_unittest.cc + file_util_unittest.cc + http_file_unittest.cc + io_cache_unittest.cc + memory_file_unittest.cc + udp_options_unittest.cc) +target_link_libraries(file_unittest + file + gmock + gtest + gtest_main + nlohmann_json) +add_test(NAME file_unittest COMMAND file_unittest) diff --git a/packager/file/callback_file.cc b/packager/file/callback_file.cc index 55dae0a98e..7927df7a89 100644 --- a/packager/file/callback_file.cc +++ b/packager/file/callback_file.cc @@ -6,7 +6,8 @@ #include "packager/file/callback_file.h" -#include "packager/base/logging.h" +#include "glog/logging.h" +#include "packager/common.h" namespace shaka { @@ -47,11 +48,13 @@ bool CallbackFile::Flush() { } bool CallbackFile::Seek(uint64_t position) { + UNUSED(position); VLOG(1) << "CallbackFile does not support Seek()."; return false; } bool CallbackFile::Tell(uint64_t* position) { + UNUSED(position); VLOG(1) << "CallbackFile does not support Tell()."; return false; } diff --git a/packager/file/callback_file_unittest.cc b/packager/file/callback_file_unittest.cc index 0e229b4845..4504329914 100644 --- a/packager/file/callback_file_unittest.cc +++ b/packager/file/callback_file_unittest.cc @@ -94,8 +94,11 @@ TEST(CallbackFileTest, ReadFailed) { File::MakeCallbackFileName(callback_params, kBufferLabel); EXPECT_CALL(mock_read_func, Call(StrEq(kBufferLabel), _, _)) - .WillOnce(WithArgs<1, 2>( - 
Invoke([](void* buffer, uint64_t size) { return kFileError; }))); + .WillOnce(WithArgs<1, 2>(Invoke([](void* buffer, uint64_t size) { + UNUSED(buffer); + UNUSED(size); + return kFileError; + }))); std::unique_ptr reader(File::Open(file_name.c_str(), "r")); ASSERT_TRUE(reader); diff --git a/packager/file/file.cc b/packager/file/file.cc index c10718128e..607f6924ee 100644 --- a/packager/file/file.cc +++ b/packager/file/file.cc @@ -6,30 +6,34 @@ #include "packager/file/file.h" -#include #include + #include +#include #include -#include "packager/base/files/file_util.h" -#include "packager/base/logging.h" -#include "packager/base/strings/string_number_conversions.h" -#include "packager/base/strings/string_piece.h" -#include "packager/base/strings/stringprintf.h" + +#include "absl/flags/flag.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_format.h" +#include "glog/logging.h" + #include "packager/file/callback_file.h" #include "packager/file/file_util.h" +#include "packager/file/http_file.h" #include "packager/file/local_file.h" #include "packager/file/memory_file.h" #include "packager/file/threaded_io_file.h" #include "packager/file/udp_file.h" -#include "packager/file/http_file.h" -DEFINE_uint64(io_cache_size, - 32ULL << 20, - "Size of the threaded I/O cache, in bytes. Specify 0 to disable " - "threaded I/O."); -DEFINE_uint64(io_block_size, - 1ULL << 16, - "Size of the block size used for threaded I/O, in bytes."); +ABSL_FLAG(uint64_t, + io_cache_size, + 32ULL << 20, + "Size of the threaded I/O cache, in bytes. Specify 0 to disable " + "threaded I/O."); +ABSL_FLAG(uint64_t, + io_block_size, + 1ULL << 16, + "Size of the block size used for threaded I/O, in bytes."); // Needed for Windows weirdness which somewhere defines CopyFile as CopyFileW. 
#ifdef CopyFile @@ -74,18 +78,20 @@ bool DeleteLocalFile(const char* file_name) { bool WriteLocalFileAtomically(const char* file_name, const std::string& contents) { - const base::FilePath file_path = base::FilePath::FromUTF8Unsafe(file_name); - const std::string dir_name = file_path.DirName().AsUTF8Unsafe(); + const std::filesystem::path file_path(file_name); + const std::filesystem::path dir_path = file_path.parent_path(); + std::string temp_file_name; - if (!TempFilePath(dir_name, &temp_file_name)) + if (!TempFilePath(dir_path.string(), &temp_file_name)) return false; if (!File::WriteStringToFile(temp_file_name.c_str(), contents)) return false; - base::File::Error replace_file_error = base::File::FILE_OK; - if (!base::ReplaceFile(base::FilePath::FromUTF8Unsafe(temp_file_name), - file_path, &replace_file_error)) { + + std::error_code ec; + std::filesystem::rename(temp_file_name, file_name, ec); + if (ec) { LOG(ERROR) << "Failed to replace file '" << file_name << "' with '" - << temp_file_name << "', error: " << replace_file_error; + << temp_file_name << "', error: " << ec; return false; } return true; @@ -100,10 +106,12 @@ File* CreateUdpFile(const char* file_name, const char* mode) { } File* CreateHttpsFile(const char* file_name, const char* mode) { + UNUSED(mode); // TODO: choose method based on file mode return new HttpFile(HttpMethod::kPut, std::string("https://") + file_name); } File* CreateHttpFile(const char* file_name, const char* mode) { + UNUSED(mode); // TODO: choose method based on file mode return new HttpFile(HttpMethod::kPut, std::string("http://") + file_name); } @@ -130,14 +138,14 @@ static const FileTypeInfo kFileTypeInfo[] = { {kHttpsFilePrefix, &CreateHttpsFile, nullptr, nullptr}, }; -base::StringPiece GetFileTypePrefix(base::StringPiece file_name) { +std::string_view GetFileTypePrefix(std::string_view file_name) { size_t pos = file_name.find("://"); return (pos == std::string::npos) ? 
"" : file_name.substr(0, pos + 3); } -const FileTypeInfo* GetFileTypeInfo(base::StringPiece file_name, - base::StringPiece* real_file_name) { - base::StringPiece file_type_prefix = GetFileTypePrefix(file_name); +const FileTypeInfo* GetFileTypeInfo(std::string_view file_name, + std::string_view* real_file_name) { + std::string_view file_type_prefix = GetFileTypePrefix(file_name); for (const FileTypeInfo& file_type : kFileTypeInfo) { if (file_type_prefix == file_type.type) { *real_file_name = file_name.substr(file_type_prefix.size()); @@ -155,23 +163,25 @@ File* File::Create(const char* file_name, const char* mode) { std::unique_ptr internal_file( CreateInternalFile(file_name, mode)); - base::StringPiece file_type_prefix = GetFileTypePrefix(file_name); + std::string_view file_type_prefix = GetFileTypePrefix(file_name); if (file_type_prefix == kMemoryFilePrefix || file_type_prefix == kCallbackFilePrefix) { // Disable caching for memory and callback files. return internal_file.release(); } - if (FLAGS_io_cache_size) { + if (absl::GetFlag(FLAGS_io_cache_size)) { // Enable threaded I/O for "r", "w", and "a" modes only. 
if (!strcmp(mode, "r")) { return new ThreadedIoFile(std::move(internal_file), - ThreadedIoFile::kInputMode, FLAGS_io_cache_size, - FLAGS_io_block_size); + ThreadedIoFile::kInputMode, + absl::GetFlag(FLAGS_io_cache_size), + absl::GetFlag(FLAGS_io_block_size)); } else if (!strcmp(mode, "w") || !strcmp(mode, "a")) { return new ThreadedIoFile(std::move(internal_file), ThreadedIoFile::kOutputMode, - FLAGS_io_cache_size, FLAGS_io_block_size); + absl::GetFlag(FLAGS_io_cache_size), + absl::GetFlag(FLAGS_io_block_size)); } } @@ -181,7 +191,7 @@ File* File::Create(const char* file_name, const char* mode) { } File* File::CreateInternalFile(const char* file_name, const char* mode) { - base::StringPiece real_file_name; + std::string_view real_file_name; const FileTypeInfo* file_type = GetFileTypeInfo(file_name, &real_file_name); DCHECK(file_type); // Calls constructor for the derived File class. @@ -212,7 +222,7 @@ File* File::OpenWithNoBuffering(const char* file_name, const char* mode) { bool File::Delete(const char* file_name) { static bool logged = false; - base::StringPiece real_file_name; + std::string_view real_file_name; const FileTypeInfo* file_type = GetFileTypeInfo(file_name, &real_file_name); DCHECK(file_type); if (file_type->delete_function) { @@ -288,7 +298,7 @@ bool File::WriteStringToFile(const char* file_name, bool File::WriteFileAtomically(const char* file_name, const std::string& contents) { VLOG(2) << "File::WriteFileAtomically: " << file_name; - base::StringPiece real_file_name; + std::string_view real_file_name; const FileTypeInfo* file_type = GetFileTypeInfo(file_name, &real_file_name); DCHECK(file_type); if (file_type->atomic_write_function) @@ -386,28 +396,15 @@ int64_t File::CopyFile(File* source, File* destination, int64_t max_copy) { } bool File::IsLocalRegularFile(const char* file_name) { - base::StringPiece real_file_name; + std::string_view real_file_name; const FileTypeInfo* file_type = GetFileTypeInfo(file_name, &real_file_name); 
DCHECK(file_type); + if (file_type->type != kLocalFilePrefix) return false; -#if defined(OS_WIN) - const base::FilePath file_path( - base::FilePath::FromUTF8Unsafe(real_file_name)); - const DWORD fileattr = GetFileAttributes(file_path.value().c_str()); - if (fileattr == INVALID_FILE_ATTRIBUTES) { - LOG(ERROR) << "Failed to GetFileAttributes of " << file_path.value(); - return false; - } - return (fileattr & FILE_ATTRIBUTE_DIRECTORY) == 0; -#else - struct stat info; - if (stat(real_file_name.data(), &info) != 0) { - LOG(ERROR) << "Failed to run stat on " << real_file_name; - return false; - } - return S_ISREG(info.st_mode); -#endif + + std::error_code ec; + return std::filesystem::is_regular_file(real_file_name, ec); } std::string File::MakeCallbackFileName( @@ -415,9 +412,9 @@ std::string File::MakeCallbackFileName( const std::string& name) { if (name.empty()) return ""; - return base::StringPrintf("%s%" PRIdPTR "/%s", kCallbackFilePrefix, - reinterpret_cast(&callback_params), - name.c_str()); + return absl::StrFormat("%s%" PRIdPTR "/%s", kCallbackFilePrefix, + reinterpret_cast(&callback_params), + name.c_str()); } bool File::ParseCallbackFileName(const std::string& callback_file_name, @@ -426,8 +423,7 @@ bool File::ParseCallbackFileName(const std::string& callback_file_name, size_t pos = callback_file_name.find("/"); int64_t callback_address = 0; if (pos == std::string::npos || - !base::StringToInt64(callback_file_name.substr(0, pos), - &callback_address)) { + !absl::SimpleAtoi(callback_file_name.substr(0, pos), &callback_address)) { LOG(ERROR) << "Expecting CallbackFile with name like " "'/', but seeing " << callback_file_name; diff --git a/packager/file/file.gyp b/packager/file/file.gyp deleted file mode 100644 index 9a842ee707..0000000000 --- a/packager/file/file.gyp +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. 
-# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd - -{ - 'variables': { - 'shaka_code': 1, - }, - 'targets': [ - { - 'target_name': 'file', - 'type': '<(component)', - 'sources': [ - 'callback_file.cc', - 'callback_file.h', - 'file.cc', - 'file.h', - 'file_util.cc', - 'file_util.h', - 'file_closer.h', - 'http_file.cc', - 'http_file.h', - 'io_cache.cc', - 'io_cache.h', - 'local_file.cc', - 'local_file.h', - 'memory_file.cc', - 'memory_file.h', - 'public/buffer_callback_params.h', - 'threaded_io_file.cc', - 'threaded_io_file.h', - 'udp_file.cc', - 'udp_file.h', - 'udp_options.cc', - 'udp_options.h', - ], - 'dependencies': [ - '../base/base.gyp:base', - '../packager.gyp:status', - '../third_party/gflags/gflags.gyp:gflags', - '../third_party/curl/curl.gyp:libcurl', - '../version/version.gyp:version', - ], - 'conditions': [ - ['libpackager_type == "shared_library"', { - 'defines': [ - 'SHARED_LIBRARY_BUILD', - 'SHAKA_IMPLEMENTATION', - ], - }], - ], - }, - { - 'target_name': 'file_unittest', - 'type': '<(gtest_target_type)', - 'sources': [ - 'callback_file_unittest.cc', - 'file_unittest.cc', - 'file_util_unittest.cc', - 'io_cache_unittest.cc', - 'memory_file_unittest.cc', - 'udp_options_unittest.cc', - 'http_file_unittest.cc', - ], - 'dependencies': [ - '../media/test/media_test.gyp:run_tests_with_atexit_manager', - '../testing/gmock.gyp:gmock', - '../testing/gtest.gyp:gtest', - '../third_party/gflags/gflags.gyp:gflags', - '../third_party/curl/curl.gyp:libcurl', - '../version/version.gyp:version', - 'file', - ], - }, - ], -} diff --git a/packager/file/file.h b/packager/file/file.h index 5c2f42b57e..ca972ff882 100644 --- a/packager/file/file.h +++ b/packager/file/file.h @@ -11,9 +11,9 @@ #include -#include "packager/base/macros.h" +#include "packager/common.h" #include "packager/file/public/buffer_callback_params.h" -#include "packager/status.h" 
+#include "packager/status/status.h" namespace shaka { diff --git a/packager/file/file_closer.h b/packager/file/file_closer.h index 72ed33af50..6d71971b36 100644 --- a/packager/file/file_closer.h +++ b/packager/file/file_closer.h @@ -7,7 +7,7 @@ #ifndef MEDIA_FILE_FILE_CLOSER_H_ #define MEDIA_FILE_FILE_CLOSER_H_ -#include "packager/base/logging.h" +#include "glog/logging.h" #include "packager/file/file.h" namespace shaka { diff --git a/packager/file/file_unittest.cc b/packager/file/file_unittest.cc index f40c635dc9..744d9a3b88 100644 --- a/packager/file/file_unittest.cc +++ b/packager/file/file_unittest.cc @@ -4,22 +4,85 @@ // license that can be found in the LICENSE file or at // https://developers.google.com/open-source/licenses/bsd -#include #include +#include +#include +#include -#include "packager/base/files/file_util.h" +#include + +#include "absl/flags/declare.h" #include "packager/file/file.h" +#include "packager/flag_saver.h" -DECLARE_uint64(io_cache_size); -DECLARE_uint64(io_block_size); +ABSL_DECLARE_FLAG(uint64_t, io_cache_size); +ABSL_DECLARE_FLAG(uint64_t, io_block_size); namespace { const int kDataSize = 1024; + +// Write a file with standard C library routines. +void WriteFile(const std::string& path, const std::string& data) { + FILE* f = fopen(path.c_str(), "wb"); + ASSERT_EQ(data.size(), fwrite(data.data(), 1, data.size(), f)); + fclose(f); } -namespace shaka { +void DeleteFile(const std::string& path) { + std::error_code ec; + std::filesystem::remove(path, ec); + // Ignore errors. +} -using base::FilePath; +int64_t FileSize(const std::string& path) { + std::error_code ec; + int64_t file_size = std::filesystem::file_size(path, ec); + if (ec) { + return -1; + } + return file_size; +} + +// Returns num bytes read, up to max_size. 
+uint64_t ReadFile(const std::string& path, + std::string* data, + uint32_t max_size) { + FILE* f = fopen(path.c_str(), "rb"); + if (!f) { + return 0; + } + + data->resize(max_size); + uint64_t bytes = fread(data->data(), 1, max_size, f); + data->resize(bytes); + return bytes; +} + +std::string generate_unique_temp_path() { + // Generate a unique name for a temporary file, using standard library + // routines, to avoid a circular dependency on any of our own code for + // generating temporary files. The template must end in 6 X's. + std::filesystem::path temp_path_template = + (std::filesystem::temp_directory_path() / "packager-test.XXXXXX"); + std::string temp_path_template_string = temp_path_template.string(); +#if defined(OS_WIN) + // _mktemp will modify the string passed to it to reflect the generated name + // (replacing the X characters with something else). + _mktemp(temp_path_template_string.data()); +#else + // mkstemp will create and open the file, modify the character points to + // reflect the generated name (replacing the X characters with something + // else), and return an open file descriptor. Then we close it and use the + // generated name. + int fd = mkstemp(temp_path_template_string.data()); + close(fd); +#endif + return temp_path_template_string; +} + +} // namespace + +namespace shaka { class LocalFileTest : public testing::Test { protected: @@ -28,26 +91,27 @@ class LocalFileTest : public testing::Test { for (int i = 0; i < kDataSize; ++i) data_[i] = i % 256; - // Test file path for file_util API. - ASSERT_TRUE(base::CreateTemporaryFile(&test_file_path_)); - local_file_name_no_prefix_ = test_file_path_.AsUTF8Unsafe(); + local_file_name_no_prefix_ = generate_unique_temp_path(); // Local file name with prefix for File API. local_file_name_ = kLocalFilePrefix; local_file_name_ += local_file_name_no_prefix_; + + // Use LocalFile directly without ThreadedIoFile. 
+ backup_io_cache_size.reset(new FlagSaver(&FLAGS_io_cache_size)); + absl::SetFlag(&FLAGS_io_cache_size, 0); } void TearDown() override { // Remove test file if created. - base::DeleteFile(FilePath::FromUTF8Unsafe(local_file_name_no_prefix_), - false); + DeleteFile(local_file_name_no_prefix_); } + std::unique_ptr> backup_io_cache_size; + std::string data_; - // Path to the temporary file for this test. - FilePath test_file_path_; - // Same as |test_file_path_| but in string form. + // A path to a temporary test file. std::string local_file_name_no_prefix_; // Same as |local_file_name_no_prefix_| but with the file prefix. @@ -56,38 +120,30 @@ class LocalFileTest : public testing::Test { TEST_F(LocalFileTest, ReadNotExist) { // Remove test file if it exists. - base::DeleteFile(FilePath::FromUTF8Unsafe(local_file_name_no_prefix_), false); + DeleteFile(local_file_name_no_prefix_); ASSERT_TRUE(File::Open(local_file_name_.c_str(), "r") == NULL); } TEST_F(LocalFileTest, Size) { - ASSERT_EQ(kDataSize, - base::WriteFile(test_file_path_, data_.data(), kDataSize)); + WriteFile(local_file_name_no_prefix_, data_); ASSERT_EQ(kDataSize, File::GetFileSize(local_file_name_.c_str())); } TEST_F(LocalFileTest, Copy) { - ASSERT_EQ(kDataSize, - base::WriteFile(test_file_path_, data_.data(), kDataSize)); + WriteFile(local_file_name_no_prefix_, data_); - FilePath temp_dir; - ASSERT_TRUE(base::CreateNewTempDirectory(FilePath::StringType(), &temp_dir)); + std::string destination = generate_unique_temp_path(); + ASSERT_TRUE(File::Copy(local_file_name_.c_str(), destination.c_str())); - // Copy the test file to temp dir as filename "a". 
- FilePath destination = temp_dir.Append(FilePath::FromUTF8Unsafe("a")); - ASSERT_TRUE(File::Copy( - FilePath::FromUTF8Unsafe(local_file_name_).AsUTF8Unsafe().c_str(), - destination.AsUTF8Unsafe().c_str())); + ASSERT_EQ(kDataSize, FileSize(destination)); - // Make a buffer bigger than the expected file content size to make sure that - // there isn't extra stuff appended. - char copied_file_content_buffer[kDataSize * 2] = {}; - ASSERT_EQ(kDataSize, base::ReadFile(destination, copied_file_content_buffer, - arraysize(copied_file_content_buffer))); + // Try to read twice as much data as expected, to make sure that there isn't + // extra stuff appended. + std::string read_data; + ASSERT_EQ(kDataSize, ReadFile(destination, &read_data, kDataSize * 2)); + ASSERT_EQ(data_, read_data); - ASSERT_EQ(data_, std::string(copied_file_content_buffer, kDataSize)); - - base::DeleteFile(temp_dir, true); + DeleteFile(destination); } TEST_F(LocalFileTest, Write) { @@ -98,19 +154,17 @@ TEST_F(LocalFileTest, Write) { EXPECT_EQ(kDataSize, file->Size()); EXPECT_TRUE(file->Close()); - // Read file using file_util API. - std::string read_data(kDataSize, 0); + std::string read_data; + ASSERT_EQ(kDataSize, FileSize(local_file_name_no_prefix_)); ASSERT_EQ(kDataSize, - base::ReadFile(test_file_path_, &read_data[0], kDataSize)); + ReadFile(local_file_name_no_prefix_, &read_data, kDataSize)); // Compare data written and read. EXPECT_EQ(data_, read_data); } TEST_F(LocalFileTest, Read_And_Eof) { - // Write file using file_util API. - ASSERT_EQ(kDataSize, - base::WriteFile(test_file_path_, data_.data(), kDataSize)); + WriteFile(local_file_name_no_prefix_, data_); // Read file using File API. 
File* file = File::Open(local_file_name_.c_str(), "r"); @@ -195,9 +249,8 @@ TEST_F(LocalFileTest, WriteFlushCheckSize) { } } -TEST_F(LocalFileTest, IsLocalReguar) { - ASSERT_EQ(kDataSize, - base::WriteFile(test_file_path_, data_.data(), kDataSize)); +TEST_F(LocalFileTest, IsLocalRegular) { + WriteFile(local_file_name_no_prefix_, data_); ASSERT_TRUE(File::IsLocalRegularFile(local_file_name_.c_str())); } @@ -209,9 +262,10 @@ TEST_P(ParamLocalFileTest, SeekWriteAndSeekRead) { const uint32_t kInitialWriteSize(100); const uint32_t kFinalFileSize(200); - google::FlagSaver flag_saver; - FLAGS_io_block_size = kBlockSize; - FLAGS_io_cache_size = GetParam(); + FlagSaver local_backup_io_block_size(&FLAGS_io_block_size); + FlagSaver local_backup_io_cache_size(&FLAGS_io_cache_size); + absl::SetFlag(&FLAGS_io_block_size, kBlockSize); + absl::SetFlag(&FLAGS_io_cache_size, GetParam()); std::vector buffer(kInitialWriteSize); File* file = File::Open(local_file_name_no_prefix_.c_str(), "w"); @@ -223,19 +277,32 @@ TEST_P(ParamLocalFileTest, SeekWriteAndSeekRead) { ASSERT_EQ(kInitialWriteSize, position); for (uint8_t offset = 0; offset < kFinalFileSize; ++offset) { + // Seek to each offset, check that the position matches. EXPECT_TRUE(file->Seek(offset)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset, position); + + // Write two bytes of data at this offset (NULs), check that the position + // was advanced by two bytes. EXPECT_EQ(2u, file->Write(buffer.data(), 2u)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset + 2u, position); + + // Seek to the byte right after the original offset (the second NUL we + // wrote), check that the position matches. ++offset; EXPECT_TRUE(file->Seek(offset)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset, position); + + // Overwrite the byte at this position, with a value matching the current + // offset, check that the position was advanced by one byte. 
EXPECT_EQ(1, file->Write(&offset, 1)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset + 1u, position); + + // The pattern in bytes will be: + // 0x00, 0x01, 0x00, 0x03, 0x00, 0x05, ... } EXPECT_EQ(kFinalFileSize, file->Size()); ASSERT_TRUE(file->Close()); @@ -244,67 +311,33 @@ TEST_P(ParamLocalFileTest, SeekWriteAndSeekRead) { ASSERT_TRUE(file != nullptr); for (uint8_t offset = 1; offset < kFinalFileSize; offset += 2) { uint8_t read_byte; + + // Seek to the odd bytes, which should have values matching their offsets. EXPECT_TRUE(file->Seek(offset)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset, position); + + // Read a byte, check that the position was advanced by one byte, and that + // the value matches what we wrote in the loop above (the offset). EXPECT_EQ(1, file->Read(&read_byte, 1)); ASSERT_TRUE(file->Tell(&position)); EXPECT_EQ(offset + 1u, position); EXPECT_EQ(offset, read_byte); } + + // We can't read any more at this position (the end). EXPECT_EQ(0, file->Read(buffer.data(), 1)); + // If we seek back to 0, we can read another byte. ASSERT_TRUE(file->Seek(0)); EXPECT_EQ(1, file->Read(buffer.data(), 1)); + EXPECT_TRUE(file->Close()); } -INSTANTIATE_TEST_CASE_P(TestSeekWithDifferentCacheSizes, - ParamLocalFileTest, - ::testing::Values(20u, 1000u)); - -// This test should only be enabled for filesystems which do not allow seeking -// past EOF. 
-TEST_F(LocalFileTest, DISABLED_WriteSeekOutOfBounds) { - const uint32_t kFileSize(100); - - std::vector buffer(kFileSize); - File* file = File::Open(local_file_name_no_prefix_.c_str(), "w"); - ASSERT_TRUE(file != nullptr); - ASSERT_EQ(kFileSize, file->Write(buffer.data(), kFileSize)); - ASSERT_EQ(kFileSize, file->Size()); - EXPECT_FALSE(file->Seek(kFileSize + 1)); - EXPECT_TRUE(file->Seek(kFileSize)); - EXPECT_EQ(1, file->Write(buffer.data(), 1)); - EXPECT_TRUE(file->Seek(kFileSize + 1)); - EXPECT_EQ(kFileSize + 1, file->Size()); -} - -// This test should only be enabled for filesystems which do not allow seeking -// past EOF. -TEST_F(LocalFileTest, DISABLED_ReadSeekOutOfBounds) { - const uint32_t kFileSize(100); - - File::Delete(local_file_name_no_prefix_.c_str()); - std::vector buffer(kFileSize); - File* file = File::Open(local_file_name_no_prefix_.c_str(), "w"); - ASSERT_TRUE(file != nullptr); - ASSERT_EQ(kFileSize, file->Write(buffer.data(), kFileSize)); - ASSERT_EQ(kFileSize, file->Size()); - ASSERT_TRUE(file->Close()); - file = File::Open(local_file_name_no_prefix_.c_str(), "r"); - ASSERT_TRUE(file != nullptr); - EXPECT_FALSE(file->Seek(kFileSize + 1)); - EXPECT_TRUE(file->Seek(kFileSize)); - uint64_t position; - EXPECT_TRUE(file->Tell(&position)); - EXPECT_EQ(kFileSize, position); - EXPECT_EQ(0u, file->Read(buffer.data(), 1)); - EXPECT_TRUE(file->Seek(0)); - EXPECT_TRUE(file->Tell(&position)); - EXPECT_EQ(0u, position); - EXPECT_EQ(kFileSize, file->Read(buffer.data(), kFileSize)); - EXPECT_EQ(0u, file->Read(buffer.data(), 1)); - EXPECT_TRUE(file->Close()); -} +INSTANTIATE_TEST_SUITE_P(TestSeekWithDifferentCacheSizes, + ParamLocalFileTest, + // 0 disables cache, 20 is small, 61 is prime, and 1000 + // is just under the data size of 1k. 
+ ::testing::Values(0u, 20u, 61u, 1000u)); TEST(FileTest, MakeCallbackFileName) { const BufferCallbackParams* params = diff --git a/packager/file/file_util.cc b/packager/file/file_util.cc index 9f8304d3e5..3f363ed5d6 100644 --- a/packager/file/file_util.cc +++ b/packager/file/file_util.cc @@ -8,47 +8,43 @@ #include -#include "packager/base/files/file_path.h" -#include "packager/base/files/file_util.h" -#include "packager/base/process/process_handle.h" -#include "packager/base/strings/stringprintf.h" -#include "packager/base/threading/platform_thread.h" -#include "packager/base/time/time.h" +#if defined(OS_WIN) +#include +#else +#include +#endif + +#include +#include + +#include "absl/strings/str_format.h" namespace shaka { namespace { // Create a temp file name using process id, thread id and current time. std::string TempFileName() { - const int32_t process_id = static_cast(base::GetCurrentProcId()); - const int32_t thread_id = - static_cast(base::PlatformThread::CurrentId()); +#if defined(OS_WIN) + const uint32_t process_id = static_cast(GetCurrentProcessId()); +#else + const uint32_t process_id = static_cast(getpid()); +#endif + const size_t thread_id = + std::hash{}(std::this_thread::get_id()); // We may need two or more temporary files in the same thread. There might be // name collision if they are requested around the same time, e.g. called // consecutively. Use a thread_local instance to avoid that. 
- static thread_local int32_t instance_id = 0; + static thread_local uint32_t instance_id = 0; ++instance_id; - const int64_t current_time = base::Time::Now().ToInternalValue(); - return base::StringPrintf("packager-tempfile-%x-%x-%x-%" PRIx64, process_id, - thread_id, instance_id, current_time); + return absl::StrFormat("packager-tempfile-%x-%zx-%x", process_id, thread_id, + instance_id); } } // namespace bool TempFilePath(const std::string& temp_dir, std::string* temp_file_path) { - if (temp_dir.empty()) { - base::FilePath file_path; - if (!base::CreateTemporaryFile(&file_path)) { - LOG(ERROR) << "Failed to create temporary file."; - return false; - } - *temp_file_path = file_path.AsUTF8Unsafe(); - } else { - *temp_file_path = - base::FilePath::FromUTF8Unsafe(temp_dir) - .Append(base::FilePath::FromUTF8Unsafe(TempFileName())) - .AsUTF8Unsafe(); - } + std::filesystem::path temp_dir_path(temp_dir); + *temp_file_path = (temp_dir_path / TempFileName()).string(); return true; } diff --git a/packager/file/file_util_unittest.cc b/packager/file/file_util_unittest.cc index f6b76faf98..e1a7d006a5 100644 --- a/packager/file/file_util_unittest.cc +++ b/packager/file/file_util_unittest.cc @@ -8,7 +8,7 @@ #include -#include "packager/base/logging.h" +#include "glog/logging.h" namespace shaka { diff --git a/packager/file/http_file.cc b/packager/file/http_file.cc index 404ef6fb97..4cb065947b 100644 --- a/packager/file/http_file.cc +++ b/packager/file/http_file.cc @@ -7,36 +7,45 @@ #include "packager/file/http_file.h" #include -#include -#include "packager/base/bind.h" -#include "packager/base/files/file_util.h" -#include "packager/base/logging.h" -#include "packager/base/strings/string_number_conversions.h" -#include "packager/base/strings/stringprintf.h" -#include "packager/base/threading/worker_pool.h" +#include "absl/flags/declare.h" +#include "absl/flags/flag.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_format.h" +#include "glog/logging.h" + +#include 
"packager/common.h" +#include "packager/file/thread_pool.h" #include "packager/version/version.h" -DEFINE_string(user_agent, "", - "Set a custom User-Agent string for HTTP requests."); -DEFINE_string(ca_file, - "", - "Absolute path to the Certificate Authority file for the " - "server cert. PEM format"); -DEFINE_string(client_cert_file, - "", - "Absolute path to client certificate file."); -DEFINE_string(client_cert_private_key_file, - "", - "Absolute path to the Private Key file."); -DEFINE_string(client_cert_private_key_password, - "", - "Password to the private key file."); -DEFINE_bool(disable_peer_verification, - false, - "Disable peer verification. This is needed to talk to servers " - "without valid certificates."); -DECLARE_uint64(io_cache_size); +ABSL_FLAG(std::string, + user_agent, + "", + "Set a custom User-Agent string for HTTP requests."); +ABSL_FLAG(std::string, + ca_file, + "", + "Absolute path to the Certificate Authority file for the " + "server cert. PEM format"); +ABSL_FLAG(std::string, + client_cert_file, + "", + "Absolute path to client certificate file."); +ABSL_FLAG(std::string, + client_cert_private_key_file, + "", + "Absolute path to the Private Key file."); +ABSL_FLAG(std::string, + client_cert_private_key_password, + "", + "Password to the private key file."); +ABSL_FLAG(bool, + disable_peer_verification, + false, + "Disable peer verification. This is needed to talk to servers " + "without valid certificates."); + +ABSL_DECLARE_FLAG(uint64_t, io_cache_size); namespace shaka { @@ -114,11 +123,12 @@ int CurlDebugCallback(CURL* /* handle */, return 0; } + const std::string data_string(data, size); VLOG(log_level) << "\n\n" << type_text << " (0x" << std::hex << size << std::dec << " bytes)\n" - << (in_hex ? base::HexEncode(data, size) - : std::string(data, size)); + << (in_hex ? 
absl::BytesToHexString(data_string) + : data_string); return 0; } @@ -163,13 +173,17 @@ HttpFile::HttpFile(HttpMethod method, upload_content_type_(upload_content_type), timeout_in_seconds_(timeout_in_seconds), method_(method), - download_cache_(FLAGS_io_cache_size), - upload_cache_(FLAGS_io_cache_size), + download_cache_(absl::GetFlag(FLAGS_io_cache_size)), + upload_cache_(absl::GetFlag(FLAGS_io_cache_size)), curl_(curl_easy_init()), status_(Status::OK), - user_agent_(FLAGS_user_agent), - task_exit_event_(base::WaitableEvent::ResetPolicy::MANUAL, - base::WaitableEvent::InitialState::NOT_SIGNALED) { + user_agent_(absl::GetFlag(FLAGS_user_agent)), + ca_file_(absl::GetFlag(FLAGS_ca_file)), + client_cert_file_(absl::GetFlag(FLAGS_client_cert_file)), + client_cert_private_key_file_( + absl::GetFlag(FLAGS_client_cert_private_key_file)), + client_cert_private_key_password_( + absl::GetFlag(FLAGS_client_cert_private_key_password)) { static LibCurlInitializer lib_curl_initializer; if (user_agent_.empty()) { user_agent_ += "ShakaPackager/" + GetPackagerVersion(); @@ -212,9 +226,7 @@ bool HttpFile::Open() { // TODO: Implement retrying with exponential backoff, see // "widevine_key_source.cc" - base::WorkerPool::PostTask( - FROM_HERE, base::Bind(&HttpFile::ThreadMain, base::Unretained(this)), - /* task_is_slow= */ true); + ThreadPool::instance.PostTask(std::bind(&HttpFile::ThreadMain, this)); return true; } @@ -225,7 +237,7 @@ Status HttpFile::CloseWithStatus() { // will wait for more data forever. 
download_cache_.Close(); upload_cache_.Close(); - task_exit_event_.Wait(); + task_exit_event_.WaitForNotification(); const Status result = status_; LOG_IF(ERROR, !result.ok()) << "HttpFile request failed: " << result; @@ -258,11 +270,13 @@ bool HttpFile::Flush() { } bool HttpFile::Seek(uint64_t position) { + UNUSED(position); LOG(ERROR) << "HttpFile does not support Seek()."; return false; } bool HttpFile::Tell(uint64_t* position) { + UNUSED(position); LOG(ERROR) << "HttpFile does not support Tell()."; return false; } @@ -304,25 +318,24 @@ void HttpFile::SetupRequest() { curl_easy_setopt(curl, CURLOPT_HTTPHEADER, request_headers_.get()); - if (FLAGS_disable_peer_verification) + if (absl::GetFlag(FLAGS_disable_peer_verification)) curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); // Client authentication - if (!FLAGS_client_cert_private_key_file.empty() && - !FLAGS_client_cert_file.empty()) { + if (!client_cert_private_key_file_.empty() && !client_cert_file_.empty()) { curl_easy_setopt(curl, CURLOPT_SSLKEY, - FLAGS_client_cert_private_key_file.data()); - curl_easy_setopt(curl, CURLOPT_SSLCERT, FLAGS_client_cert_file.data()); + client_cert_private_key_file_.data()); + curl_easy_setopt(curl, CURLOPT_SSLCERT, client_cert_file_.data()); curl_easy_setopt(curl, CURLOPT_SSLKEYTYPE, "PEM"); curl_easy_setopt(curl, CURLOPT_SSLCERTTYPE, "PEM"); - if (!FLAGS_client_cert_private_key_password.empty()) { + if (!client_cert_private_key_password_.empty()) { curl_easy_setopt(curl, CURLOPT_KEYPASSWD, - FLAGS_client_cert_private_key_password.data()); + client_cert_private_key_password_.data()); } } - if (!FLAGS_ca_file.empty()) { - curl_easy_setopt(curl, CURLOPT_CAINFO, FLAGS_ca_file.data()); + if (!ca_file_.empty()) { + curl_easy_setopt(curl, CURLOPT_CAINFO, ca_file_.data()); } if (VLOG_IS_ON(kMinLogLevelForCurlDebugFunction)) { @@ -340,8 +353,7 @@ void HttpFile::ThreadMain() { if (res == CURLE_HTTP_RETURNED_ERROR) { long response_code = 0; curl_easy_getinfo(curl_.get(), 
CURLINFO_RESPONSE_CODE, &response_code); - error_message += - base::StringPrintf(", response code: %ld.", response_code); + error_message += absl::StrFormat(", response code: %ld.", response_code); } status_ = Status( @@ -350,7 +362,7 @@ void HttpFile::ThreadMain() { } download_cache_.Close(); - task_exit_event_.Signal(); + task_exit_event_.Notify(); } } // namespace shaka diff --git a/packager/file/http_file.h b/packager/file/http_file.h index e86131c0b0..929d511aea 100644 --- a/packager/file/http_file.h +++ b/packager/file/http_file.h @@ -10,10 +10,10 @@ #include #include -#include "packager/base/synchronization/waitable_event.h" +#include "absl/synchronization/notification.h" + #include "packager/file/file.h" #include "packager/file/io_cache.h" -#include "packager/status.h" typedef void CURL; struct curl_slist; @@ -84,9 +84,13 @@ class HttpFile : public File { std::unique_ptr request_headers_; Status status_; std::string user_agent_; + std::string ca_file_; + std::string client_cert_file_; + std::string client_cert_private_key_file_; + std::string client_cert_private_key_password_; // Signaled when the "curl easy perform" task completes. 
- base::WaitableEvent task_exit_event_; + absl::Notification task_exit_event_; }; } // namespace shaka diff --git a/packager/file/http_file_unittest.cc b/packager/file/http_file_unittest.cc index a3aec0c937..4865996309 100644 --- a/packager/file/http_file_unittest.cc +++ b/packager/file/http_file_unittest.cc @@ -11,17 +11,13 @@ #include #include -#include "packager/base/json/json_reader.h" -#include "packager/base/values.h" +#include "absl/strings/str_split.h" +#include "nlohmann/json.hpp" #include "packager/file/file.h" #include "packager/file/file_closer.h" -#define ASSERT_JSON_STRING(json, key, value) \ - do { \ - std::string actual; \ - ASSERT_TRUE((json)->GetString((key), &actual)); \ - ASSERT_EQ(actual, (value)); \ - } while (false) +#define ASSERT_JSON_STRING(json, key, value) \ + ASSERT_EQ(GetJsonString((json), (key)), (value)) << "JSON is " << (json) namespace shaka { @@ -29,7 +25,27 @@ namespace { using FilePtr = std::unique_ptr; -std::unique_ptr HandleResponse(const FilePtr& file) { +// Handles keys with dots, indicating a nested field. 
+std::string GetJsonString(const nlohmann::json& json, + const std::string& combined_key) { + std::vector keys = absl::StrSplit(combined_key, '.'); + nlohmann::json current = json; + + for (const std::string& key : keys) { + if (!current.contains(key)) { + return ""; + } + current = current[key]; + } + + if (current.is_string()) { + return current.get(); + } + + return ""; +} + +nlohmann::json HandleResponse(const FilePtr& file) { std::string result; while (true) { char buffer[64 * 1024]; @@ -42,27 +58,26 @@ std::unique_ptr HandleResponse(const FilePtr& file) { } VLOG(1) << "Response:\n" << result; - auto value = base::JSONReader::Read(result); - if (!value || !value->IsType(base::Value::TYPE_DICTIONARY)) - return nullptr; - return std::unique_ptr{ - static_cast(value.release())}; + nlohmann::json value = nlohmann::json::parse(result, + /* parser callback */ nullptr, + /* allow exceptions */ false); + return value; } } // namespace -TEST(HttpFileTest, DISABLED_BasicGet) { +TEST(HttpFileTest, BasicGet) { FilePtr file(new HttpFile(HttpMethod::kGet, "https://httpbin.org/anything")); ASSERT_TRUE(file); ASSERT_TRUE(file->Open()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); ASSERT_JSON_STRING(json, "method", "GET"); } -TEST(HttpFileTest, DISABLED_CustomHeaders) { +TEST(HttpFileTest, CustomHeaders) { std::vector headers{"Host: foo", "X-My-Header: Something"}; FilePtr file(new HttpFile(HttpMethod::kGet, "https://httpbin.org/anything", "", headers, 0)); @@ -70,7 +85,7 @@ TEST(HttpFileTest, DISABLED_CustomHeaders) { ASSERT_TRUE(file->Open()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); ASSERT_JSON_STRING(json, "method", "GET"); @@ -78,7 +93,7 @@ TEST(HttpFileTest, DISABLED_CustomHeaders) { ASSERT_JSON_STRING(json, "headers.X-My-Header", "Something"); } -TEST(HttpFileTest, DISABLED_BasicPost) { +TEST(HttpFileTest, 
BasicPost) { FilePtr file(new HttpFile(HttpMethod::kPost, "https://httpbin.org/anything")); ASSERT_TRUE(file); ASSERT_TRUE(file->Open()); @@ -90,17 +105,25 @@ TEST(HttpFileTest, DISABLED_BasicPost) { ASSERT_TRUE(file->Flush()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); ASSERT_JSON_STRING(json, "method", "POST"); ASSERT_JSON_STRING(json, "data", data); ASSERT_JSON_STRING(json, "headers.Content-Type", "application/octet-stream"); - ASSERT_JSON_STRING(json, "headers.Content-Length", - std::to_string(data.size())); + + // Curl may choose to send chunked or not based on the data. We request + // chunked encoding, but don't control if it is actually used. If we get + // chunked transfer, there is no Content-Length header reflected back to us. + if (!GetJsonString(json, "headers.Content-Length").empty()) { + ASSERT_JSON_STRING(json, "headers.Content-Length", + std::to_string(data.size())); + } else { + ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked"); + } } -TEST(HttpFileTest, DISABLED_BasicPut) { +TEST(HttpFileTest, BasicPut) { FilePtr file(new HttpFile(HttpMethod::kPut, "https://httpbin.org/anything")); ASSERT_TRUE(file); ASSERT_TRUE(file->Open()); @@ -112,17 +135,25 @@ TEST(HttpFileTest, DISABLED_BasicPut) { ASSERT_TRUE(file->Flush()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); ASSERT_JSON_STRING(json, "method", "PUT"); ASSERT_JSON_STRING(json, "data", data); ASSERT_JSON_STRING(json, "headers.Content-Type", "application/octet-stream"); - ASSERT_JSON_STRING(json, "headers.Content-Length", - std::to_string(data.size())); + + // Curl may choose to send chunked or not based on the data. We request + // chunked encoding, but don't control if it is actually used. If we get + // chunked transfer, there is no Content-Length header reflected back to us. 
+ if (!GetJsonString(json, "headers.Content-Length").empty()) { + ASSERT_JSON_STRING(json, "headers.Content-Length", + std::to_string(data.size())); + } else { + ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked"); + } } -TEST(HttpFileTest, DISABLED_MultipleWrites) { +TEST(HttpFileTest, MultipleWrites) { FilePtr file(new HttpFile(HttpMethod::kPut, "https://httpbin.org/anything")); ASSERT_TRUE(file); ASSERT_TRUE(file->Open()); @@ -143,22 +174,28 @@ TEST(HttpFileTest, DISABLED_MultipleWrites) { ASSERT_TRUE(file->Flush()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); ASSERT_JSON_STRING(json, "method", "PUT"); ASSERT_JSON_STRING(json, "data", data1 + data2 + data3 + data4); ASSERT_JSON_STRING(json, "headers.Content-Type", "application/octet-stream"); - ASSERT_JSON_STRING(json, "headers.Content-Length", - std::to_string(data1.size() + data2.size() + data3.size() + - data4.size())); + + // Curl may choose to send chunked or not based on the data. We request + // chunked encoding, but don't control if it is actually used. If we get + // chunked transfer, there is no Content-Length header reflected back to us. + if (!GetJsonString(json, "headers.Content-Length").empty()) { + auto totalSize = data1.size() + data2.size() + data3.size() + data4.size(); + ASSERT_JSON_STRING(json, "headers.Content-Length", + std::to_string(totalSize)); + } else { + ASSERT_JSON_STRING(json, "headers.Transfer-Encoding", "chunked"); + } } -// TODO: Test chunked uploads. Since we can only read the response, we have no -// way to detect if we are streaming the upload like we want. httpbin seems to -// populate the Content-Length even if we don't give it in the request. +// TODO: Test chunked uploads explicitly. 
-TEST(HttpFileTest, DISABLED_Error404) { +TEST(HttpFileTest, Error404) { FilePtr file( new HttpFile(HttpMethod::kGet, "https://httpbin.org/status/404")); ASSERT_TRUE(file); @@ -173,7 +210,7 @@ TEST(HttpFileTest, DISABLED_Error404) { ASSERT_EQ(status.error_code(), error::HTTP_FAILURE); } -TEST(HttpFileTest, DISABLED_TimeoutTriggered) { +TEST(HttpFileTest, TimeoutTriggered) { FilePtr file( new HttpFile(HttpMethod::kGet, "https://httpbin.org/delay/8", "", {}, 1)); ASSERT_TRUE(file); @@ -188,14 +225,14 @@ TEST(HttpFileTest, DISABLED_TimeoutTriggered) { ASSERT_EQ(status.error_code(), error::TIME_OUT); } -TEST(HttpFileTest, DISABLED_TimeoutNotTriggered) { +TEST(HttpFileTest, TimeoutNotTriggered) { FilePtr file( new HttpFile(HttpMethod::kGet, "https://httpbin.org/delay/1", "", {}, 5)); ASSERT_TRUE(file); ASSERT_TRUE(file->Open()); auto json = HandleResponse(file); - ASSERT_TRUE(json); + ASSERT_TRUE(json.is_object()); ASSERT_TRUE(file.release()->Close()); } diff --git a/packager/file/io_cache.cc b/packager/file/io_cache.cc index b2b1f866b0..c08b2ae108 100644 --- a/packager/file/io_cache.cc +++ b/packager/file/io_cache.cc @@ -10,19 +10,12 @@ #include -#include "packager/base/logging.h" +#include "glog/logging.h" namespace shaka { -using base::AutoLock; -using base::AutoUnlock; - IoCache::IoCache(uint64_t cache_size) : cache_size_(cache_size), - read_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, - base::WaitableEvent::InitialState::NOT_SIGNALED), - write_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, - base::WaitableEvent::InitialState::NOT_SIGNALED), // Make the buffer one byte larger than the cache so that when the // condition r_ptr == w_ptr is unambiguous (buffer empty). 
circular_buffer_(cache_size + 1), @@ -38,10 +31,9 @@ IoCache::~IoCache() { uint64_t IoCache::Read(void* buffer, uint64_t size) { DCHECK(buffer); - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); while (!closed_ && (BytesCachedInternal() == 0)) { - AutoUnlock unlock(lock_); - write_event_.Wait(); + write_event_.Wait(&mutex_); } size = std::min(size, BytesCachedInternal()); @@ -69,13 +61,12 @@ uint64_t IoCache::Write(const void* buffer, uint64_t size) { const uint8_t* r_ptr(static_cast(buffer)); uint64_t bytes_left(size); while (bytes_left) { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); while (!closed_ && (BytesFreeInternal() == 0)) { - AutoUnlock unlock(lock_); VLOG(1) << "Circular buffer is full, which can happen if data arrives " "faster than being consumed by packager. Ignore if it is not " "live packaging. Otherwise, try increasing --io_cache_size."; - read_event_.Wait(); + read_event_.Wait(&mutex_); } if (closed_) return 0; @@ -103,35 +94,33 @@ uint64_t IoCache::Write(const void* buffer, uint64_t size) { } void IoCache::Clear() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); r_ptr_ = w_ptr_ = circular_buffer_.data(); // Let any writers know that there is room in the cache. 
read_event_.Signal(); } void IoCache::Close() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); closed_ = true; read_event_.Signal(); write_event_.Signal(); } void IoCache::Reopen() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); CHECK(closed_); r_ptr_ = w_ptr_ = circular_buffer_.data(); closed_ = false; - read_event_.Reset(); - write_event_.Reset(); } uint64_t IoCache::BytesCached() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); return BytesCachedInternal(); } uint64_t IoCache::BytesFree() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); return BytesFreeInternal(); } @@ -146,10 +135,9 @@ uint64_t IoCache::BytesFreeInternal() { } void IoCache::WaitUntilEmptyOrClosed() { - AutoLock lock(lock_); + absl::MutexLock lock(&mutex_); while (!closed_ && BytesCachedInternal()) { - AutoUnlock unlock(lock_); - read_event_.Wait(); + read_event_.Wait(&mutex_); } } diff --git a/packager/file/io_cache.h b/packager/file/io_cache.h index d986a6f13a..b7c93eb307 100644 --- a/packager/file/io_cache.h +++ b/packager/file/io_cache.h @@ -8,10 +8,11 @@ #define PACKAGER_FILE_IO_CACHE_H_ #include + #include -#include "packager/base/macros.h" -#include "packager/base/synchronization/lock.h" -#include "packager/base/synchronization/waitable_event.h" + +#include "absl/synchronization/mutex.h" +#include "packager/common.h" namespace shaka { @@ -67,14 +68,14 @@ class IoCache { uint64_t BytesFreeInternal(); const uint64_t cache_size_; - base::Lock lock_; - base::WaitableEvent read_event_; - base::WaitableEvent write_event_; - std::vector circular_buffer_; - const uint8_t* end_ptr_; - uint8_t* r_ptr_; - uint8_t* w_ptr_; - bool closed_; + absl::Mutex mutex_; + absl::CondVar read_event_ GUARDED_BY(mutex_); + absl::CondVar write_event_ GUARDED_BY(mutex_); + std::vector circular_buffer_ GUARDED_BY(mutex_); + const uint8_t* end_ptr_ GUARDED_BY(mutex_); + uint8_t* r_ptr_ GUARDED_BY(mutex_); + uint8_t* w_ptr_ GUARDED_BY(mutex_); + bool closed_ GUARDED_BY(mutex_); 
DISALLOW_COPY_AND_ASSIGN(IoCache); }; diff --git a/packager/file/io_cache_unittest.cc b/packager/file/io_cache_unittest.cc index db1f7d9157..0dcab2bf7b 100644 --- a/packager/file/io_cache_unittest.cc +++ b/packager/file/io_cache_unittest.cc @@ -5,12 +5,13 @@ // https://developers.google.com/open-source/licenses/bsd #include "packager/file/io_cache.h" + #include #include + #include -#include "packager/base/bind.h" -#include "packager/base/bind_helpers.h" -#include "packager/base/threading/simple_thread.h" +#include +#include namespace { const uint64_t kBlockSize = 256; @@ -19,27 +20,11 @@ const uint64_t kCacheSize = 16 * kBlockSize; namespace shaka { -class ClosureThread : public base::SimpleThread { - public: - ClosureThread(const std::string& name_prefix, const base::Closure& task) - : base::SimpleThread(name_prefix), task_(task) {} - - ~ClosureThread() { - if (HasBeenStarted() && !HasBeenJoined()) - Join(); - } - - void Run() { task_.Run(); } - - private: - const base::Closure task_; -}; - class IoCacheTest : public testing::Test { public: void WriteToCache(const std::vector& test_buffer, uint64_t num_writes, - int sleep_between_writes, + int sleep_between_writes_ms, bool close_when_done) { for (uint64_t write_idx = 0; write_idx < num_writes; ++write_idx) { uint64_t write_result = @@ -50,9 +35,9 @@ class IoCacheTest : public testing::Test { break; } EXPECT_EQ(test_buffer.size(), write_result); - if (sleep_between_writes) { - base::PlatformThread::Sleep( - base::TimeDelta::FromMilliseconds(sleep_between_writes)); + if (sleep_between_writes_ms) { + std::this_thread::sleep_for( + std::chrono::milliseconds(sleep_between_writes_ms)); } } if (close_when_done) @@ -62,7 +47,7 @@ class IoCacheTest : public testing::Test { protected: void SetUp() override { for (unsigned int idx = 0; idx < kBlockSize; ++idx) - reference_block_[idx] = idx; + reference_block_[idx] = idx & 0xff; cache_.reset(new IoCache(kCacheSize)); cache_closed_ = false; } @@ -82,25 +67,22 @@ class 
IoCacheTest : public testing::Test { void WriteToCacheThreaded(const std::vector& test_buffer, uint64_t num_writes, - int sleep_between_writes, + int sleep_between_writes_ms, bool close_when_done) { - writer_thread_.reset(new ClosureThread( - "WriterThread", - base::Bind(&IoCacheTest::WriteToCache, base::Unretained(this), - test_buffer, num_writes, sleep_between_writes, - close_when_done))); - writer_thread_->Start(); + writer_thread_.reset(new std::thread( + std::bind(&IoCacheTest::WriteToCache, this, test_buffer, num_writes, + sleep_between_writes_ms, close_when_done))); } void WaitForWriterThread() { if (writer_thread_) { - writer_thread_->Join(); + writer_thread_->join(); writer_thread_.reset(); } } std::unique_ptr cache_; - std::unique_ptr writer_thread_; + std::unique_ptr writer_thread_; uint8_t reference_block_[kBlockSize]; bool cache_closed_; }; @@ -186,8 +168,7 @@ TEST_F(IoCacheTest, SlowRead) { std::vector read_buffer(kBlockSize); EXPECT_EQ(kBlockSize, cache_->Read(read_buffer.data(), kBlockSize)); EXPECT_EQ(write_buffer, read_buffer); - base::PlatformThread::Sleep( - base::TimeDelta::FromMilliseconds(kReadDelayMs)); + std::this_thread::sleep_for(std::chrono::milliseconds(kReadDelayMs)); } } @@ -198,7 +179,7 @@ TEST_F(IoCacheTest, CloseByReader) { GenerateTestBuffer(kBlockSize, &write_buffer); WriteToCacheThreaded(write_buffer, kNumWrites, 0, false); while (cache_->BytesCached() < kCacheSize) { - base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10)); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); } cache_->Close(); WaitForWriterThread(); @@ -264,7 +245,7 @@ TEST_F(IoCacheTest, LargeRead) { write_buffer.end()); } while (cache_->BytesCached() < kCacheSize) { - base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10)); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); } std::vector read_buffer(kCacheSize); EXPECT_EQ(kCacheSize, cache_->Read(read_buffer.data(), kCacheSize)); diff --git 
a/packager/file/local_file.cc b/packager/file/local_file.cc index 4120df1812..a0a24ff834 100644 --- a/packager/file/local_file.cc +++ b/packager/file/local_file.cc @@ -7,90 +7,18 @@ #include "packager/file/local_file.h" #include + #if defined(OS_WIN) #include #else #include #endif // defined(OS_WIN) -#include "packager/base/files/file_path.h" -#include "packager/base/files/file_util.h" -#include "packager/base/logging.h" + +#include + +#include "glog/logging.h" namespace shaka { -namespace { - -// Check if the directory |path| exists. Returns false if it does not exist or -// it is not a directory. On non-Windows, |mode| will be filled with the file -// permission bits on success. -bool DirectoryExists(const base::FilePath& path, int* mode) { -#if defined(OS_WIN) - DWORD fileattr = GetFileAttributes(path.value().c_str()); - if (fileattr != INVALID_FILE_ATTRIBUTES) - return (fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0; -#else - struct stat info; - if (stat(path.value().c_str(), &info) != 0) - return false; - if (S_ISDIR(info.st_mode)) { - const int FILE_PERMISSION_MASK = S_IRWXU | S_IRWXG | S_IRWXO; - if (mode) - *mode = info.st_mode & FILE_PERMISSION_MASK; - return true; - } -#endif - return false; -} - -// Create all the inexistent directories in the path. Returns true on success or -// if the directory already exists. -bool CreateDirectory(const base::FilePath& full_path) { - std::vector subpaths; - - // Collect a list of all parent directories. - base::FilePath last_path = full_path; - subpaths.push_back(full_path); - for (base::FilePath path = full_path.DirName(); - path.value() != last_path.value(); path = path.DirName()) { - subpaths.push_back(path); - last_path = path; - } - - // For non-Windows only. File permission for the new directories. - // The file permission will be inherited from the last existing directory in - // the file path. If none of the directory exists in the path, it is set to - // 0755 by default. 
- int mode = 0755; - - // Iterate through the parents and create the missing ones. - for (auto i = subpaths.rbegin(); i != subpaths.rend(); ++i) { - if (DirectoryExists(*i, &mode)) { - continue; - } -#if defined(OS_WIN) - if (::CreateDirectory(i->value().c_str(), nullptr)) { - continue; - } -#else - if (mkdir(i->value().c_str(), mode) == 0) { - continue; - } -#endif - - // Mkdir failed, but it might have failed with EEXIST, or some other error - // due to the the directory appearing out of thin air. This can occur if - // two processes are trying to create the same file system tree at the same - // time. Check to see if it exists and make sure it is a directory. - const auto saved_error_code = ::logging::GetLastSystemErrorCode(); - if (!DirectoryExists(*i, nullptr)) { - LOG(ERROR) << "Failed to create directory " << i->value().c_str() - << " ErrorCode " << saved_error_code; - return false; - } - } - return true; -} - -} // namespace // Always open files in binary mode. const char kAdditionalFileMode[] = "b"; @@ -104,7 +32,7 @@ LocalFile::LocalFile(const char* file_name, const char* mode) bool LocalFile::Close() { bool result = true; if (internal_file_) { - result = base::CloseFile(internal_file_); + result = fclose(internal_file_) == 0; internal_file_ = NULL; } delete this; @@ -144,10 +72,10 @@ int64_t LocalFile::Size() { return -1; } - int64_t file_size; - if (!base::GetFileSize(base::FilePath::FromUTF8Unsafe(file_name()), - &file_size)) { - LOG(ERROR) << "Cannot get file size."; + std::error_code ec; + int64_t file_size = std::filesystem::file_size(file_name(), ec); + if (ec) { + LOG(ERROR) << "Cannot get file size, error: " << ec; return -1; } return file_size; @@ -182,22 +110,30 @@ bool LocalFile::Tell(uint64_t* position) { LocalFile::~LocalFile() {} bool LocalFile::Open() { - base::FilePath file_path(base::FilePath::FromUTF8Unsafe(file_name())); + std::filesystem::path file_path(file_name()); // Create upper level directories for write mode. 
if (file_mode_.find("w") != std::string::npos) { - // The function returns true if the directories already exist. - if (!shaka::CreateDirectory(file_path.DirName())) { - return false; + // From the return value of filesystem::create_directories, you can't tell + // the difference between pre-existing directories and failure. So check + // first if it needs to be created. + auto parent_path = file_path.parent_path(); + std::error_code ec; + if (!std::filesystem::is_directory(parent_path, ec)) { + if (!std::filesystem::create_directories(parent_path, ec)) { + return false; + } } } - internal_file_ = base::OpenFile(file_path, file_mode_.c_str()); + internal_file_ = fopen(file_path.u8string().c_str(), file_mode_.c_str()); return (internal_file_ != NULL); } bool LocalFile::Delete(const char* file_name) { - return base::DeleteFile(base::FilePath::FromUTF8Unsafe(file_name), false); + std::error_code ec; + // On error (ec truthy), remove() will return false anyway. + return std::filesystem::remove(file_name, ec); } } // namespace shaka diff --git a/packager/file/local_file.h b/packager/file/local_file.h index ecc3c0b62c..89719a73ce 100644 --- a/packager/file/local_file.h +++ b/packager/file/local_file.h @@ -11,7 +11,6 @@ #include -#include "packager/base/compiler_specific.h" #include "packager/file/file.h" namespace shaka { diff --git a/packager/file/memory_file.cc b/packager/file/memory_file.cc index 415a738723..e14d230505 100644 --- a/packager/file/memory_file.cc +++ b/packager/file/memory_file.cc @@ -13,8 +13,8 @@ #include #include -#include "packager/base/logging.h" -#include "packager/base/synchronization/lock.h" +#include "absl/synchronization/mutex.h" +#include "glog/logging.h" namespace shaka { namespace { @@ -30,7 +30,7 @@ class FileSystem { } void Delete(const std::string& file_name) { - base::AutoLock auto_lock(lock_); + absl::MutexLock auto_lock(&mutex_); if (open_files_.find(file_name) != open_files_.end()) { LOG(ERROR) << "File '" << file_name @@ -43,7 +43,7 
@@ class FileSystem { } void DeleteAll() { - base::AutoLock auto_lock(lock_); + absl::MutexLock auto_lock(&mutex_); if (!open_files_.empty()) { LOG(ERROR) << "There are still files open. Deleting an open MemoryFile " "is not allowed. Exit without deleting the file."; @@ -54,12 +54,12 @@ class FileSystem { std::vector* Open(const std::string& file_name, const std::string& mode) { - base::AutoLock auto_lock(lock_); + absl::MutexLock auto_lock(&mutex_); if (open_files_.find(file_name) != open_files_.end()) { NOTIMPLEMENTED() << "File '" << file_name << "' is already open. MemoryFile does not support " - "open the same file before it is closed."; + "opening the same file before it is closed."; return nullptr; } @@ -81,7 +81,7 @@ class FileSystem { } bool Close(const std::string& file_name) { - base::AutoLock auto_lock(lock_); + absl::MutexLock auto_lock(&mutex_); auto iter = open_files_.find(file_name); if (iter == open_files_.end()) { @@ -101,11 +101,11 @@ class FileSystem { FileSystem() = default; // Filename to file data map. - std::map> files_; + std::map> files_ GUARDED_BY(mutex_); // Filename to file open modes map. - std::map open_files_; + std::map open_files_ GUARDED_BY(mutex_); - base::Lock lock_; + absl::Mutex mutex_; }; } // namespace diff --git a/packager/file/memory_file_unittest.cc b/packager/file/memory_file_unittest.cc index f9813dd352..2001a13892 100644 --- a/packager/file/memory_file_unittest.cc +++ b/packager/file/memory_file_unittest.cc @@ -3,8 +3,10 @@ // found in the LICENSE file. #include "packager/file/memory_file.h" + #include #include + #include "packager/file/file.h" #include "packager/file/file_closer.h" diff --git a/packager/file/thread_pool.cc b/packager/file/thread_pool.cc new file mode 100644 index 0000000000..460cfeb29a --- /dev/null +++ b/packager/file/thread_pool.cc @@ -0,0 +1,108 @@ +// Copyright 2022 Google Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#include "packager/file/thread_pool.h" + +#include + +#include "absl/time/time.h" +#include "glog/logging.h" + +namespace shaka { + +namespace { + +const absl::Duration kMaxThreadIdleTime = absl::Minutes(10); + +} // namespace + +// static +ThreadPool ThreadPool::instance; + +ThreadPool::ThreadPool() : num_idle_threads_(0), terminated_(false) {} + +ThreadPool::~ThreadPool() { + Terminate(); +} + +void ThreadPool::PostTask(const std::function& task) { + absl::MutexLock lock(&mutex_); + + DCHECK(!terminated_) << "Should not call PostTask after Terminate!"; + + if (terminated_) { + return; + } + + // An empty task is used internally to signal the thread to terminate. This + // should never be sent on input. + if (!task) { + DLOG(ERROR) << "Should not post an empty task!"; + return; + } + + tasks_.push(std::move(task)); + + if (num_idle_threads_ >= tasks_.size()) { + // We have enough threads available. + tasks_available_.SignalAll(); + } else { + // We need to start an additional thread. + std::thread thread(std::bind(&ThreadPool::ThreadMain, this)); + thread.detach(); + } +} + +void ThreadPool::Terminate() { + { + absl::MutexLock lock(&mutex_); + terminated_ = true; + while (!tasks_.empty()) { + tasks_.pop(); + } + } + tasks_available_.SignalAll(); +} + +ThreadPool::Task ThreadPool::WaitForTask() { + absl::MutexLock lock(&mutex_); + if (terminated_) { + // The pool is terminated. Terminate this thread. + return Task(); + } + + if (tasks_.empty()) { + num_idle_threads_++; + // Wait for a task, up to the maximum idle time. + tasks_available_.WaitWithTimeout(&mutex_, kMaxThreadIdleTime); + num_idle_threads_--; + + if (tasks_.empty()) { + // No work before the timeout. Terminate this thread. + return Task(); + } + } + + // Get the next task from the queue. 
+ Task task = tasks_.front(); + tasks_.pop(); + return task; +} + +void ThreadPool::ThreadMain() { + while (true) { + auto task = WaitForTask(); + if (!task) { + // An empty task signals the thread to terminate. + return; + } + + // Run the task, then loop to wait for another. + task(); + } +} + +} // namespace shaka diff --git a/packager/file/thread_pool.h b/packager/file/thread_pool.h new file mode 100644 index 0000000000..26532bc56c --- /dev/null +++ b/packager/file/thread_pool.h @@ -0,0 +1,56 @@ +// Copyright 2022 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#ifndef PACKAGER_FILE_THREAD_POOL_H_ +#define PACKAGER_FILE_THREAD_POOL_H_ + +#include "packager/common.h" + +#include +#include + +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" + +namespace shaka { + +/// A simple thread pool. We used to get this from Chromium base::, but there +/// is no replacement in the C++ standard library nor in absl. +/// (As of June 2022.) The pool will grow when there are no threads available +/// to handle a task, and it will shrink when a thread is idle for too long. +class ThreadPool { + public: + typedef std::function Task; + + ThreadPool(); + ~ThreadPool(); + + /// Find or spawn a worker thread to handle |task|. + /// @param task A potentially long-running task to be handled by the pool. + void PostTask(const Task& task); + + static ThreadPool instance; + + private: + /// Stop handing out tasks to workers, wake up all threads, and make them + /// exit. 
+ void Terminate(); + + Task WaitForTask(); + void ThreadMain(); + + absl::Mutex mutex_; + absl::CondVar tasks_available_ GUARDED_BY(mutex_); + std::queue tasks_ GUARDED_BY(mutex_); + size_t num_idle_threads_ GUARDED_BY(mutex_); + bool terminated_ GUARDED_BY(mutex_); + + DISALLOW_COPY_AND_ASSIGN(ThreadPool); +}; + +} // namespace shaka + +#endif // PACKAGER_FILE_THREAD_POOL_H_ diff --git a/packager/file/threaded_io_file.cc b/packager/file/threaded_io_file.cc index ff8ca7313a..c92230f990 100644 --- a/packager/file/threaded_io_file.cc +++ b/packager/file/threaded_io_file.cc @@ -6,10 +6,7 @@ #include "packager/file/threaded_io_file.h" -#include "packager/base/bind.h" -#include "packager/base/bind_helpers.h" -#include "packager/base/location.h" -#include "packager/base/threading/worker_pool.h" +#include "packager/file/thread_pool.h" namespace shaka { @@ -25,12 +22,10 @@ ThreadedIoFile::ThreadedIoFile(std::unique_ptr internal_file, position_(0), size_(0), eof_(false), - flushing_(false), - flush_complete_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, - base::WaitableEvent::InitialState::NOT_SIGNALED), internal_file_error_(0), - task_exit_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, - base::WaitableEvent::InitialState::NOT_SIGNALED) { + flushing_(false), + flush_complete_(false), + task_exited_(false) { DCHECK(internal_file_); } @@ -45,10 +40,7 @@ bool ThreadedIoFile::Open() { position_ = 0; size_ = internal_file_->Size(); - base::WorkerPool::PostTask( - FROM_HERE, - base::Bind(&ThreadedIoFile::TaskHandler, base::Unretained(this)), - true /* task_is_slow */); + ThreadPool::instance.PostTask(std::bind(&ThreadedIoFile::TaskHandler, this)); return true; } @@ -60,7 +52,7 @@ bool ThreadedIoFile::Close() { result = Flush(); cache_.Close(); - task_exit_event_.Wait(); + WaitForSignal(&task_exited_mutex_, &task_exited_); result &= internal_file_.release()->Close(); delete this; @@ -111,9 +103,15 @@ bool ThreadedIoFile::Flush() { if 
(internal_file_error_.load(std::memory_order_relaxed)) return false; - flushing_ = true; + { + absl::MutexLock lock(&flush_mutex_); + flushing_ = true; + flush_complete_ = false; + } cache_.Close(); - flush_complete_event_.Wait(); + + WaitForSignal(&flush_mutex_, &flush_complete_); + return internal_file_->Flush(); } @@ -128,7 +126,8 @@ bool ThreadedIoFile::Seek(uint64_t position) { // Reading. Close cache, wait for thread task to exit, seek, and re-post // the task. cache_.Close(); - task_exit_event_.Wait(); + WaitForSignal(&task_exited_mutex_, &task_exited_); + bool result = internal_file_->Seek(position); if (!result) { // Seek failed. Seek to logical position instead. @@ -138,10 +137,9 @@ bool ThreadedIoFile::Seek(uint64_t position) { } cache_.Reopen(); eof_ = false; - base::WorkerPool::PostTask( - FROM_HERE, - base::Bind(&ThreadedIoFile::TaskHandler, base::Unretained(this)), - true /* task_is_slow */); + + ThreadPool::instance.PostTask( + std::bind(&ThreadedIoFile::TaskHandler, this)); if (!result) return false; } @@ -157,11 +155,20 @@ bool ThreadedIoFile::Tell(uint64_t* position) { } void ThreadedIoFile::TaskHandler() { + { + absl::MutexLock lock(&task_exited_mutex_); + task_exited_ = false; + } + if (mode_ == kInputMode) RunInInputMode(); else RunInOutputMode(); - task_exit_event_.Signal(); + + { + absl::MutexLock lock(&task_exited_mutex_); + task_exited_ = true; + } } void ThreadedIoFile::RunInInputMode() { @@ -190,10 +197,11 @@ void ThreadedIoFile::RunInOutputMode() { while (true) { uint64_t write_bytes = cache_.Read(&io_buffer_[0], io_buffer_.size()); if (write_bytes == 0) { + absl::MutexLock lock(&flush_mutex_); if (flushing_) { cache_.Reopen(); flushing_ = false; - flush_complete_event_.Signal(); + flush_complete_ = true; } else { return; } @@ -205,9 +213,11 @@ void ThreadedIoFile::RunInOutputMode() { if (write_result < 0) { internal_file_error_.store(write_result, std::memory_order_relaxed); cache_.Close(); + + absl::MutexLock lock(&flush_mutex_); if 
(flushing_) { flushing_ = false; - flush_complete_event_.Signal(); + flush_complete_ = true; } return; } @@ -217,4 +227,15 @@ void ThreadedIoFile::RunInOutputMode() { } } +void ThreadedIoFile::WaitForSignal(absl::Mutex* mutex, bool* condition) { + // This waits until the boolean condition variable is true, then locks the + // mutex. The check is done every time the mutex is unlocked. As long as + // this mutex is held when the variable is modified, this wait will always + // wake up when the variable is changed to true. + mutex->LockWhen(absl::Condition(condition)); + + // LockWhen leaves the mutex locked. Return after unlocking the mutex again. + mutex->Unlock(); +} + } // namespace shaka diff --git a/packager/file/threaded_io_file.h b/packager/file/threaded_io_file.h index dacfe8c43d..c6388bff4b 100644 --- a/packager/file/threaded_io_file.h +++ b/packager/file/threaded_io_file.h @@ -9,7 +9,8 @@ #include #include -#include "packager/base/synchronization/waitable_event.h" + +#include "absl/synchronization/mutex.h" #include "packager/file/file.h" #include "packager/file/file_closer.h" #include "packager/file/io_cache.h" @@ -48,6 +49,7 @@ class ThreadedIoFile : public File { void TaskHandler(); void RunInInputMode(); void RunInOutputMode(); + void WaitForSignal(absl::Mutex* mutex, bool* condition); std::unique_ptr internal_file_; const Mode mode_; @@ -56,11 +58,14 @@ class ThreadedIoFile : public File { uint64_t position_; uint64_t size_; std::atomic eof_; - bool flushing_; - base::WaitableEvent flush_complete_event_; - std::atomic internal_file_error_; - // Signalled when thread task exits. 
- base::WaitableEvent task_exit_event_; + std::atomic internal_file_error_; + + absl::Mutex flush_mutex_; + bool flushing_ GUARDED_BY(flush_mutex_); + bool flush_complete_ GUARDED_BY(flush_mutex_); + + absl::Mutex task_exited_mutex_; + bool task_exited_ GUARDED_BY(task_exited_mutex_); DISALLOW_COPY_AND_ASSIGN(ThreadedIoFile); }; diff --git a/packager/file/udp_file.cc b/packager/file/udp_file.cc index d7fb03505c..11a02b4e56 100644 --- a/packager/file/udp_file.cc +++ b/packager/file/udp_file.cc @@ -7,34 +7,28 @@ #include "packager/file/udp_file.h" #if defined(OS_WIN) - -#include #include #define close closesocket #define EINTR_CODE WSAEINTR - #else - #include #include -#include +#include #include #include #include #define INVALID_SOCKET -1 #define EINTR_CODE EINTR - // IP_MULTICAST_ALL has been supported since kernel version 2.6.31 but we may be // building on a machine that is older than that. #ifndef IP_MULTICAST_ALL -#define IP_MULTICAST_ALL 49 +#define IP_MULTICAST_ALL 49 #endif - #endif // defined(OS_WIN) #include -#include "packager/base/logging.h" +#include "glog/logging.h" #include "packager/file/udp_options.h" namespace shaka { @@ -83,15 +77,17 @@ int64_t UdpFile::Read(void* buffer, uint64_t length) { int64_t result; do { - result = - recvfrom(socket_, reinterpret_cast(buffer), length, 0, NULL, 0); + result = recvfrom(socket_, reinterpret_cast(buffer), + static_cast(length), 0, NULL, 0); } while (result == -1 && GetSocketErrorCode() == EINTR_CODE); return result; } int64_t UdpFile::Write(const void* buffer, uint64_t length) { - NOTIMPLEMENTED(); + UNUSED(buffer); + UNUSED(length); + NOTIMPLEMENTED() << "UdpFile is unwritable!"; return -1; } @@ -103,17 +99,19 @@ int64_t UdpFile::Size() { } bool UdpFile::Flush() { - NOTIMPLEMENTED(); + NOTIMPLEMENTED() << "UdpFile is unflushable!"; return false; } bool UdpFile::Seek(uint64_t position) { - NOTIMPLEMENTED(); + UNUSED(position); + NOTIMPLEMENTED() << "UdpFile is unseekable!"; return false; } bool 
UdpFile::Tell(uint64_t* position) { - NOTIMPLEMENTED(); + UNUSED(position); + NOTIMPLEMENTED() << "UdpFile is unseekable!"; return false; } @@ -170,10 +168,12 @@ bool UdpFile::Open() { return false; } - struct sockaddr_in local_sock_addr = {0}; // TODO(kqyang): Support IPv6. + struct sockaddr_in local_sock_addr; + memset(&local_sock_addr, 0, sizeof(local_sock_addr)); local_sock_addr.sin_family = AF_INET; local_sock_addr.sin_port = htons(options->port()); + const bool is_multicast = IsIpv4MulticastAddress(local_in_addr); if (is_multicast) { local_sock_addr.sin_addr.s_addr = htonl(INADDR_ANY); diff --git a/packager/file/udp_file.h b/packager/file/udp_file.h index 039563abdb..18a3a29a28 100644 --- a/packager/file/udp_file.h +++ b/packager/file/udp_file.h @@ -11,10 +11,10 @@ #include -#include "packager/base/compiler_specific.h" #include "packager/file/file.h" #if defined(OS_WIN) +#include #include #else typedef int SOCKET; diff --git a/packager/file/udp_options.cc b/packager/file/udp_options.cc index b5f61041ef..8bf5bf0c25 100644 --- a/packager/file/udp_options.cc +++ b/packager/file/udp_options.cc @@ -6,15 +6,18 @@ #include "packager/file/udp_options.h" -#include +#include "absl/flags/flag.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_split.h" +#include "glog/logging.h" +#include "packager/common.h" +#include "packager/kv_pairs/kv_pairs.h" -#include "packager/base/strings/string_number_conversions.h" -#include "packager/base/strings/string_split.h" - -DEFINE_string(udp_interface_address, - "", - "IP address of the interface over which to receive UDP unicast" - " or multicast streams"); +ABSL_FLAG(std::string, + udp_interface_address, + "", + "IP address of the interface over which to receive UDP unicast" + " or multicast streams"); namespace shaka { @@ -50,47 +53,46 @@ FieldType GetFieldType(const std::string& field_name) { return kUnknownField; } -bool StringToAddressAndPort(base::StringPiece addr_and_port, +bool 
StringToAddressAndPort(std::string_view addr_and_port, std::string* addr, uint16_t* port) { DCHECK(addr); DCHECK(port); const size_t colon_pos = addr_and_port.find(':'); - if (colon_pos == base::StringPiece::npos) { + if (colon_pos == std::string_view::npos) { return false; } - *addr = addr_and_port.substr(0, colon_pos).as_string(); - unsigned port_value; - if (!base::StringToUint(addr_and_port.substr(colon_pos + 1), &port_value) || + *addr = addr_and_port.substr(0, colon_pos); + + // NOTE: SimpleAtoi will not take a uint16_t. So we check the bounds of the + // value and then cast to uint16_t. + uint32_t port_value; + if (!absl::SimpleAtoi(addr_and_port.substr(colon_pos + 1), &port_value) || (port_value > 65535)) { return false; } - *port = port_value; + *port = static_cast(port_value); return true; } } // namespace std::unique_ptr UdpOptions::ParseFromString( - base::StringPiece udp_url) { + std::string_view udp_url) { std::unique_ptr options(new UdpOptions); const size_t question_mark_pos = udp_url.find('?'); - base::StringPiece address_str = udp_url.substr(0, question_mark_pos); + std::string_view address_str = udp_url.substr(0, question_mark_pos); - if (question_mark_pos != base::StringPiece::npos) { - base::StringPiece options_str = udp_url.substr(question_mark_pos + 1); + if (question_mark_pos != std::string_view::npos) { + std::string_view options_str = udp_url.substr(question_mark_pos + 1); + std::vector kv_pairs = SplitStringIntoKeyValuePairs(options_str); - base::StringPairs pairs; - if (!base::SplitStringIntoKeyValuePairs(options_str, '=', '&', &pairs)) { - LOG(ERROR) << "Invalid udp options name/value pairs " << options_str; - return nullptr; - } - for (const auto& pair : pairs) { + for (const auto& pair : kv_pairs) { switch (GetFieldType(pair.first)) { case kBufferSizeField: - if (!base::StringToInt(pair.second, &options->buffer_size_)) { + if (!absl::SimpleAtoi(pair.second, &options->buffer_size_)) { LOG(ERROR) << "Invalid udp option for buffer_size 
field " << pair.second; return nullptr; @@ -105,7 +107,7 @@ std::unique_ptr UdpOptions::ParseFromString( break; case kReuseField: { int reuse_value = 0; - if (!base::StringToInt(pair.second, &reuse_value)) { + if (!absl::SimpleAtoi(pair.second, &reuse_value)) { LOG(ERROR) << "Invalid udp option for reuse field " << pair.second; return nullptr; } @@ -113,7 +115,7 @@ std::unique_ptr UdpOptions::ParseFromString( break; } case kTimeoutField: - if (!base::StringToUint(pair.second, &options->timeout_us_)) { + if (!absl::SimpleAtoi(pair.second, &options->timeout_us_)) { LOG(ERROR) << "Invalid udp option for timeout field " << pair.second; return nullptr; @@ -127,11 +129,11 @@ std::unique_ptr UdpOptions::ParseFromString( } } - if (!FLAGS_udp_interface_address.empty()) { + if (!absl::GetFlag(FLAGS_udp_interface_address).empty()) { LOG(WARNING) << "--udp_interface_address is deprecated. Consider switching " "to udp options instead, something like " "udp:://ip:port?interface=interface_ip."; - options->interface_address_ = FLAGS_udp_interface_address; + options->interface_address_ = absl::GetFlag(FLAGS_udp_interface_address); } if (!StringToAddressAndPort(address_str, &options->address_, diff --git a/packager/file/udp_options.h b/packager/file/udp_options.h index b98e9b2e3e..7198edb47f 100644 --- a/packager/file/udp_options.h +++ b/packager/file/udp_options.h @@ -7,8 +7,6 @@ #include #include -#include "packager/base/strings/string_piece.h" - namespace shaka { /// Options parsed from UDP url string of the form: udp://ip:port[?options] @@ -19,7 +17,7 @@ class UdpOptions { /// Parse from UDP url. /// @param udp_url is the url of the form udp://ip:port[?options] /// @returns a UdpOptions object on success, nullptr otherwise. 
- static std::unique_ptr ParseFromString(base::StringPiece udp_url); + static std::unique_ptr ParseFromString(std::string_view udp_url); const std::string& address() const { return address_; } uint16_t port() const { return port_; } diff --git a/packager/file/udp_options_unittest.cc b/packager/file/udp_options_unittest.cc index 552ad9feb6..f1a72f1bc8 100644 --- a/packager/file/udp_options_unittest.cc +++ b/packager/file/udp_options_unittest.cc @@ -6,16 +6,24 @@ #include "packager/file/udp_options.h" -#include #include -DECLARE_string(udp_interface_address); +#include "absl/flags/declare.h" +#include "absl/flags/flag.h" +#include "packager/flag_saver.h" + +ABSL_DECLARE_FLAG(std::string, udp_interface_address); namespace shaka { class UdpOptionsTest : public testing::Test { public: - void SetUp() override { FLAGS_udp_interface_address = ""; } + UdpOptionsTest() : saver(&FLAGS_udp_interface_address) {} + + void SetUp() override { absl::SetFlag(&FLAGS_udp_interface_address, ""); } + + private: + FlagSaver saver; }; TEST_F(UdpOptionsTest, AddressAndPort) { @@ -47,7 +55,7 @@ TEST_F(UdpOptionsTest, MissingAddress) { } TEST_F(UdpOptionsTest, UdpInterfaceAddressFlag) { - FLAGS_udp_interface_address = "10.11.12.13"; + absl::SetFlag(&FLAGS_udp_interface_address, "10.11.12.13"); auto options = UdpOptions::ParseFromString("224.1.2.30:88"); ASSERT_TRUE(options); diff --git a/packager/flag_saver.h b/packager/flag_saver.h new file mode 100644 index 0000000000..d4ffac70a7 --- /dev/null +++ b/packager/flag_saver.h @@ -0,0 +1,33 @@ +// Copyright 2022 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#ifndef PACKAGER_FLAG_SAVER_H_ +#define PACKAGER_FLAG_SAVER_H_ + +#include "absl/flags/flag.h" + +namespace shaka { + +/// A replacement for gflags' FlagSaver, which is used in testing. 
+/// A FlagSaver is an RAII object to save and restore the values of +/// command-line flags during a test. Unlike the gflags version, flags to be +/// saved and restored must be listed explicitly. +template +class FlagSaver { + public: + FlagSaver(absl::Flag* flag) + : flag_(flag), original_value_(absl::GetFlag(*flag)) {} + + ~FlagSaver() { absl::SetFlag(flag_, original_value_); } + + private: + absl::Flag* flag_; // unowned + T original_value_; +}; + +} // namespace shaka + +#endif // PACKAGER_FLAG_SAVER_H_ diff --git a/packager/kv_pairs/CMakeLists.txt b/packager/kv_pairs/CMakeLists.txt new file mode 100644 index 0000000000..15b50ccdfa --- /dev/null +++ b/packager/kv_pairs/CMakeLists.txt @@ -0,0 +1,20 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +add_library(kv_pairs STATIC + kv_pairs.cc) +target_link_libraries(kv_pairs + absl::str_format + glog) + +add_executable(kv_pairs_unittest + kv_pairs_unittest.cc) +target_link_libraries(kv_pairs_unittest + kv_pairs + gmock + gtest + gtest_main) +add_test(NAME kv_pairs_unittest COMMAND kv_pairs_unittest) diff --git a/packager/kv_pairs/kv_pairs.cc b/packager/kv_pairs/kv_pairs.cc new file mode 100644 index 0000000000..f85b433896 --- /dev/null +++ b/packager/kv_pairs/kv_pairs.cc @@ -0,0 +1,30 @@ +// Copyright 2022 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#include "packager/kv_pairs/kv_pairs.h" + +#include "absl/strings/str_split.h" + +namespace shaka { + +std::vector SplitStringIntoKeyValuePairs(std::string_view str) { + std::vector kv_pairs; + + // Edge case: 0 pairs. 
+ if (str.size() == 0) { + return kv_pairs; + } + + std::vector kv_strings = absl::StrSplit(str, '&'); + for (const auto& kv_string : kv_strings) { + KVPair pair = absl::StrSplit(kv_string, absl::MaxSplits('=', 1)); + kv_pairs.push_back(pair); + } + + return kv_pairs; +} + +} // namespace shaka diff --git a/packager/kv_pairs/kv_pairs.h b/packager/kv_pairs/kv_pairs.h new file mode 100644 index 0000000000..c909794ebc --- /dev/null +++ b/packager/kv_pairs/kv_pairs.h @@ -0,0 +1,17 @@ +// Copyright 2022 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#include +#include +#include +#include + +namespace shaka { + +typedef std::pair KVPair; +std::vector SplitStringIntoKeyValuePairs(std::string_view str); + +} // namespace shaka diff --git a/packager/kv_pairs/kv_pairs_unittest.cc b/packager/kv_pairs/kv_pairs_unittest.cc new file mode 100644 index 0000000000..7bdb2ae6c1 --- /dev/null +++ b/packager/kv_pairs/kv_pairs_unittest.cc @@ -0,0 +1,38 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +#include +#include + +#include "packager/kv_pairs/kv_pairs.h" + +namespace shaka { + +using ::std::make_pair; +using ::testing::ElementsAre; + +TEST(KVPairs, Empty) { + ASSERT_THAT(SplitStringIntoKeyValuePairs(""), ElementsAre()); +} + +TEST(KVPairs, Single) { + ASSERT_THAT(SplitStringIntoKeyValuePairs("a=b"), + ElementsAre(make_pair("a", "b"))); +} + +TEST(KVPairs, Multiple) { + ASSERT_THAT(SplitStringIntoKeyValuePairs("a=b&c=d&e=f"), + ElementsAre(make_pair("a", "b"), make_pair("c", "d"), + make_pair("e", "f"))); +} + +TEST(KVPairs, ExtraEqualsSigns) { + ASSERT_THAT(SplitStringIntoKeyValuePairs("a=b&c==d&e=f=g=h"), + ElementsAre(make_pair("a", "b"), make_pair("c", "=d"), + make_pair("e", "f=g=h"))); +} + +} // namespace shaka diff --git a/packager/status/CMakeLists.txt b/packager/status/CMakeLists.txt new file mode 100644 index 0000000000..29c664cd32 --- /dev/null +++ b/packager/status/CMakeLists.txt @@ -0,0 +1,24 @@ +# Copyright 2022 Google Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +add_library(status STATIC + status.cc) +target_link_libraries(status + absl::str_format + glog) + +if(LIBPACKAGER_SHARED) + target_compile_definitions(status PUBLIC SHAKA_IMPLEMENTATION) +endif() + +add_executable(status_unittest + status_unittest.cc) +target_link_libraries(status_unittest + status + gmock + gtest + gtest_main) +add_test(NAME status_unittest COMMAND status_unittest) diff --git a/packager/status.cc b/packager/status/status.cc similarity index 87% rename from packager/status.cc rename to packager/status/status.cc index 35080ec634..81107794b0 100644 --- a/packager/status.cc +++ b/packager/status/status.cc @@ -4,10 +4,11 @@ // license that can be found in the LICENSE file or at // https://developers.google.com/open-source/licenses/bsd -#include "packager/status.h" +#include "packager/status/status.h" -#include "packager/base/logging.h" -#include "packager/base/strings/stringprintf.h" +#include "absl/strings/str_format.h" +#include "glog/logging.h" +#include "packager/common.h" namespace shaka { @@ -84,9 +85,9 @@ std::string Status::ToString() const { if (error_code_ == error::OK) return "OK"; - return base::StringPrintf("%d (%s): %s", error_code_, - error::ErrorCodeToString(error_code_), - error_message_.c_str()); + return absl::StrFormat("%d (%s): %s", error_code_, + error::ErrorCodeToString(error_code_), + error_message_.c_str()); } std::ostream& operator<<(std::ostream& os, const Status& x) { diff --git a/packager/status.h b/packager/status/status.h similarity index 100% rename from packager/status.h rename to packager/status/status.h diff --git a/packager/status_macros.h b/packager/status/status_macros.h similarity index 100% rename from packager/status_macros.h rename to packager/status/status_macros.h diff --git a/packager/status_test_util.h 
b/packager/status/status_test_util.h similarity index 100% rename from packager/status_test_util.h rename to packager/status/status_test_util.h diff --git a/packager/status_unittest.cc b/packager/status/status_unittest.cc similarity index 93% rename from packager/status_unittest.cc rename to packager/status/status_unittest.cc index 723e259073..f4ecfb903d 100644 --- a/packager/status_unittest.cc +++ b/packager/status/status_unittest.cc @@ -7,8 +7,8 @@ #include #include -#include "packager/base/strings/string_number_conversions.h" -#include "packager/status.h" +#include "absl/strings/str_format.h" +#include "packager/status/status.h" namespace shaka { @@ -24,7 +24,7 @@ static void CheckStatus(const Status& s, } else { EXPECT_TRUE(!s.ok()); EXPECT_THAT(s.ToString(), testing::HasSubstr(message)); - EXPECT_THAT(s.ToString(), testing::HasSubstr(base::UintToString(code))); + EXPECT_THAT(s.ToString(), testing::HasSubstr(absl::StrFormat("%d", code))); } } diff --git a/packager/testing/coverage_util_ios.cc b/packager/testing/coverage_util_ios.cc deleted file mode 100644 index 15ac1b4a0a..0000000000 --- a/packager/testing/coverage_util_ios.cc +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -extern "C" void __gcov_flush(); - -namespace coverage_util { - -void FlushCoverageDataIfNecessary() { -#if defined(ENABLE_TEST_CODE_COVERAGE) - __gcov_flush(); -#endif -} - -} // namespace coverage_util diff --git a/packager/testing/coverage_util_ios.h b/packager/testing/coverage_util_ios.h deleted file mode 100644 index 702811aa7b..0000000000 --- a/packager/testing/coverage_util_ios.h +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef TESTING_COVERAGE_UTIL_IOS_H_ -#define TESTING_COVERAGE_UTIL_IOS_H_ - -namespace coverage_util { - -// Flushes .gcda coverage files if ENABLE_TEST_CODE_COVERAGE is defined. iOS 7 -// does not call any code at the "end" of an app so flushing should be -// performed manually. -void FlushCoverageDataIfNecessary(); - -} // namespace coverage_util - -#endif // TESTING_COVERAGE_UTIL_IOS_H_ diff --git a/packager/testing/dockers/Alpine_Dockerfile b/packager/testing/dockers/Alpine_Dockerfile index df32f525cf..6a8f983dc7 100644 --- a/packager/testing/dockers/Alpine_Dockerfile +++ b/packager/testing/dockers/Alpine_Dockerfile @@ -1,29 +1,10 @@ -FROM alpine:3.11 +FROM alpine:3.12 # Install utilities, libraries, and dev tools. RUN apk add --no-cache \ bash curl \ bsd-compat-headers c-ares-dev linux-headers \ - build-base git ninja python2 python3 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -WORKDIR / -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH $PATH:/depot_tools - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" - -# Alpine uses musl which does not have mallinfo defined in malloc.h. Define the -# structure to workaround a Chromium base bug. -RUN sed -i \ - '/malloc_usable_size/a \\nstruct mallinfo {\n int arena;\n int hblkhd;\n int uordblks;\n};' \ - /usr/include/malloc.h -ENV GYP_DEFINES='musl=1' + build-base cmake git python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". 
diff --git a/packager/testing/dockers/ArchLinux_Dockerfile b/packager/testing/dockers/ArchLinux_Dockerfile index 1b0435687e..72f1129022 100644 --- a/packager/testing/dockers/ArchLinux_Dockerfile +++ b/packager/testing/dockers/ArchLinux_Dockerfile @@ -4,19 +4,7 @@ FROM archlinux:latest RUN pacman -Sy --needed --noconfirm \ core/which \ c-ares \ - gcc git python2 python3 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -WORKDIR / -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" + cmake gcc git make python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/CentOS_Dockerfile b/packager/testing/dockers/CentOS_Dockerfile index e2c604b04b..681340ceca 100644 --- a/packager/testing/dockers/CentOS_Dockerfile +++ b/packager/testing/dockers/CentOS_Dockerfile @@ -10,19 +10,11 @@ RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org| RUN yum install -y \ which \ c-ares-devel libatomic \ - gcc-c++ git python2 python3 + cmake gcc-toolset-9-gcc gcc-toolset-9-gcc-c++ git python3 -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -WORKDIR / -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. 
-ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" +# CentOS 8 is old, and the g++ version installed doesn't automatically link the +# C++ filesystem library as it should. Activate a newer dev environment. +ENV PATH="/opt/rh/gcc-toolset-9/root/usr/bin:$PATH" # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/Debian_Dockerfile b/packager/testing/dockers/Debian_Dockerfile index 12a271740e..3c3b38a108 100644 --- a/packager/testing/dockers/Debian_Dockerfile +++ b/packager/testing/dockers/Debian_Dockerfile @@ -1,22 +1,11 @@ -FROM debian:9 +FROM debian:11 # Install utilities, libraries, and dev tools. RUN apt-get update && apt-get install -y apt-utils RUN apt-get install -y \ curl \ libc-ares-dev \ - build-essential git python python3 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" + build-essential cmake git python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/Fedora_Dockerfile b/packager/testing/dockers/Fedora_Dockerfile index f4edfb531e..12e6be8816 100644 --- a/packager/testing/dockers/Fedora_Dockerfile +++ b/packager/testing/dockers/Fedora_Dockerfile @@ -4,19 +4,7 @@ FROM fedora:34 RUN yum install -y \ which \ c-ares-devel libatomic \ - gcc-c++ git python2 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. 
-WORKDIR / -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" + cmake gcc-c++ git python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/OpenSUSE_Dockerfile b/packager/testing/dockers/OpenSUSE_Dockerfile index 99cb654df3..e812ab9ff3 100644 --- a/packager/testing/dockers/OpenSUSE_Dockerfile +++ b/packager/testing/dockers/OpenSUSE_Dockerfile @@ -1,22 +1,12 @@ -FROM opensuse/leap:15 +# Older versions of OpenSUSE (like leap 15) have compilers too old for C++17. +# Tumbleweed is a rolling release system, but that's our only option now. +FROM opensuse/tumbleweed:latest # Install utilities, libraries, and dev tools. RUN zypper in -y \ curl which \ c-ares-devel \ - gcc-c++ git python python3 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -WORKDIR / -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" + cmake gcc-c++ git python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/Ubuntu_Dockerfile b/packager/testing/dockers/Ubuntu_Dockerfile index 3e3a14ba8e..f6335c0395 100644 --- a/packager/testing/dockers/Ubuntu_Dockerfile +++ b/packager/testing/dockers/Ubuntu_Dockerfile @@ -1,22 +1,14 @@ -FROM ubuntu:18.04 +FROM ubuntu:20.04 + +# Tell apt not to prompt us for anything. 
+ENV DEBIAN_FRONTEND noninteractive # Install utilities, libraries, and dev tools. RUN apt-get update && apt-get install -y apt-utils RUN apt-get install -y \ curl \ libc-ares-dev \ - build-essential git python python3 - -# Default to python2 because our build system is ancient. -RUN ln -sf python2 /usr/bin/python - -# Install depot_tools. -RUN git clone -b chrome/4147 https://chromium.googlesource.com/chromium/tools/depot_tools.git -RUN touch depot_tools/.disable_auto_update -ENV PATH /depot_tools:$PATH - -# Bypass VPYTHON included by depot_tools. Prefer the system installation. -ENV VPYTHON_BYPASS="manually managed python not supported by chrome operations" + build-essential cmake git python3 # Build and run this docker by mapping shaka-packager with # -v "shaka-packager:/shaka-packager". diff --git a/packager/testing/dockers/test_dockers.sh b/packager/testing/dockers/test_dockers.sh deleted file mode 100755 index 3cf38bc4ad..0000000000 --- a/packager/testing/dockers/test_dockers.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -# Exit on first error. -set -e - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -PACKAGER_DIR="$(dirname "$(dirname "$(dirname "$(dirname ${SCRIPT_DIR})")")")" - -function docker_run() { - docker run -v ${PACKAGER_DIR}:/shaka-packager -w /shaka-packager/src ${CONTAINER} "$@" -} - -# Command line arguments will be taken as an allowlist of OSes to run. -# By default, a regex that matches everything. -FILTER=".*" -if [[ $# != 0 ]]; then - # Join arguments with a pipe, to make a regex alternation to match any of - # them. The syntax is a mess, but that's bash. Set IFS (the separator - # variable) in a subshell and print the array. This has the effect of joining - # them by the character in IFS. Then add parentheses to make a complete regex - # to match all the arguments. - FILTER=$(IFS="|"; echo "$*") - FILTER="($FILTER)" -fi - -# On exit, print the name of the OS we were on. 
This helps identify what to -# debug when the start of a test run scrolls off-screen. -trap 'echo "Failed on $OS_NAME!"' exit - -echo "Using OS filter: $FILTER" -RAN_SOMETHING=0 -for DOCKER_FILE in ${SCRIPT_DIR}/*_Dockerfile ; do - # Take the basename of the dockerfile path, then remove the trailing - # "_Dockerfile" from the file name. This is the OS name. - OS_NAME="$( basename "$DOCKER_FILE" | sed -e 's/_Dockerfile//' )" - - if echo "$OS_NAME" | grep -Eqi "$FILTER"; then - echo "Testing $OS_NAME." - # Fall through. - else - echo "Skipping $OS_NAME." - continue - fi - - # Build a unique container name per OS for debugging purposes and to improve - # caching. Containers names must be in lowercase. - # To debug a failure in Alpine, for example, use: - # docker run -it -v /path/to/packager:/shaka-packager \ - # packager_test_alpine:latest /bin/bash - CONTAINER="$( echo "packager_test_${OS_NAME}" | tr A-Z a-z )" - - RAN_SOMETHING=1 - docker build -t ${CONTAINER} -f ${DOCKER_FILE} ${SCRIPT_DIR} - docker_run rm -rf out/Release - docker_run gclient runhooks - docker_run ninja -C out/Release - docker_run out/Release/packager_test.py -v -done - -# Clear the exit trap from above. -trap - exit - -if [[ "$RAN_SOMETHING" == "0" ]]; then - echo "No tests were run! The filter $FILTER did not match any OSes." 1>&2 - exit 1 -fi diff --git a/packager/testing/gmock.gyp b/packager/testing/gmock.gyp deleted file mode 100644 index 10ccf7949e..0000000000 --- a/packager/testing/gmock.gyp +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'targets': [ - { - 'target_name': 'gmock', - 'type': 'static_library', - 'dependencies': [ - 'gtest.gyp:gtest', - ], - 'sources': [ - # Sources based on files in r173 of gmock. 
- 'gmock/include/gmock/gmock-actions.h', - 'gmock/include/gmock/gmock-cardinalities.h', - 'gmock/include/gmock/gmock-generated-actions.h', - 'gmock/include/gmock/gmock-generated-function-mockers.h', - 'gmock/include/gmock/gmock-generated-matchers.h', - 'gmock/include/gmock/gmock-generated-nice-strict.h', - 'gmock/include/gmock/gmock-matchers.h', - 'gmock/include/gmock/gmock-spec-builders.h', - 'gmock/include/gmock/gmock.h', - 'gmock/include/gmock/internal/gmock-generated-internal-utils.h', - 'gmock/include/gmock/internal/gmock-internal-utils.h', - 'gmock/include/gmock/internal/gmock-port.h', - 'gmock/src/gmock-all.cc', - 'gmock/src/gmock-cardinalities.cc', - 'gmock/src/gmock-internal-utils.cc', - 'gmock/src/gmock-matchers.cc', - 'gmock/src/gmock-spec-builders.cc', - 'gmock/src/gmock.cc', - "gmock_custom/gmock/internal/custom/gmock-port.h", - 'gmock_mutant.h', # gMock helpers - ], - 'sources!': [ - 'gmock/src/gmock-all.cc', # Not needed by our build. - ], - 'include_dirs': [ - 'gmock', - 'gmock_custom', - 'gmock/include', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'gmock_custom', - 'gmock/include', # Allow #include - ], - }, - 'export_dependent_settings': [ - 'gtest.gyp:gtest', - ], - }, - { - 'target_name': 'gmock_main', - 'type': 'static_library', - 'dependencies': [ - 'gmock', - ], - 'sources': [ - 'gmock/src/gmock_main.cc', - ], - }, - ], -} diff --git a/packager/testing/gmock_custom/gmock/internal/custom/gmock-port.h b/packager/testing/gmock_custom/gmock/internal/custom/gmock-port.h deleted file mode 100644 index b329765117..0000000000 --- a/packager/testing/gmock_custom/gmock/internal/custom/gmock-port.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -#ifndef PACKAGER_TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ -#define PACKAGER_TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ - -#include - -namespace std { - -// Provide alternative implementation of std::is_default_constructible for -// old, pre-4.7 of libstdc++, where is_default_constructible is missing. -// <20120322 below implies pre-4.7.0. In addition we blacklist several version -// that released after 4.7.0 from pre-4.7.0 branch. 20120702 implies 4.5.4, and -// 20121127 implies 4.6.4. -#if defined(__GLIBCXX__) && \ - (__GLIBCXX__ < 20120322 || __GLIBCXX__ == 20120702 || \ - __GLIBCXX__ == 20121127) -template -using is_default_constructible = std::is_constructible; -#endif - -} // namespace std - -#endif // PACKAGER_TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ diff --git a/packager/testing/gmock_mutant.h b/packager/testing/gmock_mutant.h deleted file mode 100644 index 90d303efec..0000000000 --- a/packager/testing/gmock_mutant.h +++ /dev/null @@ -1,4995 +0,0 @@ -// Copyright (c) 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// This file automatically generated by testing/generate_gmock_mutant.py. -// DO NOT EDIT. - -#ifndef TESTING_GMOCK_MUTANT_H_ -#define TESTING_GMOCK_MUTANT_H_ - -// The intention of this file is to make possible using GMock actions in -// all of its syntactic beauty. Classes and helper functions can be used as -// more generic variants of Task and Callback classes (see base/task.h) -// Mutant supports both pre-bound arguments (like Task) and call-time -// arguments (like Callback) - hence the name. :-) -// -// DispatchToMethod/Function supports two sets of arguments: pre-bound (P) and -// call-time (C). 
The arguments as well as the return type are templatized. -// DispatchToMethod/Function will also try to call the selected method or -// function even if provided pre-bound arguments does not match exactly with -// the function signature hence the X1, X2 ... XN parameters in CreateFunctor. -// DispatchToMethod will try to invoke method that may not belong to the -// object's class itself but to the object's class base class. -// -// Additionally you can bind the object at calltime by binding a pointer to -// pointer to the object at creation time - before including this file you -// have to #define GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING. -// -// TODO(stoyan): It's yet not clear to me should we use T& and T&* instead -// of T* and T** when we invoke CreateFunctor to match the EXPECT_CALL style. -// -// -// Sample usage with gMock: -// -// struct Mock : public ObjectDelegate { -// MOCK_METHOD2(string, OnRequest(int n, const string& request)); -// MOCK_METHOD1(void, OnQuit(int exit_code)); -// MOCK_METHOD2(void, LogMessage(int level, const string& message)); -// -// string HandleFlowers(const string& reply, int n, const string& request) { -// string result = SStringPrintf("In request of %d %s ", n, request); -// for (int i = 0; i < n; ++i) result.append(reply) -// return result; -// } -// -// void DoLogMessage(int level, const string& message) { -// } -// -// void QuitMessageLoop(int seconds) { -// base::MessageLoop* loop = base::MessageLoop::current(); -// loop->PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(), -// 1000 * seconds); -// } -// }; -// -// Mock mock; -// // Will invoke mock.HandleFlowers("orchids", n, request) -// // "orchids" is a pre-bound argument, and and are call-time -// // arguments - they are not known until the OnRequest mock is invoked. 
-// EXPECT_CALL(mock, OnRequest(Ge(5), StartsWith("flower")) -// .Times(1) -// .WillOnce(Invoke(CreateFunctor(&mock, &Mock::HandleFlowers, -// string("orchids")))); -// -// -// // No pre-bound arguments, two call-time arguments passed -// // directly to DoLogMessage -// EXPECT_CALL(mock, OnLogMessage(_, _)) -// .Times(AnyNumber()) -// .WillAlways(Invoke(CreateFunctor, &mock, &Mock::DoLogMessage)); -// -// -// // In this case we have a single pre-bound argument - 3. We ignore -// // all of the arguments of OnQuit. -// EXCEPT_CALL(mock, OnQuit(_)) -// .Times(1) -// .WillOnce(InvokeWithoutArgs(CreateFunctor( -// &mock, &Mock::QuitMessageLoop, 3))); -// -// MessageLoop loop; -// loop.Run(); -// -// -// // Here is another example of how we can set an action that invokes -// // method of an object that is not yet created. -// struct Mock : public ObjectDelegate { -// MOCK_METHOD1(void, DemiurgeCreated(Demiurge*)); -// MOCK_METHOD2(void, OnRequest(int count, const string&)); -// -// void StoreDemiurge(Demiurge* w) { -// demiurge_ = w; -// } -// -// Demiurge* demiurge; -// } -// -// EXPECT_CALL(mock, DemiurgeCreated(_)).Times(1) -// .WillOnce(Invoke(CreateFunctor(&mock, &Mock::StoreDemiurge))); -// -// EXPECT_CALL(mock, OnRequest(_, StrEq("Moby Dick"))) -// .Times(AnyNumber()) -// .WillAlways(WithArgs<0>(Invoke( -// CreateFunctor(&mock->demiurge_, &Demiurge::DecreaseMonsters)))); -// - -#include "base/memory/linked_ptr.h" -#include "base/tuple.h" // for Tuple - -namespace testing { - -// 0 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple0& c) { - return (obj->*method)(); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple0& c) { - return (*function)(); -} - -// 0 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple1& c) { - return (obj->*method)(c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const 
Tuple1& c) { - return (*function)(c.a); -} - -// 0 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple2& c) { - return (obj->*method)(c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple2& c) { - return (*function)(c.a, c.b); -} - -// 0 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple3& c) { - return (obj->*method)(c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple3& c) { - return (*function)(c.a, c.b, c.c); -} - -// 0 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple4& c) { - return (obj->*method)(c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple4& c) { - return (*function)(c.a, c.b, c.c, c.d); -} - -// 0 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple5& c) { - return (obj->*method)(c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple5& c) { - return (*function)(c.a, c.b, c.c, c.d, c.e); -} - -// 0 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple0& p, - const Tuple6& c) { - return (obj->*method)(c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple0& p, - const Tuple6& c) { - return (*function)(c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 1 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple0& c) { - return (obj->*method)(p.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple0& c) { - return (*function)(p.a); -} - -// 1 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple1& c) { - return (obj->*method)(p.a, c.a); -} -template -inline 
R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple1& c) { - return (*function)(p.a, c.a); -} - -// 1 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple2& c) { - return (obj->*method)(p.a, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple2& c) { - return (*function)(p.a, c.a, c.b); -} - -// 1 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple3& c) { - return (obj->*method)(p.a, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple3& c) { - return (*function)(p.a, c.a, c.b, c.c); -} - -// 1 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple4& c) { - return (obj->*method)(p.a, c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple4& c) { - return (*function)(p.a, c.a, c.b, c.c, c.d); -} - -// 1 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple5& c) { - return (obj->*method)(p.a, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple5& c) { - return (*function)(p.a, c.a, c.b, c.c, c.d, c.e); -} - -// 1 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple1& p, - const Tuple6& c) { - return (obj->*method)(p.a, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple1& p, - const Tuple6& c) { - return (*function)(p.a, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 2 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple0& c) { - return (obj->*method)(p.a, p.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple0& c) { - return (*function)(p.a, p.b); -} - -// 2 - 1 -template -inline R 
DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple1& c) { - return (obj->*method)(p.a, p.b, c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple1& c) { - return (*function)(p.a, p.b, c.a); -} - -// 2 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple2& c) { - return (obj->*method)(p.a, p.b, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple2& c) { - return (*function)(p.a, p.b, c.a, c.b); -} - -// 2 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple3& c) { - return (obj->*method)(p.a, p.b, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple3& c) { - return (*function)(p.a, p.b, c.a, c.b, c.c); -} - -// 2 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple4& c) { - return (obj->*method)(p.a, p.b, c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple4& c) { - return (*function)(p.a, p.b, c.a, c.b, c.c, c.d); -} - -// 2 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple5& c) { - return (obj->*method)(p.a, p.b, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple5& c) { - return (*function)(p.a, p.b, c.a, c.b, c.c, c.d, c.e); -} - -// 2 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple2& p, - const Tuple6& c) { - return (obj->*method)(p.a, p.b, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple2& p, - const Tuple6& c) { - return (*function)(p.a, p.b, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 3 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple0& c) { - return 
(obj->*method)(p.a, p.b, p.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple0& c) { - return (*function)(p.a, p.b, p.c); -} - -// 3 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple1& c) { - return (obj->*method)(p.a, p.b, p.c, c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple1& c) { - return (*function)(p.a, p.b, p.c, c.a); -} - -// 3 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple2& c) { - return (obj->*method)(p.a, p.b, p.c, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple2& c) { - return (*function)(p.a, p.b, p.c, c.a, c.b); -} - -// 3 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple3& c) { - return (obj->*method)(p.a, p.b, p.c, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple3& c) { - return (*function)(p.a, p.b, p.c, c.a, c.b, c.c); -} - -// 3 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple4& c) { - return (obj->*method)(p.a, p.b, p.c, c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple4& c) { - return (*function)(p.a, p.b, p.c, c.a, c.b, c.c, c.d); -} - -// 3 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple5& c) { - return (obj->*method)(p.a, p.b, p.c, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple3& p, - const Tuple5& c) { - return (*function)(p.a, p.b, p.c, c.a, c.b, c.c, c.d, c.e); -} - -// 3 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple3& p, - const Tuple6& c) { - return (obj->*method)(p.a, p.b, p.c, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R 
DispatchToFunction(Function function, - const Tuple3& p, - const Tuple6& c) { - return (*function)(p.a, p.b, p.c, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 4 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple0& c) { - return (obj->*method)(p.a, p.b, p.c, p.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple0& c) { - return (*function)(p.a, p.b, p.c, p.d); -} - -// 4 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple1& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple1& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a); -} - -// 4 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple2& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple2& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a, c.b); -} - -// 4 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple3& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple3& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a, c.b, c.c); -} - -// 4 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple4& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple4& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d); -} - -// 4 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple5& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function 
function, - const Tuple4& p, - const Tuple5& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d, c.e); -} - -// 4 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple4& p, - const Tuple6& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple4& p, - const Tuple6& c) { - return (*function)(p.a, p.b, p.c, p.d, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 5 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple0& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple0& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e); -} - -// 5 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple1& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple1& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a); -} - -// 5 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple2& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple2& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a, c.b); -} - -// 5 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple3& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple3& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c); -} - -// 5 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple4& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d); -} -template -inline R 
DispatchToFunction(Function function, - const Tuple5& p, - const Tuple4& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d); -} - -// 5 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple5& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple5& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d, c.e); -} - -// 5 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple5& p, - const Tuple6& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple5& p, - const Tuple6& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// 6 - 0 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple0& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple0& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f); -} - -// 6 - 1 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple1& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple1& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a); -} - -// 6 - 2 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple2& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple2& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b); -} - -// 6 - 3 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple3& c) { - 
return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple3& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c); -} - -// 6 - 4 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple4& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple4& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d); -} - -// 6 - 5 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple5& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d, c.e); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple5& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d, c.e); -} - -// 6 - 6 -template -inline R DispatchToMethod(T* obj, Method method, - const Tuple6& p, - const Tuple6& c) { - return (obj->*method)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d, c.e, c.f); -} -template -inline R DispatchToFunction(Function function, - const Tuple6& p, - const Tuple6& c) { - return (*function)(p.a, p.b, p.c, p.d, p.e, p.f, c.a, c.b, c.c, c.d, c.e, c.f); -} - -// Interface that is exposed to the consumer, that does the actual calling -// of the method. -template -class MutantRunner { - public: - virtual R RunWithParams(const Params& params) = 0; - virtual ~MutantRunner() {} -}; - -// Mutant holds pre-bound arguments (like Task). Like Callback -// allows call-time arguments. You bind a pointer to the object -// at creation time. 
-template -class Mutant : public MutantRunner { - public: - Mutant(T* obj, Method method, const PreBound& pb) - : obj_(obj), method_(method), pb_(pb) { - } - - // MutantRunner implementation - virtual R RunWithParams(const Params& params) { - return DispatchToMethod(this->obj_, this->method_, pb_, params); - } - - T* obj_; - Method method_; - PreBound pb_; -}; - -template -class MutantFunction : public MutantRunner { - public: - MutantFunction(Function function, const PreBound& pb) - : function_(function), pb_(pb) { - } - - // MutantRunner implementation - virtual R RunWithParams(const Params& params) { - return DispatchToFunction(function_, pb_, params); - } - - Function function_; - PreBound pb_; -}; - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -// MutantLateBind is like Mutant, but you bind a pointer to a pointer -// to the object. This way you can create actions for an object -// that is not yet created (has only storage for a pointer to it). -template -class MutantLateObjectBind : public MutantRunner { - public: - MutantLateObjectBind(T** obj, Method method, const PreBound& pb) - : obj_(obj), method_(method), pb_(pb) { - } - - // MutantRunner implementation. - virtual R RunWithParams(const Params& params) { - EXPECT_THAT(*this->obj_, testing::NotNull()); - if (NULL == *this->obj_) - return R(); - return DispatchToMethod( *this->obj_, this->method_, pb_, params); - } - - T** obj_; - Method method_; - PreBound pb_; -}; -#endif - -// Simple MutantRunner<> wrapper acting as a functor. 
-// Redirects operator() to MutantRunner::Run() -template -struct MutantFunctor { - explicit MutantFunctor(MutantRunner* cb) : impl_(cb) { - } - - ~MutantFunctor() { - } - - inline R operator()() { - return impl_->RunWithParams(Tuple0()); - } - - template - inline R operator()(const Arg1& a) { - return impl_->RunWithParams(Params(a)); - } - - template - inline R operator()(const Arg1& a, const Arg2& b) { - return impl_->RunWithParams(Params(a, b)); - } - - template - inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c) { - return impl_->RunWithParams(Params(a, b, c)); - } - - template - inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c, - const Arg4& d) { - return impl_->RunWithParams(Params(a, b, c, d)); - } - - private: - // We need copy constructor since MutantFunctor is copied few times - // inside GMock machinery, hence no DISALLOW_EVIL_CONTRUCTORS - MutantFunctor(); - linked_ptr > impl_; -}; - -// 0 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)()) { - MutantRunner* t = - new Mutant - (obj, method, MakeTuple()); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)()) { - MutantRunner* t = - new MutantFunction - (function, MakeTuple()); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)()) { - MutantRunner* t = - new MutantLateObjectBind - (obj, method, MakeTuple()); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)()) { - MutantRunner* t = - new Mutant - (obj, method, MakeTuple()); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)()) { - MutantRunner* t = - new MutantFunction - (function, MakeTuple()); - return MutantFunctor(t); -} -#ifdef 
GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)()) { - MutantRunner* t = - new MutantLateObjectBind - (obj, method, MakeTuple()); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(A1)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1, A2)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - 
-template -inline MutantFunctor > -CreateFunctor(R (*function)(A1, A2)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1, A2)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1, A2)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1, A2)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1, A2)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1, A2, A3)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(A1, A2, A3)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1, A2, A3)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && 
!defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1, A2, A3)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1, A2, A3)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1, A2, A3)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1, A2, A3, A4)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(A1, A2, A3, A4)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1, A2, A3, A4)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1, A2, A3, A4)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1, A2, A3, A4)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template 
-inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1, A2, A3, A4)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1, A2, A3, A4, A5)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 0 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - 
new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - new Mutant > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - new MutantFunction > - (function, MakeTuple()); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(A1, A2, A3, A4, A5, A6)) { - MutantRunner >* t = - new MutantLateObjectBind > - (obj, method, MakeTuple()); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)(X1), const P1& p1) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1), const P1& p1) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1), const P1& p1) { - MutantRunner* t = 
- new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)(X1), const P1& p1) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1), const P1& p1) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1), const P1& p1) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, A1), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1)); - return MutantFunctor 
>(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, A1), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, A1, A2), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1, A2), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1, A2), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1, A2), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1, A2), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, 
A1, A2), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, A1, A2, A3), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R 
(U::*method)(X1, A1, A2, A3, A4), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1, A2, A3, A4), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1, A2, A3, A4), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4), - const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1, A2, A3, A4), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4), - const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, A1, A2, A3, A4, A5), const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1, A2, A3, A4, A5), const P1& p1) { - MutantRunner >* t = - new 
MutantFunction, Tuple5 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1, A2, A3, A4, A5), const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4, A5), - const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1, A2, A3, A4, A5), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4, A5), - const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 1 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, A1, A2, A3, A4, A5, A6), - const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, A1, A2, A3, A4, A5, A6), const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, A1, A2, A3, A4, A5, A6), - const P1& p1) { - 
MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4, A5, A6), - const P1& p1) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, A1, A2, A3, A4, A5, A6), - const P1& p1) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, A1, A2, A3, A4, A5, A6), - const P1& p1) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)(X1, X2), const P1& p1, const P2& p2) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1, X2), const P1& p1, const P2& p2) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1, X2), const P1& p1, const P2& p2) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* 
obj, R (__stdcall U::*method)(X1, X2), const P1& p1, - const P2& p2) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1, X2), const P1& p1, const P2& p2) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2), const P1& p1, - const P2& p2) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1), const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1), const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1), const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, 
A1), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1, A2), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1, A2), const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1, A2), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1, A2), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, A1, A2), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template 
-inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1, A2), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1, A2, A3), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1, A2, A3), const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1, A2, A3), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, A1, A2, A3), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, 
method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1, A2, A3, A4), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1, A2, A3, A4), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1, A2, A3, A4), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, A1, A2, A3, A4), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined 
(ARCH_CPU_X86_64) - -// 2 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 2 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, A1, A2, A3, A4, A5, A6), - 
const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, A1, A2, A3, A4, A5, A6), const P1& p1, - const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3)); - return 
MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1, X2, X3), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1, X2, X3), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1), const P1& p1, const P2& p2, - const P3& p3) { - 
MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1, A2), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1, A2), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3)); - 
return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1, A2), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1, A2), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1, A2, A3), const P1& p1, const P2& p2, - const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -#ifdef 
GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING 
-template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline 
MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 3 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, A1, A2, A3, A4, A5, A6), const P1& p1, - const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline 
MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1, X2, X3, X4), const P1& p1, const P2& p2, - const P3& p3, const P4& p4) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline 
MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, A1), const P1& p1, const P2& p2, - const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > 
-CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, A1, A2), const P1& p1, const P2& p2, - const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef 
GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, 
MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& 
p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor 
> -CreateFunctor(R (*function)(X1, X2, X3, X4, A1, A2, A3, A4, A5), const P1& p1, - const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, - A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, - A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 4 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - 
new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, - A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, A1, A2, A3, A4, - A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 0 -template -inline MutantFunctor 
-CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1, X2, X3, X4, X5), const P1& p1, const P2& p2, - const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif 
// defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1), const P1& p1, const P2& p2, - const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new 
MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 2 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline 
MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const 
P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1, A2, A3, A4), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, 
MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) 
-template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4, A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4, A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 5 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3, 
const P4& p4, - const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4, A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, A1, A2, A3, - A4, A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 0 -template -inline MutantFunctor -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6), const P1& p1, const P2& p2, - const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, 
MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner* t = - new Mutant, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} - -template -inline MutantFunctor -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner* t = - new MutantFunction, Tuple0> - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner* t = - new MutantLateObjectBind, Tuple0> - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 1 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); 
-} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple1 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple1 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 2 -template -inline 
MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1, A2), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1, A2), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple2 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2), - const P1& p1, const P2& p2, const P3& p3, 
const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple2 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 3 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1, A2, A3), const P1& p1, - const P2& p2, const P3& p3, const P4& p4, const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1, A2, A3), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& 
p6) { - MutantRunner >* t = - new MutantFunction, Tuple3 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple3 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 4 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4), const P1& p1, const P2& p2, const P3& p3, 
const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple4 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple4 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 5 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, - A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, A5), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, - A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new 
MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4, A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, - A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple5 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4, A5), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple5 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -// 6 - 6 -template -inline MutantFunctor > -CreateFunctor(T* obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (*function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, A5, A6), - const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - 
MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, A5, - A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, const P5& p5, - const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING - -#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64) -template -inline MutantFunctor > -CreateFunctor(T* obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4, A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new Mutant, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} - -template -inline MutantFunctor > -CreateFunctor(R (__stdcall *function)(X1, X2, X3, X4, X5, X6, A1, A2, A3, A4, - A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantFunction, Tuple6 > - (function, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -template -inline MutantFunctor > -CreateFunctor(T** obj, R (__stdcall U::*method)(X1, X2, X3, X4, X5, X6, A1, A2, - A3, A4, A5, A6), const P1& p1, const P2& p2, const P3& p3, const P4& p4, - const P5& p5, const P6& p6) { - MutantRunner >* t = - new MutantLateObjectBind, Tuple6 > - (obj, method, MakeTuple(p1, p2, p3, p4, p5, p6)); - return MutantFunctor >(t); -} -#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING -#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64) - -} // namespace testing - -#endif // TESTING_GMOCK_MUTANT_H_ diff --git a/packager/testing/gtest.gyp b/packager/testing/gtest.gyp 
deleted file mode 100644 index 2e5510f897..0000000000 --- a/packager/testing/gtest.gyp +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'includes': [ - 'gtest.gypi', - ], - 'targets': [ - { - 'target_name': 'gtest', - 'toolsets': ['host', 'target'], - 'type': 'static_library', - 'sources': [ - '<@(gtest_sources)', - ], - 'include_dirs': [ - 'gtest', - 'gtest/include', - ], - 'dependencies': [ - 'gtest_prod', - ], - 'defines': [ - # In order to allow regex matches in gtest to be shared between Windows - # and other systems, we tell gtest to always use it's internal engine. - 'GTEST_HAS_POSIX_RE=0', - 'GTEST_LANG_CXX11=1', - ], - 'all_dependent_settings': { - 'defines': [ - 'GTEST_HAS_POSIX_RE=0', - 'GTEST_LANG_CXX11=1', - ], - }, - 'conditions': [ - ['OS == "mac" or OS == "ios"', { - 'sources': [ - 'gtest_mac.h', - 'gtest_mac.mm', - ], - 'link_settings': { - 'libraries': [ - '$(SDKROOT)/System/Library/Frameworks/Foundation.framework', - ], - }, - }], - ['OS == "mac"', { - 'sources': [ - 'platform_test_mac.mm', - ], - }], - ['OS == "ios"', { - 'dependencies' : [ - '<(DEPTH)/testing/iossim/iossim.gyp:iossim#host', - ], - 'direct_dependent_settings': { - 'target_conditions': [ - # Turn all tests into bundles on iOS because that's the only - # type of executable supported for iOS. - ['_type=="executable"', { - 'variables': { - # Use a variable so the path gets fixed up so it is always - # correct when INFOPLIST_FILE finally gets set. 
- 'ios_unittest_info_plist_path': - '<(DEPTH)/testing/gtest_ios/unittest-Info.plist', - }, - 'mac_bundle': 1, - 'xcode_settings': { - 'BUNDLE_ID_TEST_NAME': - '>!(echo ">(_target_name)" | sed -e "s/_//g")', - 'INFOPLIST_FILE': '>(ios_unittest_info_plist_path)', - }, - 'mac_bundle_resources': [ - '<(ios_unittest_info_plist_path)', - '<(DEPTH)/testing/gtest_ios/Default-568h@2x.png', - ], - 'mac_bundle_resources!': [ - '<(ios_unittest_info_plist_path)', - ], - }], - ], - }, - 'sources': [ - 'coverage_util_ios.cc', - 'coverage_util_ios.h', - 'platform_test_ios.mm', - ], - }], - ['OS=="ios" and asan==1', { - 'direct_dependent_settings': { - 'target_conditions': [ - # Package the ASan runtime dylib into the test app bundles. - ['_type=="executable"', { - 'postbuilds': [ - { - 'variables': { - # Define copy_asan_dylib_path in a variable ending in - # _path so that gyp understands it's a path and - # performs proper relativization during dict merging. - 'copy_asan_dylib_path': - '<(DEPTH)/build/mac/copy_asan_runtime_dylib.sh', - }, - 'postbuild_name': 'Copy ASan runtime dylib', - 'action': [ - '>(copy_asan_dylib_path)', - ], - }, - ], - }], - ], - }, - }], - ['os_posix == 1', { - 'defines': [ - # gtest isn't able to figure out when RTTI is disabled for gcc - # versions older than 4.3.2, and assumes it's enabled. Our Mac - # and Linux builds disable RTTI, and cannot guarantee that the - # compiler will be 4.3.2. or newer. The Mac, for example, uses - # 4.2.1 as that is the latest available on that platform. gtest - # must be instructed that RTTI is disabled here, and for any - # direct dependents that might include gtest headers. 
- 'GTEST_HAS_RTTI=0', - ], - 'direct_dependent_settings': { - 'defines': [ - 'GTEST_HAS_RTTI=0', - ], - }, - }], - ['OS=="android" and android_app_abi=="x86"', { - 'defines': [ - 'GTEST_HAS_CLONE=0', - ], - 'direct_dependent_settings': { - 'defines': [ - 'GTEST_HAS_CLONE=0', - ], - }, - }], - ['OS=="android"', { - # We want gtest features that use tr1::tuple, but we currently - # don't support the variadic templates used by libstdc++'s - # implementation. gtest supports this scenario by providing its - # own implementation but we must opt in to it. - 'defines': [ - 'GTEST_USE_OWN_TR1_TUPLE=1', - # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set. - # gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0 - # automatically on android, so it has to be set explicitly here. - 'GTEST_HAS_TR1_TUPLE=1', - ], - 'direct_dependent_settings': { - 'defines': [ - 'GTEST_USE_OWN_TR1_TUPLE=1', - 'GTEST_HAS_TR1_TUPLE=1', - ], - }, - }], - ], - 'direct_dependent_settings': { - 'defines': [ - 'UNIT_TEST', - ], - 'include_dirs': [ - 'gtest/include', # So that gtest headers can find themselves. - ], - 'target_conditions': [ - ['_type=="executable"', { - 'test': 1, - 'conditions': [ - ['OS=="mac"', { - 'run_as': { - 'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'], - }, - }], - ['OS=="ios"', { - 'variables': { - # Use a variable so the path gets fixed up so it is always - # correct when the action finally gets used. 
- 'ios_run_unittest_script_path': - '<(DEPTH)/testing/gtest_ios/run-unittest.sh', - }, - 'run_as': { - 'action????': ['>(ios_run_unittest_script_path)'], - }, - }], - ['OS=="win"', { - 'run_as': { - 'action????': ['$(TargetPath)', '--gtest_print_time'], - }, - }], - ], - }], - ], - 'msvs_disabled_warnings': [4800], - }, - }, - { - 'target_name': 'gtest_main', - 'type': 'static_library', - 'dependencies': [ - 'gtest', - ], - 'sources': [ - 'gtest/src/gtest_main.cc', - ], - }, - { - 'target_name': 'gtest_prod', - 'toolsets': ['host', 'target'], - 'type': 'none', - 'sources': [ - 'gtest/include/gtest/gtest_prod.h', - ], - }, - ], -} diff --git a/packager/testing/gtest.gypi b/packager/testing/gtest.gypi deleted file mode 100644 index ca12e1b1ee..0000000000 --- a/packager/testing/gtest.gypi +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'variables': { - 'gtest_sources': [ - 'gtest/include/gtest/gtest-death-test.h', - 'gtest/include/gtest/gtest-message.h', - 'gtest/include/gtest/gtest-param-test.h', - 'gtest/include/gtest/gtest-printers.h', - 'gtest/include/gtest/gtest-spi.h', - 'gtest/include/gtest/gtest-test-part.h', - 'gtest/include/gtest/gtest-typed-test.h', - 'gtest/include/gtest/gtest.h', - 'gtest/include/gtest/gtest_pred_impl.h', - 'gtest/include/gtest/gtest_prod.h', - 'gtest/include/gtest/internal/gtest-death-test-internal.h', - 'gtest/include/gtest/internal/gtest-filepath.h', - 'gtest/include/gtest/internal/gtest-internal.h', - 'gtest/include/gtest/internal/gtest-linked_ptr.h', - 'gtest/include/gtest/internal/gtest-param-util-generated.h', - 'gtest/include/gtest/internal/gtest-param-util.h', - 'gtest/include/gtest/internal/gtest-port.h', - 'gtest/include/gtest/internal/gtest-string.h', - 'gtest/include/gtest/internal/gtest-tuple.h', - 'gtest/include/gtest/internal/gtest-type-util.h', - 
'gtest/src/gtest-death-test.cc', - 'gtest/src/gtest-filepath.cc', - 'gtest/src/gtest-internal-inl.h', - 'gtest/src/gtest-port.cc', - 'gtest/src/gtest-printers.cc', - 'gtest/src/gtest-test-part.cc', - 'gtest/src/gtest-typed-test.cc', - 'gtest/src/gtest.cc', - 'multiprocess_func_list.cc', - 'multiprocess_func_list.h', - 'platform_test.h', - ], - }, -} diff --git a/packager/testing/gtest_mac.h b/packager/testing/gtest_mac.h deleted file mode 100644 index aa48c94543..0000000000 --- a/packager/testing/gtest_mac.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2010 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef TESTING_GTEST_MAC_H_ -#define TESTING_GTEST_MAC_H_ - -#include -#include - -#ifdef GTEST_OS_MAC - -#import - -namespace testing { -namespace internal { - -// This overloaded version allows comparison between ObjC objects that conform -// to the NSObject protocol. Used to implement {ASSERT|EXPECT}_EQ(). -GTEST_API_ AssertionResult CmpHelperNSEQ(const char* expected_expression, - const char* actual_expression, - id expected, - id actual); - -// This overloaded version allows comparison between ObjC objects that conform -// to the NSObject protocol. Used to implement {ASSERT|EXPECT}_NE(). -GTEST_API_ AssertionResult CmpHelperNSNE(const char* expected_expression, - const char* actual_expression, - id expected, - id actual); - -} // namespace internal -} // namespace testing - -// Tests that [expected isEqual:actual]. 
-#define EXPECT_NSEQ(expected, actual) \ - EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNSEQ, expected, actual) -#define EXPECT_NSNE(val1, val2) \ - EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNSNE, val1, val2) - -#define ASSERT_NSEQ(expected, actual) \ - ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNSEQ, expected, actual) -#define ASSERT_NSNE(val1, val2) \ - ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNSNE, val1, val2) - -#endif // GTEST_OS_MAC - -#endif // TESTING_GTEST_MAC_H_ diff --git a/packager/testing/gtest_mac.mm b/packager/testing/gtest_mac.mm deleted file mode 100644 index b39d258c13..0000000000 --- a/packager/testing/gtest_mac.mm +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2010 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#import "gtest_mac.h" - -#include - -#include -#include -#include - -#ifdef GTEST_OS_MAC - -#import - -namespace testing { -namespace internal { - -// Handles nil values for |obj| properly by using safe printing of %@ in -// -stringWithFormat:. -static inline const char* StringDescription(id obj) { - return [[NSString stringWithFormat:@"%@", obj] UTF8String]; -} - -// This overloaded version allows comparison between ObjC objects that conform -// to the NSObject protocol. Used to implement {ASSERT|EXPECT}_EQ(). -GTEST_API_ AssertionResult CmpHelperNSEQ(const char* expected_expression, - const char* actual_expression, - id expected, - id actual) { - if (expected == actual || [expected isEqual:actual]) { - return AssertionSuccess(); - } - return EqFailure(expected_expression, - actual_expression, - std::string(StringDescription(expected)), - std::string(StringDescription(actual)), - false); -} - -// This overloaded version allows comparison between ObjC objects that conform -// to the NSObject protocol. Used to implement {ASSERT|EXPECT}_NE(). 
-GTEST_API_ AssertionResult CmpHelperNSNE(const char* expected_expression, - const char* actual_expression, - id expected, - id actual) { - if (expected != actual && ![expected isEqual:actual]) { - return AssertionSuccess(); - } - Message msg; - msg << "Expected: (" << expected_expression << ") != (" << actual_expression - << "), actual: " << StringDescription(expected) - << " vs " << StringDescription(actual); - return AssertionFailure(msg); -} - -} // namespace internal -} // namespace testing - -#endif // GTEST_OS_MAC diff --git a/packager/testing/gtest_mac_unittest.mm b/packager/testing/gtest_mac_unittest.mm deleted file mode 100644 index 9363b410fd..0000000000 --- a/packager/testing/gtest_mac_unittest.mm +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Note that while this file is in testing/ and tests GTest macros, it is built -// as part of Chromium's unit_tests target because the project does not build -// or run GTest's internal test suite. 
- -#import "testing/gtest_mac.h" - -#import - -#include "base/mac/scoped_nsautorelease_pool.h" -#include "testing/gtest/include/gtest/internal/gtest-port.h" -#include "testing/gtest/include/gtest/gtest.h" - -TEST(GTestMac, ExpectNSEQ) { - base::mac::ScopedNSAutoreleasePool pool; - - EXPECT_NSEQ(@"a", @"a"); - - NSString* s1 = [NSString stringWithUTF8String:"a"]; - NSString* s2 = @"a"; - EXPECT_NE(s1, s2); - EXPECT_NSEQ(s1, s2); -} - -TEST(GTestMac, AssertNSEQ) { - base::mac::ScopedNSAutoreleasePool pool; - - NSString* s1 = [NSString stringWithUTF8String:"a"]; - NSString* s2 = @"a"; - EXPECT_NE(s1, s2); - ASSERT_NSEQ(s1, s2); -} - -TEST(GTestMac, ExpectNSNE) { - base::mac::ScopedNSAutoreleasePool pool; - - EXPECT_NSNE([NSNumber numberWithInt:2], [NSNumber numberWithInt:42]); -} - -TEST(GTestMac, AssertNSNE) { - base::mac::ScopedNSAutoreleasePool pool; - - ASSERT_NSNE(@"a", @"b"); -} - -TEST(GTestMac, ExpectNSNil) { - base::mac::ScopedNSAutoreleasePool pool; - - EXPECT_NSEQ(nil, nil); - EXPECT_NSNE(nil, @"a"); - EXPECT_NSNE(@"a", nil); - - // TODO(shess): Test that EXPECT_NSNE(nil, nil) fails. -} diff --git a/packager/testing/multiprocess_func_list.cc b/packager/testing/multiprocess_func_list.cc deleted file mode 100644 index 49ae07dd3e..0000000000 --- a/packager/testing/multiprocess_func_list.cc +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "multiprocess_func_list.h" - -#include - -// Helper functions to maintain mapping of "test name"->test func. -// The information is accessed via a global map. 
-namespace multi_process_function_list { - -namespace { - -struct ProcessFunctions { - ProcessFunctions() : main(NULL), setup(NULL) {} - ProcessFunctions(TestMainFunctionPtr main, SetupFunctionPtr setup) - : main(main), - setup(setup) { - } - TestMainFunctionPtr main; - SetupFunctionPtr setup; -}; - -typedef std::map MultiProcessTestMap; - -// Retrieve a reference to the global 'func name' -> func ptr map. -MultiProcessTestMap& GetMultiprocessFuncMap() { - static MultiProcessTestMap test_name_to_func_ptr_map; - return test_name_to_func_ptr_map; -} - -} // namespace - -AppendMultiProcessTest::AppendMultiProcessTest( - std::string test_name, - TestMainFunctionPtr main_func_ptr, - SetupFunctionPtr setup_func_ptr) { - GetMultiprocessFuncMap()[test_name] = - ProcessFunctions(main_func_ptr, setup_func_ptr); -} - -int InvokeChildProcessTest(std::string test_name) { - MultiProcessTestMap& func_lookup_table = GetMultiprocessFuncMap(); - MultiProcessTestMap::iterator it = func_lookup_table.find(test_name); - if (it != func_lookup_table.end()) { - const ProcessFunctions& process_functions = it->second; - if (process_functions.setup) - (*process_functions.setup)(); - if (process_functions.main) - return (*process_functions.main)(); - } - - return -1; -} - -} // namespace multi_process_function_list diff --git a/packager/testing/multiprocess_func_list.h b/packager/testing/multiprocess_func_list.h deleted file mode 100644 index f806d53c93..0000000000 --- a/packager/testing/multiprocess_func_list.h +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef TESTING_MULTIPROCESS_FUNC_LIST_H_ -#define TESTING_MULTIPROCESS_FUNC_LIST_H_ - -#include - -// This file provides the plumbing to register functions to be executed -// as the main function of a child process in a multi-process test. 
-// This complements the MultiProcessTest class which provides facilities -// for launching such tests. -// -// The MULTIPROCESS_TEST_MAIN() macro registers a string -> func_ptr mapping -// by creating a new global instance of the AppendMultiProcessTest() class -// this means that by the time that we reach our main() function the mapping -// is already in place. -// -// Example usage: -// MULTIPROCESS_TEST_MAIN(a_test_func) { -// // Code here runs in a child process. -// return 0; -// } -// -// The prototype of a_test_func is implicitly -// int test_main_func_name(); - -namespace multi_process_function_list { - -// Type for child process main functions. -typedef int (*TestMainFunctionPtr)(); - -// Type for child setup functions. -typedef void (*SetupFunctionPtr)(); - -// Helper class to append a test function to the global mapping. -// Used by the MULTIPROCESS_TEST_MAIN macro. -class AppendMultiProcessTest { - public: - // |main_func_ptr| is the main function that is run in the child process. - // |setup_func_ptr| is a function run when the global mapping is added. - AppendMultiProcessTest(std::string test_name, - TestMainFunctionPtr main_func_ptr, - SetupFunctionPtr setup_func_ptr); -}; - -// Invoke the main function of a test previously registered with -// MULTIPROCESS_TEST_MAIN() -int InvokeChildProcessTest(std::string test_name); - -// This macro creates a global MultiProcessTest::AppendMultiProcessTest object -// whose constructor does the work of adding the global mapping. -#define MULTIPROCESS_TEST_MAIN(test_main) \ - MULTIPROCESS_TEST_MAIN_WITH_SETUP(test_main, NULL) - -// Same as above but lets callers specify a setup method that is run in the -// child process, just before the main function is run. This facilitates -// adding a generic one-time setup function for multiple tests. 
-#define MULTIPROCESS_TEST_MAIN_WITH_SETUP(test_main, test_setup) \ - int test_main(); \ - namespace { \ - multi_process_function_list::AppendMultiProcessTest \ - AddMultiProcessTest##_##test_main(#test_main, (test_main), (test_setup)); \ - } \ - int test_main() - -} // namespace multi_process_function_list - -#endif // TESTING_MULTIPROCESS_FUNC_LIST_H_ diff --git a/packager/testing/perf/BUILD.gn b/packager/testing/perf/BUILD.gn deleted file mode 100644 index d158f1988a..0000000000 --- a/packager/testing/perf/BUILD.gn +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -source_set("perf") { - sources = [ "perf_test.cc" ] - deps = [ "//base" ] -} diff --git a/packager/testing/perf/perf_test.cc b/packager/testing/perf/perf_test.cc deleted file mode 100644 index 0d5abc01cb..0000000000 --- a/packager/testing/perf/perf_test.cc +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "testing/perf/perf_test.h" - -#include - -#include "base/logging.h" -#include "base/strings/string_number_conversions.h" -#include "base/strings/stringprintf.h" - -namespace { - -std::string ResultsToString(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& prefix, - const std::string& suffix, - const std::string& units, - bool important) { - // <*>RESULT : = - // <*>RESULT : = {, } - // <*>RESULT : = [,value,value,...,] - return base::StringPrintf("%sRESULT %s%s: %s= %s%s%s %s\n", - important ? 
"*" : "", measurement.c_str(), modifier.c_str(), - trace.c_str(), prefix.c_str(), values.c_str(), suffix.c_str(), - units.c_str()); -} - -void PrintResultsImpl(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& prefix, - const std::string& suffix, - const std::string& units, - bool important) { - fflush(stdout); - printf("%s", ResultsToString(measurement, modifier, trace, values, - prefix, suffix, units, important).c_str()); - fflush(stdout); -} - -} // namespace - -namespace perf_test { - -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - size_t value, - const std::string& units, - bool important) { - PrintResultsImpl(measurement, - modifier, - trace, - base::UintToString(static_cast(value)), - std::string(), - std::string(), - units, - important); -} - -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - double value, - const std::string& units, - bool important) { - PrintResultsImpl(measurement, - modifier, - trace, - base::DoubleToString(value), - std::string(), - std::string(), - units, - important); -} - -void AppendResult(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - size_t value, - const std::string& units, - bool important) { - output += ResultsToString( - measurement, - modifier, - trace, - base::UintToString(static_cast(value)), - std::string(), - std::string(), - units, - important); -} - -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& value, - const std::string& units, - bool important) { - PrintResultsImpl(measurement, - modifier, - trace, - value, - std::string(), - std::string(), - units, - important); -} - -void AppendResult(std::string& output, - const std::string& measurement, - const 
std::string& modifier, - const std::string& trace, - const std::string& value, - const std::string& units, - bool important) { - output += ResultsToString(measurement, - modifier, - trace, - value, - std::string(), - std::string(), - units, - important); -} - -void PrintResultMeanAndError(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& mean_and_error, - const std::string& units, - bool important) { - PrintResultsImpl(measurement, modifier, trace, mean_and_error, - "{", "}", units, important); -} - -void AppendResultMeanAndError(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& mean_and_error, - const std::string& units, - bool important) { - output += ResultsToString(measurement, modifier, trace, mean_and_error, - "{", "}", units, important); -} - -void PrintResultList(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& units, - bool important) { - PrintResultsImpl(measurement, modifier, trace, values, - "[", "]", units, important); -} - -void AppendResultList(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& units, - bool important) { - output += ResultsToString(measurement, modifier, trace, values, - "[", "]", units, important); -} - -void PrintSystemCommitCharge(const std::string& test_name, - size_t charge, - bool important) { - PrintSystemCommitCharge(stdout, test_name, charge, important); -} - -void PrintSystemCommitCharge(FILE* target, - const std::string& test_name, - size_t charge, - bool important) { - fprintf(target, "%s", SystemCommitChargeToString(test_name, charge, - important).c_str()); -} - -std::string SystemCommitChargeToString(const std::string& test_name, - size_t charge, - bool important) { - 
std::string trace_name(test_name); - std::string output; - AppendResult(output, - "commit_charge", - std::string(), - "cc" + trace_name, - charge, - "kb", - important); - return output; -} - -} // namespace perf_test diff --git a/packager/testing/perf/perf_test.gyp b/packager/testing/perf/perf_test.gyp deleted file mode 100644 index 2065270c11..0000000000 --- a/packager/testing/perf/perf_test.gyp +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2013 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'targets': [ - { - 'target_name': 'perf_test', - 'type': 'static_library', - 'sources': [ - 'perf_test.cc', - ], - 'dependencies': [ - '../../base/base.gyp:base', - ], - }, - ], -} diff --git a/packager/testing/perf/perf_test.h b/packager/testing/perf/perf_test.h deleted file mode 100644 index 36e2916c5f..0000000000 --- a/packager/testing/perf/perf_test.h +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef TESTING_PERF_PERF_TEST_H_ -#define TESTING_PERF_PERF_TEST_H_ - -#include - -namespace perf_test { - -// Prints numerical information to stdout in a controlled format, for -// post-processing. |measurement| is a description of the quantity being -// measured, e.g. "vm_peak"; |modifier| is provided as a convenience and -// will be appended directly to the name of the |measurement|, e.g. -// "_browser"; |trace| is a description of the particular data point, e.g. -// "reference"; |value| is the measured value; and |units| is a description -// of the units of measure, e.g. "bytes". If |important| is true, the output -// line will be specially marked, to notify the post-processor. The strings -// may be empty. They should not contain any colons (:) or equals signs (=). 
-// A typical post-processing step would be to produce graphs of the data -// produced for various builds, using the combined |measurement| + |modifier| -// string to specify a particular graph and the |trace| to identify a trace -// (i.e., data series) on that graph. -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - size_t value, - const std::string& units, - bool important); -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - double value, - const std::string& units, - bool important); - -void AppendResult(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - size_t value, - const std::string& units, - bool important); - -// Like the above version of PrintResult(), but takes a std::string value -// instead of a size_t. -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& value, - const std::string& units, - bool important); - -void AppendResult(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& value, - const std::string& units, - bool important); - -// Like PrintResult(), but prints a (mean, standard deviation) result pair. -// The || should be two comma-separated numbers, the mean and -// standard deviation (or other error metric) of the measurement. 
-void PrintResultMeanAndError(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& mean_and_error, - const std::string& units, - bool important); - -void AppendResultMeanAndError(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& mean_and_error, - const std::string& units, - bool important); - -// Like PrintResult(), but prints an entire list of results. The |values| -// will generally be a list of comma-separated numbers. A typical -// post-processing step might produce plots of their mean and standard -// deviation. -void PrintResultList(const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& units, - bool important); - -void AppendResultList(std::string& output, - const std::string& measurement, - const std::string& modifier, - const std::string& trace, - const std::string& values, - const std::string& units, - bool important); - -// Prints memory commit charge stats for use by perf graphs. -void PrintSystemCommitCharge(const std::string& test_name, - size_t charge, - bool important); - -void PrintSystemCommitCharge(FILE* target, - const std::string& test_name, - size_t charge, - bool important); - -std::string SystemCommitChargeToString(const std::string& test_name, - size_t charge, - bool important); - -} // namespace perf_test - -#endif // TESTING_PERF_PERF_TEST_H_ diff --git a/packager/testing/platform_test.h b/packager/testing/platform_test.h deleted file mode 100644 index 04fc845bd9..0000000000 --- a/packager/testing/platform_test.h +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef TESTING_PLATFORM_TEST_H_ -#define TESTING_PLATFORM_TEST_H_ - -#include - -#if defined(GTEST_OS_MAC) -#ifdef __OBJC__ -@class NSAutoreleasePool; -#else -class NSAutoreleasePool; -#endif - -// The purpose of this class us to provide a hook for platform-specific -// operations across unit tests. For example, on the Mac, it creates and -// releases an outer NSAutoreleasePool for each test case. For now, it's only -// implemented on the Mac. To enable this for another platform, just adjust -// the #ifdefs and add a platform_test_.cc implementation file. -class PlatformTest : public testing::Test { - public: - virtual ~PlatformTest(); - - protected: - PlatformTest(); - - private: - NSAutoreleasePool* pool_; -}; -#else -typedef testing::Test PlatformTest; -#endif // GTEST_OS_MAC - -#endif // TESTING_PLATFORM_TEST_H_ diff --git a/packager/testing/platform_test_ios.mm b/packager/testing/platform_test_ios.mm deleted file mode 100644 index 5162c1dbb1..0000000000 --- a/packager/testing/platform_test_ios.mm +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "platform_test.h" - -#import - -#include "coverage_util_ios.h" - -PlatformTest::PlatformTest() - : pool_([[NSAutoreleasePool alloc] init]) { -} - -PlatformTest::~PlatformTest() { - [pool_ release]; - coverage_util::FlushCoverageDataIfNecessary(); -} diff --git a/packager/testing/platform_test_mac.mm b/packager/testing/platform_test_mac.mm deleted file mode 100644 index bd22cd5b45..0000000000 --- a/packager/testing/platform_test_mac.mm +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "platform_test.h" - -#import - -PlatformTest::PlatformTest() - : pool_([[NSAutoreleasePool alloc] init]) { -} - -PlatformTest::~PlatformTest() { - [pool_ release]; -} diff --git a/packager/testing/test_dockers.sh b/packager/testing/test_dockers.sh new file mode 100755 index 0000000000..38c4cc58ff --- /dev/null +++ b/packager/testing/test_dockers.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +# Exit on first error. +set -e + +# To debug a failure, run with the variable DEBUG=1. For example: +# DEBUG=1 ./packager/testing/test_dockers.sh + +SCRIPT_DIR="$(dirname "$0")" +PACKAGER_DIR="$(realpath "$SCRIPT_DIR/../..")" +TEMP_BUILD_DIR="$(mktemp -d)" + +function docker_run_internal() { + if [[ "$DEBUG" == "1" ]]; then + # For debugging, allocate an interactive terminal in docker. + INTERACTIVE_ARG="-it" + else + INTERACTIVE_ARG="" + fi + + docker run \ + ${INTERACTIVE_ARG} \ + -v ${PACKAGER_DIR}:/shaka-packager \ + -v ${TEMP_BUILD_DIR}:/shaka-packager/build \ + -w /shaka-packager \ + -e HOME=/tmp \ + --user $(id -u):$(id -g) \ + ${CONTAINER} "$@" +} + +function docker_run() { + if ! docker_run_internal "$@"; then + echo "Command failed in ${CONTAINER}: $@" + if [[ "$DEBUG" == "1" ]]; then + echo "Launching interactive shell to debug." + docker_run_internal /bin/bash + exit 1 + else + echo "Run with DEBUG=1 to debug!" + exit 1 + fi + fi +} + +# Command line arguments will be taken as an allowlist of OSes to run. +# By default, a regex that matches everything. +FILTER=".*" +if [[ $# != 0 ]]; then + # Join arguments with a pipe, to make a regex alternation to match any of + # them. The syntax is a mess, but that's bash. Set IFS (the separator + # variable) in a subshell and print the array. This has the effect of joining + # them by the character in IFS. Then add parentheses to make a complete regex + # to match all the arguments. + FILTER=$(IFS="|"; echo "$*") + FILTER="($FILTER)" +fi + +function on_exit() { + # On exit, print the name of the OS we were on. 
This helps identify what to + # debug when the start of a test run scrolls off-screen. + echo "Failed on $OS_NAME!" + rm -rf "${TEMP_BUILD_DIR}" +} +trap 'on_exit' exit + +echo "Using OS filter: $FILTER" +RAN_SOMETHING=0 +for DOCKER_FILE in ${SCRIPT_DIR}/dockers/*; do + # Take the basename of the dockerfile path, then remove the trailing + # "_Dockerfile" from the file name. This is the OS name. + OS_NAME="$( basename "$DOCKER_FILE" | sed -e 's/_Dockerfile//' )" + + if echo "$OS_NAME" | grep -Eqi "$FILTER"; then + echo "Testing $OS_NAME." + # Fall through. + else + echo "Skipping $OS_NAME." + continue + fi + + # Build a unique container name per OS for debugging purposes and to improve + # caching. Containers names must be in lowercase. + CONTAINER="$( echo "packager_test_${OS_NAME}" | tr A-Z a-z )" + + RAN_SOMETHING=1 + docker build -t ${CONTAINER} -f ${DOCKER_FILE} ${SCRIPT_DIR}/dockers/ + mkdir -p "${TEMP_BUILD_DIR}" + docker_run cmake -S . -B build/ + docker_run make -C build/ + docker_run bash -c "cd build && ctest -V" + rm -rf "${TEMP_BUILD_DIR}" +done + +# Clear the exit trap from above. +trap - exit + +if [[ "$RAN_SOMETHING" == "0" ]]; then + echo "No tests were run! The filter $FILTER did not match any OSes." 1>&2 + exit 1 +fi diff --git a/packager/third_party/CMakeLists.txt b/packager/third_party/CMakeLists.txt new file mode 100644 index 0000000000..84fa4160f0 --- /dev/null +++ b/packager/third_party/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# Packager CMake build file for third-party libraries. + +# Tell third-party libraries generally not to build their own tests. +set(BUILD_TESTING OFF) + +# Tell third-party libraries generally not to build shared library targets. 
+set(BUILD_SHARED_LIBS OFF) + +# Tell CMake that we intend to override some libraries' options with set(). +# By setting this default instead of using cmake_policy(SET CMP0077 NEW), we +# ensure that the defaults are reset when a library calls +# cmake_minimum_required. +# See https://gitlab.kitware.com/cmake/cmake/-/issues/20312 +set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) + +if(MSVC) + # Use warning level 3 in third-party code, as opposed to level 4 used in our + # own code. + add_compile_options(/W3) +endif() + +# These all use EXCLUDE_FROM_ALL so that only the referenced targets get built. +add_subdirectory(abseil-cpp EXCLUDE_FROM_ALL) +add_subdirectory(curl EXCLUDE_FROM_ALL) +add_subdirectory(glog EXCLUDE_FROM_ALL) +add_subdirectory(googletest EXCLUDE_FROM_ALL) +add_subdirectory(json EXCLUDE_FROM_ALL) +add_subdirectory(mbedtls EXCLUDE_FROM_ALL) diff --git a/packager/third_party/abseil-cpp/CMakeLists.txt b/packager/third_party/abseil-cpp/CMakeLists.txt new file mode 100644 index 0000000000..293bd455e2 --- /dev/null +++ b/packager/third_party/abseil-cpp/CMakeLists.txt @@ -0,0 +1,19 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# CMake build file to host abseil-cpp configuration. + +# Variables that must be defined for third-party libraries. + +# This will be the new default, so turn it on now to suppress a warning. +set(ABSL_PROPAGATE_CXX_STD ON) + +# Use the googletest library included with Shaka Packager instead of searching +# for one in the system. +set(ABSL_USE_EXTERNAL_GOOGLETEST ON) + +# With these set in scope of this folder, load the library's own CMakeLists.txt. 
+add_subdirectory(source) diff --git a/packager/third_party/abseil-cpp/source b/packager/third_party/abseil-cpp/source new file mode 160000 index 0000000000..273292d1cf --- /dev/null +++ b/packager/third_party/abseil-cpp/source @@ -0,0 +1 @@ +Subproject commit 273292d1cfc0a94a65082ee350509af1d113344d diff --git a/packager/third_party/boringssl/BUILD.generated.gni b/packager/third_party/boringssl/BUILD.generated.gni deleted file mode 100644 index 56bc2574ea..0000000000 --- a/packager/third_party/boringssl/BUILD.generated.gni +++ /dev/null @@ -1,619 +0,0 @@ -# Copyright (c) 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# This file is created by generate_build_files.py. Do not edit manually. - -crypto_sources = [ - "err_data.c", - "src/crypto/asn1/a_bitstr.c", - "src/crypto/asn1/a_bool.c", - "src/crypto/asn1/a_d2i_fp.c", - "src/crypto/asn1/a_dup.c", - "src/crypto/asn1/a_enum.c", - "src/crypto/asn1/a_gentm.c", - "src/crypto/asn1/a_i2d_fp.c", - "src/crypto/asn1/a_int.c", - "src/crypto/asn1/a_mbstr.c", - "src/crypto/asn1/a_object.c", - "src/crypto/asn1/a_octet.c", - "src/crypto/asn1/a_print.c", - "src/crypto/asn1/a_strnid.c", - "src/crypto/asn1/a_time.c", - "src/crypto/asn1/a_type.c", - "src/crypto/asn1/a_utctm.c", - "src/crypto/asn1/a_utf8.c", - "src/crypto/asn1/asn1_lib.c", - "src/crypto/asn1/asn1_locl.h", - "src/crypto/asn1/asn1_par.c", - "src/crypto/asn1/asn_pack.c", - "src/crypto/asn1/f_enum.c", - "src/crypto/asn1/f_int.c", - "src/crypto/asn1/f_string.c", - "src/crypto/asn1/tasn_dec.c", - "src/crypto/asn1/tasn_enc.c", - "src/crypto/asn1/tasn_fre.c", - "src/crypto/asn1/tasn_new.c", - "src/crypto/asn1/tasn_typ.c", - "src/crypto/asn1/tasn_utl.c", - "src/crypto/asn1/time_support.c", - "src/crypto/base64/base64.c", - "src/crypto/bio/bio.c", - "src/crypto/bio/bio_mem.c", - "src/crypto/bio/connect.c", - "src/crypto/bio/fd.c", - "src/crypto/bio/file.c", - 
"src/crypto/bio/hexdump.c", - "src/crypto/bio/internal.h", - "src/crypto/bio/pair.c", - "src/crypto/bio/printf.c", - "src/crypto/bio/socket.c", - "src/crypto/bio/socket_helper.c", - "src/crypto/bn_extra/bn_asn1.c", - "src/crypto/bn_extra/convert.c", - "src/crypto/buf/buf.c", - "src/crypto/bytestring/asn1_compat.c", - "src/crypto/bytestring/ber.c", - "src/crypto/bytestring/cbb.c", - "src/crypto/bytestring/cbs.c", - "src/crypto/bytestring/internal.h", - "src/crypto/bytestring/unicode.c", - "src/crypto/chacha/chacha.c", - "src/crypto/chacha/internal.h", - "src/crypto/cipher_extra/cipher_extra.c", - "src/crypto/cipher_extra/derive_key.c", - "src/crypto/cipher_extra/e_aesccm.c", - "src/crypto/cipher_extra/e_aesctrhmac.c", - "src/crypto/cipher_extra/e_aesgcmsiv.c", - "src/crypto/cipher_extra/e_chacha20poly1305.c", - "src/crypto/cipher_extra/e_null.c", - "src/crypto/cipher_extra/e_rc2.c", - "src/crypto/cipher_extra/e_rc4.c", - "src/crypto/cipher_extra/e_tls.c", - "src/crypto/cipher_extra/internal.h", - "src/crypto/cipher_extra/tls_cbc.c", - "src/crypto/cmac/cmac.c", - "src/crypto/conf/conf.c", - "src/crypto/conf/conf_def.h", - "src/crypto/conf/internal.h", - "src/crypto/cpu-aarch64-fuchsia.c", - "src/crypto/cpu-aarch64-linux.c", - "src/crypto/cpu-arm-linux.c", - "src/crypto/cpu-arm-linux.h", - "src/crypto/cpu-arm.c", - "src/crypto/cpu-intel.c", - "src/crypto/cpu-ppc64le.c", - "src/crypto/crypto.c", - "src/crypto/curve25519/spake25519.c", - "src/crypto/dh/check.c", - "src/crypto/dh/dh.c", - "src/crypto/dh/dh_asn1.c", - "src/crypto/dh/params.c", - "src/crypto/digest_extra/digest_extra.c", - "src/crypto/dsa/dsa.c", - "src/crypto/dsa/dsa_asn1.c", - "src/crypto/ec_extra/ec_asn1.c", - "src/crypto/ec_extra/ec_derive.c", - "src/crypto/ecdh_extra/ecdh_extra.c", - "src/crypto/ecdsa_extra/ecdsa_asn1.c", - "src/crypto/engine/engine.c", - "src/crypto/err/err.c", - "src/crypto/err/internal.h", - "src/crypto/evp/digestsign.c", - "src/crypto/evp/evp.c", - "src/crypto/evp/evp_asn1.c", - 
"src/crypto/evp/evp_ctx.c", - "src/crypto/evp/internal.h", - "src/crypto/evp/p_dsa_asn1.c", - "src/crypto/evp/p_ec.c", - "src/crypto/evp/p_ec_asn1.c", - "src/crypto/evp/p_ed25519.c", - "src/crypto/evp/p_ed25519_asn1.c", - "src/crypto/evp/p_rsa.c", - "src/crypto/evp/p_rsa_asn1.c", - "src/crypto/evp/p_x25519.c", - "src/crypto/evp/p_x25519_asn1.c", - "src/crypto/evp/pbkdf.c", - "src/crypto/evp/print.c", - "src/crypto/evp/scrypt.c", - "src/crypto/evp/sign.c", - "src/crypto/ex_data.c", - "src/crypto/fipsmodule/aes/internal.h", - "src/crypto/fipsmodule/bcm.c", - "src/crypto/fipsmodule/bn/internal.h", - "src/crypto/fipsmodule/bn/rsaz_exp.h", - "src/crypto/fipsmodule/cipher/internal.h", - "src/crypto/fipsmodule/delocate.h", - "src/crypto/fipsmodule/des/internal.h", - "src/crypto/fipsmodule/digest/internal.h", - "src/crypto/fipsmodule/digest/md32_common.h", - "src/crypto/fipsmodule/ec/internal.h", - "src/crypto/fipsmodule/ec/p256-x86_64-table.h", - "src/crypto/fipsmodule/ec/p256-x86_64.h", - "src/crypto/fipsmodule/fips_shared_support.c", - "src/crypto/fipsmodule/is_fips.c", - "src/crypto/fipsmodule/md5/internal.h", - "src/crypto/fipsmodule/modes/internal.h", - "src/crypto/fipsmodule/rand/internal.h", - "src/crypto/fipsmodule/rsa/internal.h", - "src/crypto/fipsmodule/sha/internal.h", - "src/crypto/fipsmodule/tls/internal.h", - "src/crypto/hkdf/hkdf.c", - "src/crypto/hrss/hrss.c", - "src/crypto/hrss/internal.h", - "src/crypto/internal.h", - "src/crypto/lhash/lhash.c", - "src/crypto/mem.c", - "src/crypto/obj/obj.c", - "src/crypto/obj/obj_dat.h", - "src/crypto/obj/obj_xref.c", - "src/crypto/pem/pem_all.c", - "src/crypto/pem/pem_info.c", - "src/crypto/pem/pem_lib.c", - "src/crypto/pem/pem_oth.c", - "src/crypto/pem/pem_pk8.c", - "src/crypto/pem/pem_pkey.c", - "src/crypto/pem/pem_x509.c", - "src/crypto/pem/pem_xaux.c", - "src/crypto/pkcs7/internal.h", - "src/crypto/pkcs7/pkcs7.c", - "src/crypto/pkcs7/pkcs7_x509.c", - "src/crypto/pkcs8/internal.h", - "src/crypto/pkcs8/p5_pbev2.c", 
- "src/crypto/pkcs8/pkcs8.c", - "src/crypto/pkcs8/pkcs8_x509.c", - "src/crypto/poly1305/internal.h", - "src/crypto/poly1305/poly1305.c", - "src/crypto/poly1305/poly1305_arm.c", - "src/crypto/poly1305/poly1305_vec.c", - "src/crypto/pool/internal.h", - "src/crypto/pool/pool.c", - "src/crypto/rand_extra/deterministic.c", - "src/crypto/rand_extra/forkunsafe.c", - "src/crypto/rand_extra/fuchsia.c", - "src/crypto/rand_extra/rand_extra.c", - "src/crypto/rand_extra/windows.c", - "src/crypto/rc4/rc4.c", - "src/crypto/refcount_c11.c", - "src/crypto/refcount_lock.c", - "src/crypto/rsa_extra/rsa_asn1.c", - "src/crypto/rsa_extra/rsa_print.c", - "src/crypto/siphash/siphash.c", - "src/crypto/stack/stack.c", - "src/crypto/thread.c", - "src/crypto/thread_none.c", - "src/crypto/thread_pthread.c", - "src/crypto/thread_win.c", - "src/crypto/x509/a_digest.c", - "src/crypto/x509/a_sign.c", - "src/crypto/x509/a_strex.c", - "src/crypto/x509/a_verify.c", - "src/crypto/x509/algorithm.c", - "src/crypto/x509/asn1_gen.c", - "src/crypto/x509/by_dir.c", - "src/crypto/x509/by_file.c", - "src/crypto/x509/charmap.h", - "src/crypto/x509/i2d_pr.c", - "src/crypto/x509/internal.h", - "src/crypto/x509/rsa_pss.c", - "src/crypto/x509/t_crl.c", - "src/crypto/x509/t_req.c", - "src/crypto/x509/t_x509.c", - "src/crypto/x509/t_x509a.c", - "src/crypto/x509/vpm_int.h", - "src/crypto/x509/x509.c", - "src/crypto/x509/x509_att.c", - "src/crypto/x509/x509_cmp.c", - "src/crypto/x509/x509_d2.c", - "src/crypto/x509/x509_def.c", - "src/crypto/x509/x509_ext.c", - "src/crypto/x509/x509_lu.c", - "src/crypto/x509/x509_obj.c", - "src/crypto/x509/x509_r2x.c", - "src/crypto/x509/x509_req.c", - "src/crypto/x509/x509_set.c", - "src/crypto/x509/x509_trs.c", - "src/crypto/x509/x509_txt.c", - "src/crypto/x509/x509_v3.c", - "src/crypto/x509/x509_vfy.c", - "src/crypto/x509/x509_vpm.c", - "src/crypto/x509/x509cset.c", - "src/crypto/x509/x509name.c", - "src/crypto/x509/x509rset.c", - "src/crypto/x509/x509spki.c", - 
"src/crypto/x509/x_algor.c", - "src/crypto/x509/x_all.c", - "src/crypto/x509/x_attrib.c", - "src/crypto/x509/x_crl.c", - "src/crypto/x509/x_exten.c", - "src/crypto/x509/x_info.c", - "src/crypto/x509/x_name.c", - "src/crypto/x509/x_pkey.c", - "src/crypto/x509/x_pubkey.c", - "src/crypto/x509/x_req.c", - "src/crypto/x509/x_sig.c", - "src/crypto/x509/x_spki.c", - "src/crypto/x509/x_val.c", - "src/crypto/x509/x_x509.c", - "src/crypto/x509/x_x509a.c", - "src/crypto/x509v3/ext_dat.h", - "src/crypto/x509v3/internal.h", - "src/crypto/x509v3/pcy_cache.c", - "src/crypto/x509v3/pcy_data.c", - "src/crypto/x509v3/pcy_int.h", - "src/crypto/x509v3/pcy_lib.c", - "src/crypto/x509v3/pcy_map.c", - "src/crypto/x509v3/pcy_node.c", - "src/crypto/x509v3/pcy_tree.c", - "src/crypto/x509v3/v3_akey.c", - "src/crypto/x509v3/v3_akeya.c", - "src/crypto/x509v3/v3_alt.c", - "src/crypto/x509v3/v3_bcons.c", - "src/crypto/x509v3/v3_bitst.c", - "src/crypto/x509v3/v3_conf.c", - "src/crypto/x509v3/v3_cpols.c", - "src/crypto/x509v3/v3_crld.c", - "src/crypto/x509v3/v3_enum.c", - "src/crypto/x509v3/v3_extku.c", - "src/crypto/x509v3/v3_genn.c", - "src/crypto/x509v3/v3_ia5.c", - "src/crypto/x509v3/v3_info.c", - "src/crypto/x509v3/v3_int.c", - "src/crypto/x509v3/v3_lib.c", - "src/crypto/x509v3/v3_ncons.c", - "src/crypto/x509v3/v3_ocsp.c", - "src/crypto/x509v3/v3_pci.c", - "src/crypto/x509v3/v3_pcia.c", - "src/crypto/x509v3/v3_pcons.c", - "src/crypto/x509v3/v3_pku.c", - "src/crypto/x509v3/v3_pmaps.c", - "src/crypto/x509v3/v3_prn.c", - "src/crypto/x509v3/v3_purp.c", - "src/crypto/x509v3/v3_skey.c", - "src/crypto/x509v3/v3_sxnet.c", - "src/crypto/x509v3/v3_utl.c", - "src/third_party/fiat/curve25519.c", - "src/third_party/fiat/curve25519_32.h", - "src/third_party/fiat/curve25519_64.h", - "src/third_party/fiat/curve25519_tables.h", - "src/third_party/fiat/internal.h", - "src/third_party/fiat/p256_32.h", - "src/third_party/fiat/p256_64.h", - "src/third_party/sike/asm/fp_generic.c", - 
"src/third_party/sike/curve_params.c", - "src/third_party/sike/fpx.c", - "src/third_party/sike/fpx.h", - "src/third_party/sike/isogeny.c", - "src/third_party/sike/isogeny.h", - "src/third_party/sike/sike.c", - "src/third_party/sike/sike.h", - "src/third_party/sike/utils.h", -] - -crypto_headers = [ - "src/include/openssl/aead.h", - "src/include/openssl/aes.h", - "src/include/openssl/arm_arch.h", - "src/include/openssl/asn1.h", - "src/include/openssl/asn1_mac.h", - "src/include/openssl/asn1t.h", - "src/include/openssl/base.h", - "src/include/openssl/base64.h", - "src/include/openssl/bio.h", - "src/include/openssl/blowfish.h", - "src/include/openssl/bn.h", - "src/include/openssl/buf.h", - "src/include/openssl/buffer.h", - "src/include/openssl/bytestring.h", - "src/include/openssl/cast.h", - "src/include/openssl/chacha.h", - "src/include/openssl/cipher.h", - "src/include/openssl/cmac.h", - "src/include/openssl/conf.h", - "src/include/openssl/cpu.h", - "src/include/openssl/crypto.h", - "src/include/openssl/curve25519.h", - "src/include/openssl/des.h", - "src/include/openssl/dh.h", - "src/include/openssl/digest.h", - "src/include/openssl/dsa.h", - "src/include/openssl/e_os2.h", - "src/include/openssl/ec.h", - "src/include/openssl/ec_key.h", - "src/include/openssl/ecdh.h", - "src/include/openssl/ecdsa.h", - "src/include/openssl/engine.h", - "src/include/openssl/err.h", - "src/include/openssl/evp.h", - "src/include/openssl/ex_data.h", - "src/include/openssl/hkdf.h", - "src/include/openssl/hmac.h", - "src/include/openssl/hrss.h", - "src/include/openssl/is_boringssl.h", - "src/include/openssl/lhash.h", - "src/include/openssl/md4.h", - "src/include/openssl/md5.h", - "src/include/openssl/mem.h", - "src/include/openssl/nid.h", - "src/include/openssl/obj.h", - "src/include/openssl/obj_mac.h", - "src/include/openssl/objects.h", - "src/include/openssl/opensslconf.h", - "src/include/openssl/opensslv.h", - "src/include/openssl/ossl_typ.h", - "src/include/openssl/pem.h", - 
"src/include/openssl/pkcs12.h", - "src/include/openssl/pkcs7.h", - "src/include/openssl/pkcs8.h", - "src/include/openssl/poly1305.h", - "src/include/openssl/pool.h", - "src/include/openssl/rand.h", - "src/include/openssl/rc4.h", - "src/include/openssl/ripemd.h", - "src/include/openssl/rsa.h", - "src/include/openssl/safestack.h", - "src/include/openssl/sha.h", - "src/include/openssl/siphash.h", - "src/include/openssl/span.h", - "src/include/openssl/stack.h", - "src/include/openssl/thread.h", - "src/include/openssl/type_check.h", - "src/include/openssl/x509.h", - "src/include/openssl/x509_vfy.h", - "src/include/openssl/x509v3.h", -] - -ssl_sources = [ - "src/ssl/bio_ssl.cc", - "src/ssl/d1_both.cc", - "src/ssl/d1_lib.cc", - "src/ssl/d1_pkt.cc", - "src/ssl/d1_srtp.cc", - "src/ssl/dtls_method.cc", - "src/ssl/dtls_record.cc", - "src/ssl/handoff.cc", - "src/ssl/handshake.cc", - "src/ssl/handshake_client.cc", - "src/ssl/handshake_server.cc", - "src/ssl/internal.h", - "src/ssl/s3_both.cc", - "src/ssl/s3_lib.cc", - "src/ssl/s3_pkt.cc", - "src/ssl/ssl_aead_ctx.cc", - "src/ssl/ssl_asn1.cc", - "src/ssl/ssl_buffer.cc", - "src/ssl/ssl_cert.cc", - "src/ssl/ssl_cipher.cc", - "src/ssl/ssl_file.cc", - "src/ssl/ssl_key_share.cc", - "src/ssl/ssl_lib.cc", - "src/ssl/ssl_privkey.cc", - "src/ssl/ssl_session.cc", - "src/ssl/ssl_stat.cc", - "src/ssl/ssl_transcript.cc", - "src/ssl/ssl_versions.cc", - "src/ssl/ssl_x509.cc", - "src/ssl/t1_enc.cc", - "src/ssl/t1_lib.cc", - "src/ssl/tls13_both.cc", - "src/ssl/tls13_client.cc", - "src/ssl/tls13_enc.cc", - "src/ssl/tls13_server.cc", - "src/ssl/tls_method.cc", - "src/ssl/tls_record.cc", -] - -ssl_headers = [ - "src/include/openssl/dtls1.h", - "src/include/openssl/srtp.h", - "src/include/openssl/ssl.h", - "src/include/openssl/ssl3.h", - "src/include/openssl/tls1.h", -] - -crypto_sources_ios_aarch64 = [ - "ios-aarch64/crypto/chacha/chacha-armv8.S", - "ios-aarch64/crypto/fipsmodule/aesv8-armx64.S", - "ios-aarch64/crypto/fipsmodule/armv8-mont.S", - 
"ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S", - "ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S", - "ios-aarch64/crypto/fipsmodule/sha1-armv8.S", - "ios-aarch64/crypto/fipsmodule/sha256-armv8.S", - "ios-aarch64/crypto/fipsmodule/sha512-armv8.S", - "ios-aarch64/crypto/fipsmodule/vpaes-armv8.S", - "ios-aarch64/crypto/test/trampoline-armv8.S", - "ios-aarch64/crypto/third_party/sike/asm/fp-armv8.S", -] - -crypto_sources_ios_arm = [ - "ios-arm/crypto/chacha/chacha-armv4.S", - "ios-arm/crypto/fipsmodule/aes-armv4.S", - "ios-arm/crypto/fipsmodule/aesv8-armx32.S", - "ios-arm/crypto/fipsmodule/armv4-mont.S", - "ios-arm/crypto/fipsmodule/bsaes-armv7.S", - "ios-arm/crypto/fipsmodule/ghash-armv4.S", - "ios-arm/crypto/fipsmodule/ghashv8-armx32.S", - "ios-arm/crypto/fipsmodule/sha1-armv4-large.S", - "ios-arm/crypto/fipsmodule/sha256-armv4.S", - "ios-arm/crypto/fipsmodule/sha512-armv4.S", - "ios-arm/crypto/fipsmodule/vpaes-armv7.S", - "ios-arm/crypto/test/trampoline-armv4.S", -] - -crypto_sources_linux_aarch64 = [ - "linux-aarch64/crypto/chacha/chacha-armv8.S", - "linux-aarch64/crypto/fipsmodule/aesv8-armx64.S", - "linux-aarch64/crypto/fipsmodule/armv8-mont.S", - "linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S", - "linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S", - "linux-aarch64/crypto/fipsmodule/sha1-armv8.S", - "linux-aarch64/crypto/fipsmodule/sha256-armv8.S", - "linux-aarch64/crypto/fipsmodule/sha512-armv8.S", - "linux-aarch64/crypto/fipsmodule/vpaes-armv8.S", - "linux-aarch64/crypto/test/trampoline-armv8.S", - "linux-aarch64/crypto/third_party/sike/asm/fp-armv8.S", -] - -crypto_sources_linux_arm = [ - "linux-arm/crypto/chacha/chacha-armv4.S", - "linux-arm/crypto/fipsmodule/aes-armv4.S", - "linux-arm/crypto/fipsmodule/aesv8-armx32.S", - "linux-arm/crypto/fipsmodule/armv4-mont.S", - "linux-arm/crypto/fipsmodule/bsaes-armv7.S", - "linux-arm/crypto/fipsmodule/ghash-armv4.S", - "linux-arm/crypto/fipsmodule/ghashv8-armx32.S", - 
"linux-arm/crypto/fipsmodule/sha1-armv4-large.S", - "linux-arm/crypto/fipsmodule/sha256-armv4.S", - "linux-arm/crypto/fipsmodule/sha512-armv4.S", - "linux-arm/crypto/fipsmodule/vpaes-armv7.S", - "linux-arm/crypto/test/trampoline-armv4.S", - "src/crypto/curve25519/asm/x25519-asm-arm.S", - "src/crypto/poly1305/poly1305_arm_asm.S", -] - -crypto_sources_linux_ppc64le = [ - "linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S", - "linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S", -] - -crypto_sources_linux_x86 = [ - "linux-x86/crypto/chacha/chacha-x86.S", - "linux-x86/crypto/fipsmodule/aes-586.S", - "linux-x86/crypto/fipsmodule/aesni-x86.S", - "linux-x86/crypto/fipsmodule/bn-586.S", - "linux-x86/crypto/fipsmodule/co-586.S", - "linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S", - "linux-x86/crypto/fipsmodule/ghash-x86.S", - "linux-x86/crypto/fipsmodule/md5-586.S", - "linux-x86/crypto/fipsmodule/sha1-586.S", - "linux-x86/crypto/fipsmodule/sha256-586.S", - "linux-x86/crypto/fipsmodule/sha512-586.S", - "linux-x86/crypto/fipsmodule/vpaes-x86.S", - "linux-x86/crypto/fipsmodule/x86-mont.S", - "linux-x86/crypto/test/trampoline-x86.S", -] - -crypto_sources_linux_x86_64 = [ - "linux-x86_64/crypto/chacha/chacha-x86_64.S", - "linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S", - "linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S", - "linux-x86_64/crypto/fipsmodule/aes-x86_64.S", - "linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S", - "linux-x86_64/crypto/fipsmodule/aesni-x86_64.S", - "linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S", - "linux-x86_64/crypto/fipsmodule/ghash-x86_64.S", - "linux-x86_64/crypto/fipsmodule/md5-x86_64.S", - "linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S", - "linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S", - "linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S", - "linux-x86_64/crypto/fipsmodule/rsaz-avx2.S", - "linux-x86_64/crypto/fipsmodule/sha1-x86_64.S", - "linux-x86_64/crypto/fipsmodule/sha256-x86_64.S", - 
"linux-x86_64/crypto/fipsmodule/sha512-x86_64.S", - "linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S", - "linux-x86_64/crypto/fipsmodule/x86_64-mont.S", - "linux-x86_64/crypto/fipsmodule/x86_64-mont5.S", - "linux-x86_64/crypto/test/trampoline-x86_64.S", - "linux-x86_64/crypto/third_party/sike/asm/fp-x86_64.S", - "src/crypto/hrss/asm/poly_rq_mul.S", -] - -crypto_sources_mac_x86 = [ - "mac-x86/crypto/chacha/chacha-x86.S", - "mac-x86/crypto/fipsmodule/aes-586.S", - "mac-x86/crypto/fipsmodule/aesni-x86.S", - "mac-x86/crypto/fipsmodule/bn-586.S", - "mac-x86/crypto/fipsmodule/co-586.S", - "mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S", - "mac-x86/crypto/fipsmodule/ghash-x86.S", - "mac-x86/crypto/fipsmodule/md5-586.S", - "mac-x86/crypto/fipsmodule/sha1-586.S", - "mac-x86/crypto/fipsmodule/sha256-586.S", - "mac-x86/crypto/fipsmodule/sha512-586.S", - "mac-x86/crypto/fipsmodule/vpaes-x86.S", - "mac-x86/crypto/fipsmodule/x86-mont.S", - "mac-x86/crypto/test/trampoline-x86.S", -] - -crypto_sources_mac_x86_64 = [ - "mac-x86_64/crypto/chacha/chacha-x86_64.S", - "mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S", - "mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S", - "mac-x86_64/crypto/fipsmodule/aes-x86_64.S", - "mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S", - "mac-x86_64/crypto/fipsmodule/aesni-x86_64.S", - "mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S", - "mac-x86_64/crypto/fipsmodule/ghash-x86_64.S", - "mac-x86_64/crypto/fipsmodule/md5-x86_64.S", - "mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S", - "mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S", - "mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S", - "mac-x86_64/crypto/fipsmodule/rsaz-avx2.S", - "mac-x86_64/crypto/fipsmodule/sha1-x86_64.S", - "mac-x86_64/crypto/fipsmodule/sha256-x86_64.S", - "mac-x86_64/crypto/fipsmodule/sha512-x86_64.S", - "mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S", - "mac-x86_64/crypto/fipsmodule/x86_64-mont.S", - "mac-x86_64/crypto/fipsmodule/x86_64-mont5.S", - 
"mac-x86_64/crypto/test/trampoline-x86_64.S", - "mac-x86_64/crypto/third_party/sike/asm/fp-x86_64.S", -] - -crypto_sources_win_x86 = [ - "win-x86/crypto/chacha/chacha-x86.asm", - "win-x86/crypto/fipsmodule/aes-586.asm", - "win-x86/crypto/fipsmodule/aesni-x86.asm", - "win-x86/crypto/fipsmodule/bn-586.asm", - "win-x86/crypto/fipsmodule/co-586.asm", - "win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm", - "win-x86/crypto/fipsmodule/ghash-x86.asm", - "win-x86/crypto/fipsmodule/md5-586.asm", - "win-x86/crypto/fipsmodule/sha1-586.asm", - "win-x86/crypto/fipsmodule/sha256-586.asm", - "win-x86/crypto/fipsmodule/sha512-586.asm", - "win-x86/crypto/fipsmodule/vpaes-x86.asm", - "win-x86/crypto/fipsmodule/x86-mont.asm", - "win-x86/crypto/test/trampoline-x86.asm", -] - -crypto_sources_win_x86_64 = [ - "win-x86_64/crypto/chacha/chacha-x86_64.asm", - "win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm", - "win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm", - "win-x86_64/crypto/fipsmodule/aes-x86_64.asm", - "win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm", - "win-x86_64/crypto/fipsmodule/aesni-x86_64.asm", - "win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm", - "win-x86_64/crypto/fipsmodule/ghash-x86_64.asm", - "win-x86_64/crypto/fipsmodule/md5-x86_64.asm", - "win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm", - "win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm", - "win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm", - "win-x86_64/crypto/fipsmodule/rsaz-avx2.asm", - "win-x86_64/crypto/fipsmodule/sha1-x86_64.asm", - "win-x86_64/crypto/fipsmodule/sha256-x86_64.asm", - "win-x86_64/crypto/fipsmodule/sha512-x86_64.asm", - "win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm", - "win-x86_64/crypto/fipsmodule/x86_64-mont.asm", - "win-x86_64/crypto/fipsmodule/x86_64-mont5.asm", - "win-x86_64/crypto/test/trampoline-x86_64.asm", - "win-x86_64/crypto/third_party/sike/asm/fp-x86_64.asm", -] - -fuzzers = [ - "arm_cpuinfo", - "bn_div", - "bn_mod_exp", - "cert", - "client", - 
"dtls_client", - "dtls_server", - "pkcs12", - "pkcs8", - "privkey", - "read_pem", - "server", - "session", - "spki", - "ssl_ctx_api", -] diff --git a/packager/third_party/boringssl/BUILD.generated_tests.gni b/packager/third_party/boringssl/BUILD.generated_tests.gni deleted file mode 100644 index c6d2db8a6b..0000000000 --- a/packager/third_party/boringssl/BUILD.generated_tests.gni +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# This file is created by generate_build_files.py. Do not edit manually. - -test_support_sources = [ - "src/crypto/test/abi_test.h", - "src/crypto/test/file_test.cc", - "src/crypto/test/file_test.h", - "src/crypto/test/gtest_main.h", - "src/crypto/test/malloc.cc", - "src/crypto/test/test_util.cc", - "src/crypto/test/test_util.h", - "src/crypto/test/wycheproof_util.cc", - "src/crypto/test/wycheproof_util.h", - "src/ssl/test/async_bio.h", - "src/ssl/test/fuzzer.h", - "src/ssl/test/fuzzer_tags.h", - "src/ssl/test/handshake_util.h", - "src/ssl/test/packeted_bio.h", - "src/ssl/test/settings_writer.h", - "src/ssl/test/test_config.h", - "src/ssl/test/test_state.h", -] - -crypto_test_sources = [ - "crypto_test_data.cc", - "src/crypto/abi_self_test.cc", - "src/crypto/asn1/asn1_test.cc", - "src/crypto/base64/base64_test.cc", - "src/crypto/bio/bio_test.cc", - "src/crypto/buf/buf_test.cc", - "src/crypto/bytestring/bytestring_test.cc", - "src/crypto/chacha/chacha_test.cc", - "src/crypto/cipher_extra/aead_test.cc", - "src/crypto/cipher_extra/cipher_test.cc", - "src/crypto/cmac/cmac_test.cc", - "src/crypto/compiler_test.cc", - "src/crypto/constant_time_test.cc", - "src/crypto/cpu-arm-linux_test.cc", - "src/crypto/curve25519/ed25519_test.cc", - "src/crypto/curve25519/spake25519_test.cc", - "src/crypto/curve25519/x25519_test.cc", - "src/crypto/dh/dh_test.cc", - "src/crypto/digest_extra/digest_test.cc", - 
"src/crypto/dsa/dsa_test.cc", - "src/crypto/ecdh_extra/ecdh_test.cc", - "src/crypto/err/err_test.cc", - "src/crypto/evp/evp_extra_test.cc", - "src/crypto/evp/evp_test.cc", - "src/crypto/evp/pbkdf_test.cc", - "src/crypto/evp/scrypt_test.cc", - "src/crypto/fipsmodule/aes/aes_test.cc", - "src/crypto/fipsmodule/bn/bn_test.cc", - "src/crypto/fipsmodule/ec/ec_test.cc", - "src/crypto/fipsmodule/ec/p256-x86_64_test.cc", - "src/crypto/fipsmodule/ecdsa/ecdsa_test.cc", - "src/crypto/fipsmodule/md5/md5_test.cc", - "src/crypto/fipsmodule/modes/gcm_test.cc", - "src/crypto/fipsmodule/rand/ctrdrbg_test.cc", - "src/crypto/fipsmodule/sha/sha_test.cc", - "src/crypto/hkdf/hkdf_test.cc", - "src/crypto/hmac_extra/hmac_test.cc", - "src/crypto/hrss/hrss_test.cc", - "src/crypto/impl_dispatch_test.cc", - "src/crypto/lhash/lhash_test.cc", - "src/crypto/obj/obj_test.cc", - "src/crypto/pem/pem_test.cc", - "src/crypto/pkcs7/pkcs7_test.cc", - "src/crypto/pkcs8/pkcs12_test.cc", - "src/crypto/pkcs8/pkcs8_test.cc", - "src/crypto/poly1305/poly1305_test.cc", - "src/crypto/pool/pool_test.cc", - "src/crypto/rand_extra/rand_test.cc", - "src/crypto/refcount_test.cc", - "src/crypto/rsa_extra/rsa_test.cc", - "src/crypto/self_test.cc", - "src/crypto/siphash/siphash_test.cc", - "src/crypto/stack/stack_test.cc", - "src/crypto/test/abi_test.cc", - "src/crypto/test/file_test_gtest.cc", - "src/crypto/test/gtest_main.cc", - "src/crypto/thread_test.cc", - "src/crypto/x509/x509_test.cc", - "src/crypto/x509/x509_time_test.cc", - "src/crypto/x509v3/tab_test.cc", - "src/crypto/x509v3/v3name_test.cc", -] - -crypto_test_data = [ - "src/crypto/cipher_extra/test/aes_128_cbc_sha1_tls_implicit_iv_tests.txt", - "src/crypto/cipher_extra/test/aes_128_cbc_sha1_tls_tests.txt", - "src/crypto/cipher_extra/test/aes_128_cbc_sha256_tls_tests.txt", - "src/crypto/cipher_extra/test/aes_128_ccm_bluetooth_8_tests.txt", - "src/crypto/cipher_extra/test/aes_128_ccm_bluetooth_tests.txt", - 
"src/crypto/cipher_extra/test/aes_128_ctr_hmac_sha256.txt", - "src/crypto/cipher_extra/test/aes_128_gcm_siv_tests.txt", - "src/crypto/cipher_extra/test/aes_128_gcm_tests.txt", - "src/crypto/cipher_extra/test/aes_192_gcm_tests.txt", - "src/crypto/cipher_extra/test/aes_256_cbc_sha1_tls_implicit_iv_tests.txt", - "src/crypto/cipher_extra/test/aes_256_cbc_sha1_tls_tests.txt", - "src/crypto/cipher_extra/test/aes_256_cbc_sha256_tls_tests.txt", - "src/crypto/cipher_extra/test/aes_256_cbc_sha384_tls_tests.txt", - "src/crypto/cipher_extra/test/aes_256_ctr_hmac_sha256.txt", - "src/crypto/cipher_extra/test/aes_256_gcm_siv_tests.txt", - "src/crypto/cipher_extra/test/aes_256_gcm_tests.txt", - "src/crypto/cipher_extra/test/chacha20_poly1305_tests.txt", - "src/crypto/cipher_extra/test/cipher_tests.txt", - "src/crypto/cipher_extra/test/des_ede3_cbc_sha1_tls_implicit_iv_tests.txt", - "src/crypto/cipher_extra/test/des_ede3_cbc_sha1_tls_tests.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_128_cbc.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_128_ctr.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_128_gcm.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_192_cbc.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_192_ctr.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_256_cbc.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_256_ctr.txt", - "src/crypto/cipher_extra/test/nist_cavp/aes_256_gcm.txt", - "src/crypto/cipher_extra/test/nist_cavp/tdes_cbc.txt", - "src/crypto/cipher_extra/test/nist_cavp/tdes_ecb.txt", - "src/crypto/cipher_extra/test/xchacha20_poly1305_tests.txt", - "src/crypto/cmac/cavp_3des_cmac_tests.txt", - "src/crypto/cmac/cavp_aes128_cmac_tests.txt", - "src/crypto/cmac/cavp_aes192_cmac_tests.txt", - "src/crypto/cmac/cavp_aes256_cmac_tests.txt", - "src/crypto/curve25519/ed25519_tests.txt", - "src/crypto/ecdh_extra/ecdh_tests.txt", - "src/crypto/evp/evp_tests.txt", - "src/crypto/evp/scrypt_tests.txt", - "src/crypto/fipsmodule/aes/aes_tests.txt", - 
"src/crypto/fipsmodule/bn/bn_tests.txt", - "src/crypto/fipsmodule/bn/miller_rabin_tests.txt", - "src/crypto/fipsmodule/ec/ec_scalar_base_mult_tests.txt", - "src/crypto/fipsmodule/ec/p256-x86_64_tests.txt", - "src/crypto/fipsmodule/ecdsa/ecdsa_sign_tests.txt", - "src/crypto/fipsmodule/ecdsa/ecdsa_verify_tests.txt", - "src/crypto/fipsmodule/modes/gcm_tests.txt", - "src/crypto/fipsmodule/rand/ctrdrbg_vectors.txt", - "src/crypto/hmac_extra/hmac_tests.txt", - "src/crypto/poly1305/poly1305_tests.txt", - "src/crypto/siphash/siphash_tests.txt", - "src/crypto/x509/many_constraints.pem", - "src/crypto/x509/many_names1.pem", - "src/crypto/x509/many_names2.pem", - "src/crypto/x509/many_names3.pem", - "src/crypto/x509/some_names1.pem", - "src/crypto/x509/some_names2.pem", - "src/crypto/x509/some_names3.pem", - "src/third_party/wycheproof_testvectors/aes_cbc_pkcs5_test.txt", - "src/third_party/wycheproof_testvectors/aes_cmac_test.txt", - "src/third_party/wycheproof_testvectors/aes_gcm_siv_test.txt", - "src/third_party/wycheproof_testvectors/aes_gcm_test.txt", - "src/third_party/wycheproof_testvectors/chacha20_poly1305_test.txt", - "src/third_party/wycheproof_testvectors/dsa_test.txt", - "src/third_party/wycheproof_testvectors/ecdh_secp224r1_test.txt", - "src/third_party/wycheproof_testvectors/ecdh_secp256r1_test.txt", - "src/third_party/wycheproof_testvectors/ecdh_secp384r1_test.txt", - "src/third_party/wycheproof_testvectors/ecdh_secp521r1_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp224r1_sha224_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp224r1_sha256_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp224r1_sha512_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp256r1_sha256_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp256r1_sha512_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp384r1_sha384_test.txt", - "src/third_party/wycheproof_testvectors/ecdsa_secp384r1_sha512_test.txt", - 
"src/third_party/wycheproof_testvectors/ecdsa_secp521r1_sha512_test.txt", - "src/third_party/wycheproof_testvectors/eddsa_test.txt", - "src/third_party/wycheproof_testvectors/kw_test.txt", - "src/third_party/wycheproof_testvectors/kwp_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_2048_sha1_mgf1_20_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_2048_sha256_mgf1_0_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_2048_sha256_mgf1_32_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_3072_sha256_mgf1_32_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_4096_sha256_mgf1_32_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_4096_sha512_mgf1_32_test.txt", - "src/third_party/wycheproof_testvectors/rsa_pss_misc_test.txt", - "src/third_party/wycheproof_testvectors/rsa_signature_test.txt", - "src/third_party/wycheproof_testvectors/x25519_test.txt", -] - -ssl_test_sources = [ - "src/crypto/test/abi_test.cc", - "src/crypto/test/gtest_main.cc", - "src/ssl/span_test.cc", - "src/ssl/ssl_c_test.c", - "src/ssl/ssl_test.cc", -] diff --git a/packager/third_party/boringssl/BUILD.gn b/packager/third_party/boringssl/BUILD.gn deleted file mode 100644 index fd7c960989..0000000000 --- a/packager/third_party/boringssl/BUILD.gn +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import("//build/config/android/config.gni") -import("//build/config/sanitizers/sanitizers.gni") -import("//build_overrides/build.gni") -import("//testing/libfuzzer/fuzzer_test.gni") -import("BUILD.generated.gni") -import("BUILD.generated_tests.gni") - -# Config for us and everybody else depending on BoringSSL. 
-config("external_config") { - include_dirs = [ "src/include" ] - if (is_component_build) { - defines = [ "BORINGSSL_SHARED_LIBRARY" ] - } -} - -# Config internal to this build file, shared by boringssl and boringssl_fuzzer. -config("internal_config") { - visibility = [ ":*" ] # Only targets in this file can depend on this. - defines = [ - "BORINGSSL_IMPLEMENTATION", - "BORINGSSL_NO_STATIC_INITIALIZER", - "OPENSSL_SMALL", - ] - configs = [ - # TODO(davidben): Fix size_t truncations in BoringSSL. - # https://crbug.com/429039 - "//build/config/compiler:no_size_t_to_int_warning", - ] - if (is_posix) { - cflags_c = [ "-std=c99" ] - defines += [ "_XOPEN_SOURCE=700" ] - } -} - -config("no_asm_config") { - visibility = [ ":*" ] # Only targets in this file can depend on this. - defines = [ "OPENSSL_NO_ASM" ] -} - -config("fuzzer_config") { - visibility = [ ":*" ] # Only targets in this file can depend on this. - defines = [ "BORINGSSL_UNSAFE_FUZZER_MODE" ] -} - -all_sources = crypto_sources + ssl_sources - -# Windows' assembly is built with Yasm. The other platforms use the platform -# assembler. -if (is_win && !is_msan) { - import("//third_party/yasm/yasm_assemble.gni") - yasm_assemble("boringssl_asm") { - if (current_cpu == "x64") { - sources = crypto_sources_win_x86_64 - } else if (current_cpu == "x86") { - sources = crypto_sources_win_x86 - } - } -} else { - source_set("boringssl_asm") { - visibility = [ ":*" ] # Only targets in this file can depend on this. - - defines = [] - sources = [] - asmflags = [] - include_dirs = [ "src/include" ] - - if ((current_cpu == "arm" || current_cpu == "arm64") && is_clang) { - if (current_cpu == "arm") { - # TODO(hans) Enable integrated-as (crbug.com/124610). - asmflags += [ "-fno-integrated-as" ] - } - if (is_android) { - rebased_android_toolchain_root = - rebase_path(android_toolchain_root, root_build_dir) - - # Else /usr/bin/as gets picked up. 
- asmflags += [ "-B${rebased_android_toolchain_root}/bin" ] - } - } - - if (is_msan) { - public_configs = [ ":no_asm_config" ] - } else if (current_cpu == "x64") { - if (is_mac) { - sources += crypto_sources_mac_x86_64 - } else if (is_linux || is_android) { - sources += crypto_sources_linux_x86_64 - } else { - public_configs = [ ":no_asm_config" ] - } - } else if (current_cpu == "x86") { - if (is_mac) { - sources += crypto_sources_mac_x86 - } else if (is_linux || is_android) { - sources += crypto_sources_linux_x86 - } else { - public_configs = [ ":no_asm_config" ] - } - } else if (current_cpu == "arm" && (is_linux || is_android)) { - sources += crypto_sources_linux_arm - } else if (current_cpu == "arm64" && (is_linux || is_android)) { - sources += crypto_sources_linux_aarch64 - - # TODO(davidben): Remove explicit arch flag once https://crbug.com/576858 - # is fixed. - asmflags += [ "-march=armv8-a+crypto" ] - } else { - public_configs = [ ":no_asm_config" ] - } - } -} - -component("boringssl") { - sources = all_sources - deps = [ - ":boringssl_asm", - ] - - public_configs = [ ":external_config" ] - configs += [ ":internal_config" ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - - if (is_nacl) { - deps += [ "//native_client_sdk/src/libraries/nacl_io" ] - } -} - -if (build_with_chromium) { - create_tests("boringssl_tests") { - configs_exclude = [ "//build/config/compiler:chromium_code" ] - configs = [ - ":internal_config", - "//build/config/compiler:no_chromium_code", - ] - deps = [ - ":boringssl", - "//build/win:default_exe_manifest", - ] - } - - if (!is_ios) { - test("boringssl_unittests") { - deps = [ - ":boringssl_tests", - "//base", - "//base/test:run_all_unittests", - "//base/test:test_support", - "//testing/gtest", - ] - sources = [ - "boringssl_unittest.cc", - ] - } - } - - # The same as boringssl, but builds with BORINGSSL_UNSAFE_FUZZER_MODE. 
- component("boringssl_fuzzer") { - visibility = [ ":*" ] # Only targets in this file can depend on this. - - sources = all_sources - deps = [ - ":boringssl_asm", - ] - - public_configs = [ - ":external_config", - ":fuzzer_config", - ] - configs += [ ":internal_config" ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ "//build/config/compiler:no_chromium_code" ] - - if (is_nacl) { - deps += [ "//native_client_sdk/src/libraries/nacl_io" ] - } - } - - foreach(fuzzer, fuzzers) { - fuzzer_test("boringssl_${fuzzer}_fuzzer") { - sources = [ - "src/fuzz/${fuzzer}.cc", - ] - deps = [ - ":boringssl_fuzzer", - ] - seed_corpus = "src/fuzz/${fuzzer}_corpus" - - if ("cert" == fuzzer) { - libfuzzer_options = [ "max_len=3072" ] - } else if ("client" == fuzzer) { - libfuzzer_options = [ "max_len=20000" ] - } else if ("pkcs8" == fuzzer) { - libfuzzer_options = [ "max_len=2048" ] - } else if ("privkey" == fuzzer) { - libfuzzer_options = [ "max_len=2048" ] - } else if ("read_pem" == fuzzer) { - libfuzzer_options = [ "max_len=512" ] - } else if ("server" == fuzzer) { - libfuzzer_options = [ "max_len=4096" ] - } else if ("spki" == fuzzer) { - libfuzzer_options = [ "max_len=1024" ] - } - } - } -} diff --git a/packager/third_party/boringssl/DEPS b/packager/third_party/boringssl/DEPS deleted file mode 100644 index dda2d7c7fc..0000000000 --- a/packager/third_party/boringssl/DEPS +++ /dev/null @@ -1,6 +0,0 @@ -specific_include_rules = { - "boringssl_unittest\.cc": [ - "+base", - "+testing", - ], -} diff --git a/packager/third_party/boringssl/NOTICE b/packager/third_party/boringssl/NOTICE deleted file mode 100644 index e47d101f10..0000000000 --- a/packager/third_party/boringssl/NOTICE +++ /dev/null @@ -1,127 +0,0 @@ - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a dual license, i.e. both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. 
Actually both licenses are BSD-style - Open Source licenses. In case of any license issues related to OpenSSL - please contact openssl-core@openssl.org. - - OpenSSL License - --------------- - -/* ==================================================================== - * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - diff --git a/packager/third_party/boringssl/OWNERS b/packager/third_party/boringssl/OWNERS deleted file mode 100644 index 42d0d3b58b..0000000000 --- a/packager/third_party/boringssl/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -agl@chromium.org -davidben@chromium.org -rsleevi@chromium.org diff --git a/packager/third_party/boringssl/README.chromium b/packager/third_party/boringssl/README.chromium deleted file mode 100644 index 8abcfe695d..0000000000 --- a/packager/third_party/boringssl/README.chromium +++ /dev/null @@ -1,19 +0,0 @@ -Name: boringssl -URL: https://boringssl.googlesource.com/boringssl -Version: git -License: BSDish -License File: src/LICENSE -License Android Compatible: yes -Security Critical: yes - -Description: -This is BoringSSL, a fork of OpenSSL. See -https://www.imperialviolet.org/2014/06/20/boringssl.html - -Note: when rolling DEPS forward, remember to run - - cd third_party/boringssl - python src/util/generate_build_files.py gn gyp - -from a system with both Perl and Go installed. Alternatively, use the -roll_boringssl.py script. 
diff --git a/packager/third_party/boringssl/boringssl.gyp b/packager/third_party/boringssl/boringssl.gyp deleted file mode 100644 index cd5d88c834..0000000000 --- a/packager/third_party/boringssl/boringssl.gyp +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'includes': [ - 'boringssl.gypi', - ], - 'target_defaults': { - 'conditions': [ - ['os_posix == 1', { - 'cflags_c': [ '-std=c99' ], - 'defines': [ '_XOPEN_SOURCE=700' ], - }], - ], - }, - 'targets': [ - { - 'target_name': 'boringssl_nacl_win64', - 'type': '<(component)', - 'sources': [ - '<@(boringssl_crypto_sources)', - ], - 'defines': [ - 'BORINGSSL_IMPLEMENTATION', - 'BORINGSSL_NO_STATIC_INITIALIZER', - 'OPENSSL_NO_ASM', - 'OPENSSL_SMALL', - ], - 'configurations': { - 'Common_Base': { - 'msvs_target_platform': 'x64', - }, - }, - # TODO(davidben): Fix size_t truncations in BoringSSL. - # https://crbug.com/429039 - 'msvs_disabled_warnings': [ 4267, ], - 'conditions': [ - ['component == "shared_library"', { - 'defines': [ - 'BORINGSSL_SHARED_LIBRARY', - ], - }], - ], - 'include_dirs': [ - 'src/include', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'src/include', - ], - 'conditions': [ - ['component == "shared_library"', { - 'defines': [ - 'BORINGSSL_SHARED_LIBRARY', - ], - }], - ], - }, - }, - { - 'target_name': 'boringssl', - 'type': '<(component)', - 'sources': [ - '<@(boringssl_crypto_sources)', - '<@(boringssl_ssl_sources)', - ], - 'defines': [ - 'BORINGSSL_IMPLEMENTATION', - 'BORINGSSL_NO_STATIC_INITIALIZER', - 'OPENSSL_SMALL', - ], - 'dependencies': [ 'boringssl_asm' ], - # TODO(davidben): Fix size_t truncations in BoringSSL. 
- # https://crbug.com/429039 - 'msvs_disabled_warnings': [ 4267, ], - 'conditions': [ - ['component == "shared_library"', { - 'defines': [ - 'BORINGSSL_SHARED_LIBRARY', - ], - }], - ], - 'include_dirs': [ - 'src/include', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'src/include', - ], - 'conditions': [ - ['component == "shared_library"', { - 'defines': [ - 'BORINGSSL_SHARED_LIBRARY', - ], - }], - ], - }, - }, - { - # boringssl_asm is a separate target to allow for ASM-specific cflags. - 'target_name': 'boringssl_asm', - 'type': 'static_library', - 'include_dirs': [ - 'src/include', - ], - 'conditions': [ - ['target_arch == "arm" and msan == 0', { - 'conditions': [ - ['OS == "linux" or OS == "android"', { - 'sources': [ '<@(boringssl_linux_arm_sources)' ], - }, { - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ], - }], - ['target_arch == "arm" and clang == 1', { - # TODO(hans) Enable integrated-as (crbug.com/124610). - 'cflags': [ '-fno-integrated-as' ], - 'conditions': [ - ['OS == "android"', { - # Else /usr/bin/as gets picked up. - 'cflags': [ '-B<(android_toolchain)' ], - }], - ], - }], - ['target_arch == "arm64" and msan == 0', { - 'conditions': [ - ['OS == "linux" or OS == "android"', { - 'sources': [ '<@(boringssl_linux_aarch64_sources)' ], - # TODO(davidben): Remove explicit arch flag once - # https://crbug.com/576858 is fixed. - 'cflags': [ '-march=armv8-a+crypto' ], - }, { - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ], - }], - ['target_arch == "ia32" and msan == 0', { - 'conditions': [ - ['OS == "mac"', { - 'sources': [ '<@(boringssl_mac_x86_sources)' ], - }], - ['OS == "linux" or OS == "android"', { - 'sources': [ '<@(boringssl_linux_x86_sources)' ], - }], - ['OS == "win"', { - 'sources': [ '<@(boringssl_win_x86_sources)' ], - # Windows' assembly is built with Yasm. The other platforms use - # the platform assembler. 
- 'variables': { - 'yasm_output_path': '<(SHARED_INTERMEDIATE_DIR)/third_party/boringssl', - }, - 'includes': [ - '../yasm/yasm_compile.gypi', - ], - }], - ['OS != "mac" and OS != "linux" and OS != "win" and OS != "android"', { - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ] - }], - ['target_arch == "x64" and msan == 0', { - 'conditions': [ - ['OS == "mac"', { - 'sources': [ '<@(boringssl_mac_x86_64_sources)' ], - }], - ['OS == "linux" or OS == "android"', { - 'sources': [ '<@(boringssl_linux_x86_64_sources)' ], - }], - ['OS == "win"', { - # NOTES(kqyang): Somehow ASM fails to compile. Disable ASM. - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ['OS != "mac" and OS != "linux" and OS != "win" and OS != "android"', { - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ] - }], - ['msan == 1 or (target_arch != "arm" and target_arch != "ia32" and target_arch != "x64" and target_arch != "arm64")', { - 'direct_dependent_settings': { - 'defines': [ 'OPENSSL_NO_ASM' ], - }, - }], - ], - }, - ], -} diff --git a/packager/third_party/boringssl/boringssl.gypi b/packager/third_party/boringssl/boringssl.gypi deleted file mode 100644 index e7cb8f0ba2..0000000000 --- a/packager/third_party/boringssl/boringssl.gypi +++ /dev/null @@ -1,587 +0,0 @@ -# Copyright (c) 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# This file is created by generate_build_files.py. Do not edit manually. 
- -{ - 'variables': { - 'boringssl_ssl_sources': [ - 'src/include/openssl/dtls1.h', - 'src/include/openssl/srtp.h', - 'src/include/openssl/ssl.h', - 'src/include/openssl/ssl3.h', - 'src/include/openssl/tls1.h', - 'src/ssl/bio_ssl.cc', - 'src/ssl/d1_both.cc', - 'src/ssl/d1_lib.cc', - 'src/ssl/d1_pkt.cc', - 'src/ssl/d1_srtp.cc', - 'src/ssl/dtls_method.cc', - 'src/ssl/dtls_record.cc', - 'src/ssl/handoff.cc', - 'src/ssl/handshake.cc', - 'src/ssl/handshake_client.cc', - 'src/ssl/handshake_server.cc', - 'src/ssl/internal.h', - 'src/ssl/s3_both.cc', - 'src/ssl/s3_lib.cc', - 'src/ssl/s3_pkt.cc', - 'src/ssl/ssl_aead_ctx.cc', - 'src/ssl/ssl_asn1.cc', - 'src/ssl/ssl_buffer.cc', - 'src/ssl/ssl_cert.cc', - 'src/ssl/ssl_cipher.cc', - 'src/ssl/ssl_file.cc', - 'src/ssl/ssl_key_share.cc', - 'src/ssl/ssl_lib.cc', - 'src/ssl/ssl_privkey.cc', - 'src/ssl/ssl_session.cc', - 'src/ssl/ssl_stat.cc', - 'src/ssl/ssl_transcript.cc', - 'src/ssl/ssl_versions.cc', - 'src/ssl/ssl_x509.cc', - 'src/ssl/t1_enc.cc', - 'src/ssl/t1_lib.cc', - 'src/ssl/tls13_both.cc', - 'src/ssl/tls13_client.cc', - 'src/ssl/tls13_enc.cc', - 'src/ssl/tls13_server.cc', - 'src/ssl/tls_method.cc', - 'src/ssl/tls_record.cc', - ], - 'boringssl_crypto_sources': [ - 'err_data.c', - 'src/crypto/asn1/a_bitstr.c', - 'src/crypto/asn1/a_bool.c', - 'src/crypto/asn1/a_d2i_fp.c', - 'src/crypto/asn1/a_dup.c', - 'src/crypto/asn1/a_enum.c', - 'src/crypto/asn1/a_gentm.c', - 'src/crypto/asn1/a_i2d_fp.c', - 'src/crypto/asn1/a_int.c', - 'src/crypto/asn1/a_mbstr.c', - 'src/crypto/asn1/a_object.c', - 'src/crypto/asn1/a_octet.c', - 'src/crypto/asn1/a_print.c', - 'src/crypto/asn1/a_strnid.c', - 'src/crypto/asn1/a_time.c', - 'src/crypto/asn1/a_type.c', - 'src/crypto/asn1/a_utctm.c', - 'src/crypto/asn1/a_utf8.c', - 'src/crypto/asn1/asn1_lib.c', - 'src/crypto/asn1/asn1_locl.h', - 'src/crypto/asn1/asn1_par.c', - 'src/crypto/asn1/asn_pack.c', - 'src/crypto/asn1/f_enum.c', - 'src/crypto/asn1/f_int.c', - 'src/crypto/asn1/f_string.c', - 
'src/crypto/asn1/tasn_dec.c', - 'src/crypto/asn1/tasn_enc.c', - 'src/crypto/asn1/tasn_fre.c', - 'src/crypto/asn1/tasn_new.c', - 'src/crypto/asn1/tasn_typ.c', - 'src/crypto/asn1/tasn_utl.c', - 'src/crypto/asn1/time_support.c', - 'src/crypto/base64/base64.c', - 'src/crypto/bio/bio.c', - 'src/crypto/bio/bio_mem.c', - 'src/crypto/bio/connect.c', - 'src/crypto/bio/fd.c', - 'src/crypto/bio/file.c', - 'src/crypto/bio/hexdump.c', - 'src/crypto/bio/internal.h', - 'src/crypto/bio/pair.c', - 'src/crypto/bio/printf.c', - 'src/crypto/bio/socket.c', - 'src/crypto/bio/socket_helper.c', - 'src/crypto/bn_extra/bn_asn1.c', - 'src/crypto/bn_extra/convert.c', - 'src/crypto/buf/buf.c', - 'src/crypto/bytestring/asn1_compat.c', - 'src/crypto/bytestring/ber.c', - 'src/crypto/bytestring/cbb.c', - 'src/crypto/bytestring/cbs.c', - 'src/crypto/bytestring/internal.h', - 'src/crypto/bytestring/unicode.c', - 'src/crypto/chacha/chacha.c', - 'src/crypto/chacha/internal.h', - 'src/crypto/cipher_extra/cipher_extra.c', - 'src/crypto/cipher_extra/derive_key.c', - 'src/crypto/cipher_extra/e_aesccm.c', - 'src/crypto/cipher_extra/e_aesctrhmac.c', - 'src/crypto/cipher_extra/e_aesgcmsiv.c', - 'src/crypto/cipher_extra/e_chacha20poly1305.c', - 'src/crypto/cipher_extra/e_null.c', - 'src/crypto/cipher_extra/e_rc2.c', - 'src/crypto/cipher_extra/e_rc4.c', - 'src/crypto/cipher_extra/e_tls.c', - 'src/crypto/cipher_extra/internal.h', - 'src/crypto/cipher_extra/tls_cbc.c', - 'src/crypto/cmac/cmac.c', - 'src/crypto/conf/conf.c', - 'src/crypto/conf/conf_def.h', - 'src/crypto/conf/internal.h', - 'src/crypto/cpu-aarch64-fuchsia.c', - 'src/crypto/cpu-aarch64-linux.c', - 'src/crypto/cpu-arm-linux.c', - 'src/crypto/cpu-arm-linux.h', - 'src/crypto/cpu-arm.c', - 'src/crypto/cpu-intel.c', - 'src/crypto/cpu-ppc64le.c', - 'src/crypto/crypto.c', - 'src/crypto/curve25519/spake25519.c', - 'src/crypto/dh/check.c', - 'src/crypto/dh/dh.c', - 'src/crypto/dh/dh_asn1.c', - 'src/crypto/dh/params.c', - 
'src/crypto/digest_extra/digest_extra.c', - 'src/crypto/dsa/dsa.c', - 'src/crypto/dsa/dsa_asn1.c', - 'src/crypto/ec_extra/ec_asn1.c', - 'src/crypto/ec_extra/ec_derive.c', - 'src/crypto/ecdh_extra/ecdh_extra.c', - 'src/crypto/ecdsa_extra/ecdsa_asn1.c', - 'src/crypto/engine/engine.c', - 'src/crypto/err/err.c', - 'src/crypto/err/internal.h', - 'src/crypto/evp/digestsign.c', - 'src/crypto/evp/evp.c', - 'src/crypto/evp/evp_asn1.c', - 'src/crypto/evp/evp_ctx.c', - 'src/crypto/evp/internal.h', - 'src/crypto/evp/p_dsa_asn1.c', - 'src/crypto/evp/p_ec.c', - 'src/crypto/evp/p_ec_asn1.c', - 'src/crypto/evp/p_ed25519.c', - 'src/crypto/evp/p_ed25519_asn1.c', - 'src/crypto/evp/p_rsa.c', - 'src/crypto/evp/p_rsa_asn1.c', - 'src/crypto/evp/p_x25519.c', - 'src/crypto/evp/p_x25519_asn1.c', - 'src/crypto/evp/pbkdf.c', - 'src/crypto/evp/print.c', - 'src/crypto/evp/scrypt.c', - 'src/crypto/evp/sign.c', - 'src/crypto/ex_data.c', - 'src/crypto/fipsmodule/aes/internal.h', - 'src/crypto/fipsmodule/bcm.c', - 'src/crypto/fipsmodule/bn/internal.h', - 'src/crypto/fipsmodule/bn/rsaz_exp.h', - 'src/crypto/fipsmodule/cipher/internal.h', - 'src/crypto/fipsmodule/delocate.h', - 'src/crypto/fipsmodule/des/internal.h', - 'src/crypto/fipsmodule/digest/internal.h', - 'src/crypto/fipsmodule/digest/md32_common.h', - 'src/crypto/fipsmodule/ec/internal.h', - 'src/crypto/fipsmodule/ec/p256-x86_64-table.h', - 'src/crypto/fipsmodule/ec/p256-x86_64.h', - 'src/crypto/fipsmodule/fips_shared_support.c', - 'src/crypto/fipsmodule/is_fips.c', - 'src/crypto/fipsmodule/md5/internal.h', - 'src/crypto/fipsmodule/modes/internal.h', - 'src/crypto/fipsmodule/rand/internal.h', - 'src/crypto/fipsmodule/rsa/internal.h', - 'src/crypto/fipsmodule/sha/internal.h', - 'src/crypto/fipsmodule/tls/internal.h', - 'src/crypto/hkdf/hkdf.c', - 'src/crypto/hrss/hrss.c', - 'src/crypto/hrss/internal.h', - 'src/crypto/internal.h', - 'src/crypto/lhash/lhash.c', - 'src/crypto/mem.c', - 'src/crypto/obj/obj.c', - 'src/crypto/obj/obj_dat.h', - 
'src/crypto/obj/obj_xref.c', - 'src/crypto/pem/pem_all.c', - 'src/crypto/pem/pem_info.c', - 'src/crypto/pem/pem_lib.c', - 'src/crypto/pem/pem_oth.c', - 'src/crypto/pem/pem_pk8.c', - 'src/crypto/pem/pem_pkey.c', - 'src/crypto/pem/pem_x509.c', - 'src/crypto/pem/pem_xaux.c', - 'src/crypto/pkcs7/internal.h', - 'src/crypto/pkcs7/pkcs7.c', - 'src/crypto/pkcs7/pkcs7_x509.c', - 'src/crypto/pkcs8/internal.h', - 'src/crypto/pkcs8/p5_pbev2.c', - 'src/crypto/pkcs8/pkcs8.c', - 'src/crypto/pkcs8/pkcs8_x509.c', - 'src/crypto/poly1305/internal.h', - 'src/crypto/poly1305/poly1305.c', - 'src/crypto/poly1305/poly1305_arm.c', - 'src/crypto/poly1305/poly1305_vec.c', - 'src/crypto/pool/internal.h', - 'src/crypto/pool/pool.c', - 'src/crypto/rand_extra/deterministic.c', - 'src/crypto/rand_extra/forkunsafe.c', - 'src/crypto/rand_extra/fuchsia.c', - 'src/crypto/rand_extra/rand_extra.c', - 'src/crypto/rand_extra/windows.c', - 'src/crypto/rc4/rc4.c', - 'src/crypto/refcount_c11.c', - 'src/crypto/refcount_lock.c', - 'src/crypto/rsa_extra/rsa_asn1.c', - 'src/crypto/rsa_extra/rsa_print.c', - 'src/crypto/siphash/siphash.c', - 'src/crypto/stack/stack.c', - 'src/crypto/thread.c', - 'src/crypto/thread_none.c', - 'src/crypto/thread_pthread.c', - 'src/crypto/thread_win.c', - 'src/crypto/x509/a_digest.c', - 'src/crypto/x509/a_sign.c', - 'src/crypto/x509/a_strex.c', - 'src/crypto/x509/a_verify.c', - 'src/crypto/x509/algorithm.c', - 'src/crypto/x509/asn1_gen.c', - 'src/crypto/x509/by_dir.c', - 'src/crypto/x509/by_file.c', - 'src/crypto/x509/charmap.h', - 'src/crypto/x509/i2d_pr.c', - 'src/crypto/x509/internal.h', - 'src/crypto/x509/rsa_pss.c', - 'src/crypto/x509/t_crl.c', - 'src/crypto/x509/t_req.c', - 'src/crypto/x509/t_x509.c', - 'src/crypto/x509/t_x509a.c', - 'src/crypto/x509/vpm_int.h', - 'src/crypto/x509/x509.c', - 'src/crypto/x509/x509_att.c', - 'src/crypto/x509/x509_cmp.c', - 'src/crypto/x509/x509_d2.c', - 'src/crypto/x509/x509_def.c', - 'src/crypto/x509/x509_ext.c', - 'src/crypto/x509/x509_lu.c', 
- 'src/crypto/x509/x509_obj.c', - 'src/crypto/x509/x509_r2x.c', - 'src/crypto/x509/x509_req.c', - 'src/crypto/x509/x509_set.c', - 'src/crypto/x509/x509_trs.c', - 'src/crypto/x509/x509_txt.c', - 'src/crypto/x509/x509_v3.c', - 'src/crypto/x509/x509_vfy.c', - 'src/crypto/x509/x509_vpm.c', - 'src/crypto/x509/x509cset.c', - 'src/crypto/x509/x509name.c', - 'src/crypto/x509/x509rset.c', - 'src/crypto/x509/x509spki.c', - 'src/crypto/x509/x_algor.c', - 'src/crypto/x509/x_all.c', - 'src/crypto/x509/x_attrib.c', - 'src/crypto/x509/x_crl.c', - 'src/crypto/x509/x_exten.c', - 'src/crypto/x509/x_info.c', - 'src/crypto/x509/x_name.c', - 'src/crypto/x509/x_pkey.c', - 'src/crypto/x509/x_pubkey.c', - 'src/crypto/x509/x_req.c', - 'src/crypto/x509/x_sig.c', - 'src/crypto/x509/x_spki.c', - 'src/crypto/x509/x_val.c', - 'src/crypto/x509/x_x509.c', - 'src/crypto/x509/x_x509a.c', - 'src/crypto/x509v3/ext_dat.h', - 'src/crypto/x509v3/internal.h', - 'src/crypto/x509v3/pcy_cache.c', - 'src/crypto/x509v3/pcy_data.c', - 'src/crypto/x509v3/pcy_int.h', - 'src/crypto/x509v3/pcy_lib.c', - 'src/crypto/x509v3/pcy_map.c', - 'src/crypto/x509v3/pcy_node.c', - 'src/crypto/x509v3/pcy_tree.c', - 'src/crypto/x509v3/v3_akey.c', - 'src/crypto/x509v3/v3_akeya.c', - 'src/crypto/x509v3/v3_alt.c', - 'src/crypto/x509v3/v3_bcons.c', - 'src/crypto/x509v3/v3_bitst.c', - 'src/crypto/x509v3/v3_conf.c', - 'src/crypto/x509v3/v3_cpols.c', - 'src/crypto/x509v3/v3_crld.c', - 'src/crypto/x509v3/v3_enum.c', - 'src/crypto/x509v3/v3_extku.c', - 'src/crypto/x509v3/v3_genn.c', - 'src/crypto/x509v3/v3_ia5.c', - 'src/crypto/x509v3/v3_info.c', - 'src/crypto/x509v3/v3_int.c', - 'src/crypto/x509v3/v3_lib.c', - 'src/crypto/x509v3/v3_ncons.c', - 'src/crypto/x509v3/v3_ocsp.c', - 'src/crypto/x509v3/v3_pci.c', - 'src/crypto/x509v3/v3_pcia.c', - 'src/crypto/x509v3/v3_pcons.c', - 'src/crypto/x509v3/v3_pku.c', - 'src/crypto/x509v3/v3_pmaps.c', - 'src/crypto/x509v3/v3_prn.c', - 'src/crypto/x509v3/v3_purp.c', - 'src/crypto/x509v3/v3_skey.c', - 
'src/crypto/x509v3/v3_sxnet.c', - 'src/crypto/x509v3/v3_utl.c', - 'src/include/openssl/aead.h', - 'src/include/openssl/aes.h', - 'src/include/openssl/arm_arch.h', - 'src/include/openssl/asn1.h', - 'src/include/openssl/asn1_mac.h', - 'src/include/openssl/asn1t.h', - 'src/include/openssl/base.h', - 'src/include/openssl/base64.h', - 'src/include/openssl/bio.h', - 'src/include/openssl/blowfish.h', - 'src/include/openssl/bn.h', - 'src/include/openssl/buf.h', - 'src/include/openssl/buffer.h', - 'src/include/openssl/bytestring.h', - 'src/include/openssl/cast.h', - 'src/include/openssl/chacha.h', - 'src/include/openssl/cipher.h', - 'src/include/openssl/cmac.h', - 'src/include/openssl/conf.h', - 'src/include/openssl/cpu.h', - 'src/include/openssl/crypto.h', - 'src/include/openssl/curve25519.h', - 'src/include/openssl/des.h', - 'src/include/openssl/dh.h', - 'src/include/openssl/digest.h', - 'src/include/openssl/dsa.h', - 'src/include/openssl/e_os2.h', - 'src/include/openssl/ec.h', - 'src/include/openssl/ec_key.h', - 'src/include/openssl/ecdh.h', - 'src/include/openssl/ecdsa.h', - 'src/include/openssl/engine.h', - 'src/include/openssl/err.h', - 'src/include/openssl/evp.h', - 'src/include/openssl/ex_data.h', - 'src/include/openssl/hkdf.h', - 'src/include/openssl/hmac.h', - 'src/include/openssl/hrss.h', - 'src/include/openssl/is_boringssl.h', - 'src/include/openssl/lhash.h', - 'src/include/openssl/md4.h', - 'src/include/openssl/md5.h', - 'src/include/openssl/mem.h', - 'src/include/openssl/nid.h', - 'src/include/openssl/obj.h', - 'src/include/openssl/obj_mac.h', - 'src/include/openssl/objects.h', - 'src/include/openssl/opensslconf.h', - 'src/include/openssl/opensslv.h', - 'src/include/openssl/ossl_typ.h', - 'src/include/openssl/pem.h', - 'src/include/openssl/pkcs12.h', - 'src/include/openssl/pkcs7.h', - 'src/include/openssl/pkcs8.h', - 'src/include/openssl/poly1305.h', - 'src/include/openssl/pool.h', - 'src/include/openssl/rand.h', - 'src/include/openssl/rc4.h', - 
'src/include/openssl/ripemd.h', - 'src/include/openssl/rsa.h', - 'src/include/openssl/safestack.h', - 'src/include/openssl/sha.h', - 'src/include/openssl/siphash.h', - 'src/include/openssl/span.h', - 'src/include/openssl/stack.h', - 'src/include/openssl/thread.h', - 'src/include/openssl/type_check.h', - 'src/include/openssl/x509.h', - 'src/include/openssl/x509_vfy.h', - 'src/include/openssl/x509v3.h', - 'src/third_party/fiat/curve25519.c', - 'src/third_party/fiat/curve25519_32.h', - 'src/third_party/fiat/curve25519_64.h', - 'src/third_party/fiat/curve25519_tables.h', - 'src/third_party/fiat/internal.h', - 'src/third_party/fiat/p256_32.h', - 'src/third_party/fiat/p256_64.h', - 'src/third_party/sike/asm/fp_generic.c', - 'src/third_party/sike/curve_params.c', - 'src/third_party/sike/fpx.c', - 'src/third_party/sike/fpx.h', - 'src/third_party/sike/isogeny.c', - 'src/third_party/sike/isogeny.h', - 'src/third_party/sike/sike.c', - 'src/third_party/sike/sike.h', - 'src/third_party/sike/utils.h', - ], - 'boringssl_ios_aarch64_sources': [ - 'ios-aarch64/crypto/chacha/chacha-armv8.S', - 'ios-aarch64/crypto/fipsmodule/aesv8-armx64.S', - 'ios-aarch64/crypto/fipsmodule/armv8-mont.S', - 'ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S', - 'ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S', - 'ios-aarch64/crypto/fipsmodule/sha1-armv8.S', - 'ios-aarch64/crypto/fipsmodule/sha256-armv8.S', - 'ios-aarch64/crypto/fipsmodule/sha512-armv8.S', - 'ios-aarch64/crypto/fipsmodule/vpaes-armv8.S', - 'ios-aarch64/crypto/test/trampoline-armv8.S', - 'ios-aarch64/crypto/third_party/sike/asm/fp-armv8.S', - ], - 'boringssl_ios_arm_sources': [ - 'ios-arm/crypto/chacha/chacha-armv4.S', - 'ios-arm/crypto/fipsmodule/aes-armv4.S', - 'ios-arm/crypto/fipsmodule/aesv8-armx32.S', - 'ios-arm/crypto/fipsmodule/armv4-mont.S', - 'ios-arm/crypto/fipsmodule/bsaes-armv7.S', - 'ios-arm/crypto/fipsmodule/ghash-armv4.S', - 'ios-arm/crypto/fipsmodule/ghashv8-armx32.S', - 'ios-arm/crypto/fipsmodule/sha1-armv4-large.S', - 
'ios-arm/crypto/fipsmodule/sha256-armv4.S', - 'ios-arm/crypto/fipsmodule/sha512-armv4.S', - 'ios-arm/crypto/fipsmodule/vpaes-armv7.S', - 'ios-arm/crypto/test/trampoline-armv4.S', - ], - 'boringssl_linux_aarch64_sources': [ - 'linux-aarch64/crypto/chacha/chacha-armv8.S', - 'linux-aarch64/crypto/fipsmodule/aesv8-armx64.S', - 'linux-aarch64/crypto/fipsmodule/armv8-mont.S', - 'linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S', - 'linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S', - 'linux-aarch64/crypto/fipsmodule/sha1-armv8.S', - 'linux-aarch64/crypto/fipsmodule/sha256-armv8.S', - 'linux-aarch64/crypto/fipsmodule/sha512-armv8.S', - 'linux-aarch64/crypto/fipsmodule/vpaes-armv8.S', - 'linux-aarch64/crypto/test/trampoline-armv8.S', - 'linux-aarch64/crypto/third_party/sike/asm/fp-armv8.S', - ], - 'boringssl_linux_arm_sources': [ - 'linux-arm/crypto/chacha/chacha-armv4.S', - 'linux-arm/crypto/fipsmodule/aes-armv4.S', - 'linux-arm/crypto/fipsmodule/aesv8-armx32.S', - 'linux-arm/crypto/fipsmodule/armv4-mont.S', - 'linux-arm/crypto/fipsmodule/bsaes-armv7.S', - 'linux-arm/crypto/fipsmodule/ghash-armv4.S', - 'linux-arm/crypto/fipsmodule/ghashv8-armx32.S', - 'linux-arm/crypto/fipsmodule/sha1-armv4-large.S', - 'linux-arm/crypto/fipsmodule/sha256-armv4.S', - 'linux-arm/crypto/fipsmodule/sha512-armv4.S', - 'linux-arm/crypto/fipsmodule/vpaes-armv7.S', - 'linux-arm/crypto/test/trampoline-armv4.S', - 'src/crypto/curve25519/asm/x25519-asm-arm.S', - 'src/crypto/poly1305/poly1305_arm_asm.S', - ], - 'boringssl_linux_ppc64le_sources': [ - 'linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S', - 'linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S', - ], - 'boringssl_linux_x86_sources': [ - 'linux-x86/crypto/chacha/chacha-x86.S', - 'linux-x86/crypto/fipsmodule/aes-586.S', - 'linux-x86/crypto/fipsmodule/aesni-x86.S', - 'linux-x86/crypto/fipsmodule/bn-586.S', - 'linux-x86/crypto/fipsmodule/co-586.S', - 'linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S', - 'linux-x86/crypto/fipsmodule/ghash-x86.S', - 
'linux-x86/crypto/fipsmodule/md5-586.S', - 'linux-x86/crypto/fipsmodule/sha1-586.S', - 'linux-x86/crypto/fipsmodule/sha256-586.S', - 'linux-x86/crypto/fipsmodule/sha512-586.S', - 'linux-x86/crypto/fipsmodule/vpaes-x86.S', - 'linux-x86/crypto/fipsmodule/x86-mont.S', - 'linux-x86/crypto/test/trampoline-x86.S', - ], - 'boringssl_linux_x86_64_sources': [ - 'linux-x86_64/crypto/chacha/chacha-x86_64.S', - 'linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S', - 'linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S', - 'linux-x86_64/crypto/fipsmodule/aes-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/aesni-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/ghash-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/md5-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S', - 'linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S', - 'linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/rsaz-avx2.S', - 'linux-x86_64/crypto/fipsmodule/sha1-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/sha256-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/sha512-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S', - 'linux-x86_64/crypto/fipsmodule/x86_64-mont.S', - 'linux-x86_64/crypto/fipsmodule/x86_64-mont5.S', - 'linux-x86_64/crypto/test/trampoline-x86_64.S', - 'linux-x86_64/crypto/third_party/sike/asm/fp-x86_64.S', - 'src/crypto/hrss/asm/poly_rq_mul.S', - ], - 'boringssl_mac_x86_sources': [ - 'mac-x86/crypto/chacha/chacha-x86.S', - 'mac-x86/crypto/fipsmodule/aes-586.S', - 'mac-x86/crypto/fipsmodule/aesni-x86.S', - 'mac-x86/crypto/fipsmodule/bn-586.S', - 'mac-x86/crypto/fipsmodule/co-586.S', - 'mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S', - 'mac-x86/crypto/fipsmodule/ghash-x86.S', - 'mac-x86/crypto/fipsmodule/md5-586.S', - 'mac-x86/crypto/fipsmodule/sha1-586.S', - 'mac-x86/crypto/fipsmodule/sha256-586.S', - 
'mac-x86/crypto/fipsmodule/sha512-586.S', - 'mac-x86/crypto/fipsmodule/vpaes-x86.S', - 'mac-x86/crypto/fipsmodule/x86-mont.S', - 'mac-x86/crypto/test/trampoline-x86.S', - ], - 'boringssl_mac_x86_64_sources': [ - 'mac-x86_64/crypto/chacha/chacha-x86_64.S', - 'mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S', - 'mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S', - 'mac-x86_64/crypto/fipsmodule/aes-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/aesni-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/ghash-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/md5-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S', - 'mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S', - 'mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/rsaz-avx2.S', - 'mac-x86_64/crypto/fipsmodule/sha1-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/sha256-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/sha512-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S', - 'mac-x86_64/crypto/fipsmodule/x86_64-mont.S', - 'mac-x86_64/crypto/fipsmodule/x86_64-mont5.S', - 'mac-x86_64/crypto/test/trampoline-x86_64.S', - 'mac-x86_64/crypto/third_party/sike/asm/fp-x86_64.S', - ], - 'boringssl_win_x86_sources': [ - 'win-x86/crypto/chacha/chacha-x86.asm', - 'win-x86/crypto/fipsmodule/aes-586.asm', - 'win-x86/crypto/fipsmodule/aesni-x86.asm', - 'win-x86/crypto/fipsmodule/bn-586.asm', - 'win-x86/crypto/fipsmodule/co-586.asm', - 'win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm', - 'win-x86/crypto/fipsmodule/ghash-x86.asm', - 'win-x86/crypto/fipsmodule/md5-586.asm', - 'win-x86/crypto/fipsmodule/sha1-586.asm', - 'win-x86/crypto/fipsmodule/sha256-586.asm', - 'win-x86/crypto/fipsmodule/sha512-586.asm', - 'win-x86/crypto/fipsmodule/vpaes-x86.asm', - 'win-x86/crypto/fipsmodule/x86-mont.asm', - 'win-x86/crypto/test/trampoline-x86.asm', - ], - 'boringssl_win_x86_64_sources': [ - 
'win-x86_64/crypto/chacha/chacha-x86_64.asm', - 'win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm', - 'win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm', - 'win-x86_64/crypto/fipsmodule/aes-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/aesni-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/ghash-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/md5-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm', - 'win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm', - 'win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/rsaz-avx2.asm', - 'win-x86_64/crypto/fipsmodule/sha1-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/sha256-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/sha512-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm', - 'win-x86_64/crypto/fipsmodule/x86_64-mont.asm', - 'win-x86_64/crypto/fipsmodule/x86_64-mont5.asm', - 'win-x86_64/crypto/test/trampoline-x86_64.asm', - 'win-x86_64/crypto/third_party/sike/asm/fp-x86_64.asm', - ], - } -} diff --git a/packager/third_party/boringssl/boringssl_nacl.gyp b/packager/third_party/boringssl/boringssl_nacl.gyp deleted file mode 100644 index b99e00221d..0000000000 --- a/packager/third_party/boringssl/boringssl_nacl.gyp +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -{ - 'includes': [ - '../../native_client/build/untrusted.gypi', - ], - 'targets': [ - { - 'target_name': 'boringssl_nacl', - 'type': 'none', - 'variables': { - 'nlib_target': 'libboringssl_nacl.a', - 'build_glibc': 0, - 'build_newlib': 0, - 'build_pnacl_newlib': 1, - }, - 'dependencies': [ - '<(DEPTH)/native_client_sdk/native_client_sdk_untrusted.gyp:nacl_io_untrusted', - ], - 'includes': [ - # Include the auto-generated gypi file. 
- 'boringssl.gypi' - ], - 'sources': [ - '<@(boringssl_crypto_sources)', - '<@(boringssl_ssl_sources)', - ], - 'defines': [ - 'OPENSSL_NO_ASM', - ], - 'include_dirs': [ - 'src/include', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'src/include', - ], - }, - 'pnacl_compile_flags': [ - '-Wno-sometimes-uninitialized', - '-Wno-unused-variable', - ], - }, # target boringssl_nacl - ], -} diff --git a/packager/third_party/boringssl/boringssl_tests.gyp b/packager/third_party/boringssl/boringssl_tests.gyp deleted file mode 100644 index a9160315b9..0000000000 --- a/packager/third_party/boringssl/boringssl_tests.gyp +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -{ - 'includes': [ - 'boringssl_tests.gypi', - ], - 'conditions': [ - ['OS!="ios"', { - 'targets': [ - { - 'target_name': 'boringssl_unittests', - 'type': 'executable', - 'sources': [ - 'boringssl_unittest.cc', - ], - 'dependencies': [ - '<@(boringssl_test_targets)', - '../../base/base.gyp:base', - '../../base/base.gyp:run_all_unittests', - '../../base/base.gyp:test_support_base', - '../../testing/gtest.gyp:gtest', - ], - }, - ], - }], - ], -} diff --git a/packager/third_party/boringssl/boringssl_unittest.cc b/packager/third_party/boringssl/boringssl_unittest.cc deleted file mode 100644 index 068dafffa6..0000000000 --- a/packager/third_party/boringssl/boringssl_unittest.cc +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include - -#include - -#include "base/base_paths.h" -#include "base/command_line.h" -#include "base/files/file_path.h" -#include "base/logging.h" -#include "base/path_service.h" -#include "base/process/launch.h" -#include "base/strings/string_util.h" -#include "testing/gtest/include/gtest/gtest.h" - -namespace { - -void TestProcess(const std::string& name, - const std::vector& args) { - base::FilePath exe_dir; - ASSERT_TRUE(PathService::Get(base::DIR_EXE, &exe_dir)); - base::FilePath test_binary = - exe_dir.AppendASCII("boringssl_" + name); - base::CommandLine cmd(test_binary); - - for (size_t i = 0; i < args.size(); ++i) { - cmd.AppendArgNative(args[i]); - } - - std::string output; - EXPECT_TRUE(base::GetAppOutput(cmd, &output)); - // Account for Windows line endings. - base::ReplaceSubstringsAfterOffset(&output, 0, "\r\n", "\n"); - - const bool ok = output.size() >= 5 && - memcmp("PASS\n", &output[output.size() - 5], 5) == 0 && - (output.size() == 5 || output[output.size() - 6] == '\n'); - - EXPECT_TRUE(ok) << output; -} - -void TestSimple(const std::string& name) { - std::vector empty; - TestProcess(name, empty); -} - -bool BoringSSLPath(base::FilePath* result) { - if (!PathService::Get(base::DIR_SOURCE_ROOT, result)) - return false; - - *result = result->Append(FILE_PATH_LITERAL("third_party")); - *result = result->Append(FILE_PATH_LITERAL("boringssl")); - *result = result->Append(FILE_PATH_LITERAL("src")); - return true; -} - -bool CryptoCipherTestPath(base::FilePath *result) { - if (!BoringSSLPath(result)) - return false; - - *result = result->Append(FILE_PATH_LITERAL("crypto")); - *result = result->Append(FILE_PATH_LITERAL("cipher")); - *result = result->Append(FILE_PATH_LITERAL("test")); - return true; -} - -} // anonymous namespace - -struct AEADTest { - const base::CommandLine::CharType *name; - const base::FilePath::CharType *test_vector_filename; -}; - -static const AEADTest kAEADTests[] = { - {FILE_PATH_LITERAL("aes-128-gcm"), - 
FILE_PATH_LITERAL("aes_128_gcm_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-key-wrap"), - FILE_PATH_LITERAL("aes_128_key_wrap_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-gcm"), - FILE_PATH_LITERAL("aes_256_gcm_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-key-wrap"), - FILE_PATH_LITERAL("aes_256_key_wrap_tests.txt")}, - {FILE_PATH_LITERAL("chacha20-poly1305"), - FILE_PATH_LITERAL("chacha20_poly1305_tests.txt")}, - {FILE_PATH_LITERAL("rc4-md5-tls"), - FILE_PATH_LITERAL("rc4_md5_tls_tests.txt")}, - {FILE_PATH_LITERAL("rc4-sha1-tls"), - FILE_PATH_LITERAL("rc4_sha1_tls_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-cbc-sha1-tls"), - FILE_PATH_LITERAL("aes_128_cbc_sha1_tls_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-cbc-sha1-tls-implicit-iv"), - FILE_PATH_LITERAL("aes_128_cbc_sha1_tls_implicit_iv_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-cbc-sha256-tls"), - FILE_PATH_LITERAL("aes_128_cbc_sha256_tls_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-cbc-sha1-tls"), - FILE_PATH_LITERAL("aes_256_cbc_sha1_tls_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-cbc-sha1-tls-implicit-iv"), - FILE_PATH_LITERAL("aes_256_cbc_sha1_tls_implicit_iv_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-cbc-sha256-tls"), - FILE_PATH_LITERAL("aes_256_cbc_sha256_tls_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-cbc-sha384-tls"), - FILE_PATH_LITERAL("aes_256_cbc_sha384_tls_tests.txt")}, - {FILE_PATH_LITERAL("des-ede3-cbc-sha1-tls"), - FILE_PATH_LITERAL("des_ede3_cbc_sha1_tls_tests.txt")}, - {FILE_PATH_LITERAL("des-ede3-cbc-sha1-tls-implicit-iv"), - FILE_PATH_LITERAL("des_ede3_cbc_sha1_tls_implicit_iv_tests.txt")}, - {FILE_PATH_LITERAL("rc4-md5-ssl3"), - FILE_PATH_LITERAL("rc4_md5_ssl3_tests.txt")}, - {FILE_PATH_LITERAL("rc4-sha1-ssl3"), - FILE_PATH_LITERAL("rc4_sha1_ssl3_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-cbc-sha1-ssl3"), - FILE_PATH_LITERAL("aes_128_cbc_sha1_ssl3_tests.txt")}, - {FILE_PATH_LITERAL("aes-256-cbc-sha1-ssl3"), - FILE_PATH_LITERAL("aes_256_cbc_sha1_ssl3_tests.txt")}, - 
{FILE_PATH_LITERAL("des-ede3-cbc-sha1-ssl3"), - FILE_PATH_LITERAL("des_ede3_cbc_sha1_ssl3_tests.txt")}, - {FILE_PATH_LITERAL("aes-128-ctr-hmac-sha256"), - FILE_PATH_LITERAL("aes_128_ctr_hmac_sha256.txt")}, - {FILE_PATH_LITERAL("aes-256-ctr-hmac-sha256"), - FILE_PATH_LITERAL("aes_256_ctr_hmac_sha256.txt")}, -}; - -TEST(BoringSSL, AEADs) { - base::FilePath test_vector_dir; - ASSERT_TRUE(CryptoCipherTestPath(&test_vector_dir)); - - for (size_t i = 0; i < arraysize(kAEADTests); i++) { - const AEADTest& test = kAEADTests[i]; - SCOPED_TRACE(test.name); - - base::FilePath test_vector_file = - test_vector_dir.Append(test.test_vector_filename); - - std::vector args; - args.push_back(test.name); - args.push_back(test_vector_file.value()); - - TestProcess("aead_test", args); - } -} - -TEST(BoringSSL, AES) { - TestSimple("aes_test"); -} - -TEST(BoringSSL, Base64) { - TestSimple("base64_test"); -} - -TEST(BoringSSL, BIO) { - TestSimple("bio_test"); -} - -TEST(BoringSSL, BN) { - TestSimple("bn_test"); -} - -TEST(BoringSSL, ByteString) { - TestSimple("bytestring_test"); -} - -TEST(BoringSSL, ChaCha) { - TestSimple("chacha_test"); -} - -TEST(BoringSSL, Cipher) { - base::FilePath data_file; - ASSERT_TRUE(CryptoCipherTestPath(&data_file)); - data_file = data_file.Append(FILE_PATH_LITERAL("cipher_test.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("cipher_test", args); -} - -TEST(BoringSSL, CMAC) { - TestSimple("cmac_test"); -} - -TEST(BoringSSL, ConstantTime) { - TestSimple("constant_time_test"); -} - -TEST(BoringSSL, DH) { - TestSimple("dh_test"); -} - -TEST(BoringSSL, Digest) { - TestSimple("digest_test"); -} - -TEST(BoringSSL, DSA) { - TestSimple("dsa_test"); -} - -TEST(BoringSSL, EC) { - TestSimple("ec_test"); -} - -TEST(BoringSSL, ECDSA) { - TestSimple("ecdsa_test"); -} - -TEST(BoringSSL, ED25519) { - base::FilePath data_file; - ASSERT_TRUE(BoringSSLPath(&data_file)); - data_file = data_file.Append(FILE_PATH_LITERAL("crypto")); - data_file = 
data_file.Append(FILE_PATH_LITERAL("curve25519")); - data_file = data_file.Append(FILE_PATH_LITERAL("ed25519_tests.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("ed25519_test", args); -} - -TEST(BoringSSL, ERR) { - TestSimple("err_test"); -} - -TEST(BoringSSL, EVP) { - base::FilePath data_file; - ASSERT_TRUE(BoringSSLPath(&data_file)); - data_file = data_file.Append(FILE_PATH_LITERAL("crypto")); - data_file = data_file.Append(FILE_PATH_LITERAL("evp")); - data_file = data_file.Append(FILE_PATH_LITERAL("evp_tests.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("evp_test", args); -} - -TEST(BoringSSL, EVPExtra) { - TestSimple("evp_extra_test"); -} - -TEST(BoringSSL, ExampleMul) { - TestSimple("example_mul"); -} - -TEST(BoringSSL, GCM) { - TestSimple("gcm_test"); -} - -TEST(BoringSSL, HKDF) { - TestSimple("hkdf_test"); -} - -TEST(BoringSSL, HMAC) { - base::FilePath data_file; - ASSERT_TRUE(BoringSSLPath(&data_file)); - data_file = data_file.Append(FILE_PATH_LITERAL("crypto")); - data_file = data_file.Append(FILE_PATH_LITERAL("hmac")); - data_file = data_file.Append(FILE_PATH_LITERAL("hmac_tests.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("hmac_test", args); -} - -TEST(BoringSSL, LH) { - TestSimple("lhash_test"); -} - -TEST(BoringSSL, NewHope) { - TestSimple("newhope_test"); -} - -TEST(BoringSSL, NewHopeVectors) { - base::FilePath data_file; - ASSERT_TRUE(BoringSSLPath(&data_file)); - data_file = data_file.Append(FILE_PATH_LITERAL("crypto")); - data_file = data_file.Append(FILE_PATH_LITERAL("newhope")); - data_file = data_file.Append(FILE_PATH_LITERAL("newhope_test.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("newhope_vectors_test", args); -} - -TEST(BoringSSL, PBKDF) { - TestSimple("pbkdf_test"); -} - -TEST(BoringSSL, Poly1305) { - base::FilePath data_file; - ASSERT_TRUE(BoringSSLPath(&data_file)); - data_file = 
data_file.Append(FILE_PATH_LITERAL("crypto")); - data_file = data_file.Append(FILE_PATH_LITERAL("poly1305")); - data_file = data_file.Append(FILE_PATH_LITERAL("poly1305_test.txt")); - - std::vector args; - args.push_back(data_file.value()); - - TestProcess("poly1305_test", args); -} - -TEST(BoringSSL, PKCS7) { - TestSimple("pkcs7_test"); -} - -TEST(BoringSSL, PKCS8) { - TestSimple("pkcs8_test"); -} - -TEST(BoringSSL, PKCS12) { - TestSimple("pkcs12_test"); -} - -TEST(BoringSSL, PQueue) { - TestSimple("pqueue_test"); -} - -TEST(BoringSSL, RefcountTest) { - TestSimple("refcount_test"); -} - -TEST(BoringSSL, RSA) { - TestSimple("rsa_test"); -} - -TEST(BoringSSL, SSL) { - TestSimple("ssl_test"); -} - -TEST(BoringSSL, TabTest) { - TestSimple("tab_test"); -} - -TEST(BoringSSL, Thread) { - TestSimple("thread_test"); -} - -TEST(BoringSSL, V3NameTest) { - TestSimple("v3name_test"); -} - -TEST(BoringSSL, X25519) { - TestSimple("x25519_test"); -} - -TEST(BoringSSL, X509) { - TestSimple("x509_test"); -} diff --git a/packager/third_party/boringssl/err_data.c b/packager/third_party/boringssl/err_data.c deleted file mode 100644 index 7fcbc56ac0..0000000000 --- a/packager/third_party/boringssl/err_data.c +++ /dev/null @@ -1,821 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - - /* This file was generated by err_data_generate.go. */ - -#include -#include -#include - -OPENSSL_STATIC_ASSERT(ERR_LIB_NONE == 1, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_SYS == 2, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_BN == 3, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_RSA == 4, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_DH == 5, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_EVP == 6, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_BUF == 7, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_OBJ == 8, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_PEM == 9, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_DSA == 10, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_X509 == 11, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_ASN1 == 12, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_CONF == 13, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_CRYPTO == 14, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_EC == 15, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_SSL == 16, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_BIO == 17, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_PKCS7 == 18, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_PKCS8 == 19, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_X509V3 == 20, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_RAND == 21, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_ENGINE == 22, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_OCSP == 23, "library value 
changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_UI == 24, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_COMP == 25, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_ECDSA == 26, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_ECDH == 27, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_HMAC == 28, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_DIGEST == 29, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_CIPHER == 30, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_HKDF == 31, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_LIB_USER == 32, "library value changed"); -OPENSSL_STATIC_ASSERT(ERR_NUM_LIBS == 33, "number of libraries changed"); - -const uint32_t kOpenSSLReasonValues[] = { - 0xc32083a, 0xc328854, 0xc330863, 0xc338873, 0xc340882, 0xc34889b, - 0xc3508a7, 0xc3588c4, 0xc3608e4, 0xc3688f2, 0xc370902, 0xc37890f, - 0xc38091f, 0xc38892a, 0xc390940, 0xc39894f, 0xc3a0963, 0xc3a8847, - 0xc3b00ea, 0xc3b88d6, 0x10320847, 0x1032959f, 0x103315ab, 0x103395c4, - 0x103415d7, 0x10348f27, 0x10350c60, 0x103595ea, 0x10361614, 0x10369627, - 0x10371646, 0x1037965f, 0x10381674, 0x10389692, 0x103916a1, 0x103996bd, - 0x103a16d8, 0x103a96e7, 0x103b1703, 0x103b971e, 0x103c1744, 0x103c80ea, - 0x103d1755, 0x103d9769, 0x103e1788, 0x103e9797, 0x103f17ae, 0x103f97c1, - 0x10400c24, 0x104097d4, 0x104117f2, 0x10419805, 0x1042181f, 0x1042982f, - 0x10431843, 0x10439859, 0x10441871, 0x10449886, 0x1045189a, 0x104598ac, - 0x104605fd, 0x1046894f, 0x104718c1, 0x104798d8, 0x104818ed, 0x104898fb, - 0x10490e73, 0x10499735, 0x104a15ff, 0x14320c07, 0x14328c15, 0x14330c24, - 0x14338c36, 0x143400ac, 0x143480ea, 0x18320083, 0x18328f7d, 0x183300ac, - 0x18338f93, 0x18340fa7, 0x183480ea, 0x18350fbc, 0x18358fd4, 0x18360fe9, - 0x18368ffd, 0x18371021, 0x18379037, 0x1838104b, 0x1838905b, 0x18390a75, - 0x1839906b, 0x183a1091, 0x183a90b7, 0x183b0c7f, 0x183b9106, 0x183c1118, - 0x183c9123, 0x183d1133, 0x183d9144, 0x183e1155, 0x183e9167, 0x183f1190, - 
0x183f91a9, 0x184011c1, 0x184086d5, 0x184110da, 0x184190a5, 0x184210c4, - 0x18428c6c, 0x18431080, 0x184390ec, 0x203211fb, 0x203291e8, 0x24321207, - 0x24328995, 0x24331219, 0x24339226, 0x24341233, 0x24349245, 0x24351254, - 0x24359271, 0x2436127e, 0x2436928c, 0x2437129a, 0x243792a8, 0x243812b1, - 0x243892be, 0x243912d1, 0x28320c54, 0x28328c7f, 0x28330c24, 0x28338c92, - 0x28340c60, 0x283480ac, 0x283500ea, 0x28358c6c, 0x2c322f0c, 0x2c3292e8, - 0x2c332f1a, 0x2c33af2c, 0x2c342f40, 0x2c34af52, 0x2c352f6d, 0x2c35af7f, - 0x2c362f92, 0x2c36832d, 0x2c372f9f, 0x2c37afb1, 0x2c382fd6, 0x2c38afed, - 0x2c392ffb, 0x2c39b00b, 0x2c3a301d, 0x2c3ab031, 0x2c3b3042, 0x2c3bb061, - 0x2c3c12fa, 0x2c3c9310, 0x2c3d3075, 0x2c3d9329, 0x2c3e3092, 0x2c3eb0a0, - 0x2c3f30b8, 0x2c3fb0d0, 0x2c4030fa, 0x2c4091fb, 0x2c41310b, 0x2c41b11e, - 0x2c4211c1, 0x2c42b12f, 0x2c430722, 0x2c43b053, 0x2c442fc4, 0x2c44b0dd, - 0x30320000, 0x30328015, 0x3033001f, 0x30338038, 0x3034004a, 0x30348064, - 0x3035006b, 0x30358083, 0x30360094, 0x303680ac, 0x303700b9, 0x303780c8, - 0x303800ea, 0x303880f7, 0x3039010a, 0x30398125, 0x303a013a, 0x303a814e, - 0x303b0162, 0x303b8173, 0x303c018c, 0x303c81a9, 0x303d01b7, 0x303d81cb, - 0x303e01db, 0x303e81f4, 0x303f0204, 0x303f8217, 0x30400226, 0x30408232, - 0x30410247, 0x30418257, 0x3042026e, 0x3042827b, 0x3043028e, 0x3043829d, - 0x304402b2, 0x304482d3, 0x304502e6, 0x304582f9, 0x30460312, 0x3046832d, - 0x3047034a, 0x3047835c, 0x3048036a, 0x3048837b, 0x3049038a, 0x304983a2, - 0x304a03b4, 0x304a83c8, 0x304b03e0, 0x304b83f3, 0x304c03fe, 0x304c840f, - 0x304d041b, 0x304d8431, 0x304e043f, 0x304e8455, 0x304f0467, 0x304f8479, - 0x3050049c, 0x305084af, 0x305104c0, 0x305184d0, 0x305204e8, 0x305284fd, - 0x30530515, 0x30538529, 0x30540541, 0x3054855a, 0x30550573, 0x30558590, - 0x3056059b, 0x305685b3, 0x305705c3, 0x305785d4, 0x305805e7, 0x305885fd, - 0x30590606, 0x3059861b, 0x305a062e, 0x305a863d, 0x305b065d, 0x305b866c, - 0x305c068d, 0x305c86a9, 0x305d06b5, 0x305d86d5, 0x305e06f1, 0x305e8702, - 
0x305f0718, 0x305f8722, 0x3060048c, 0x34320b65, 0x34328b79, 0x34330b96, - 0x34338ba9, 0x34340bb8, 0x34348bf1, 0x34350bd5, 0x3c320083, 0x3c328cbc, - 0x3c330cd5, 0x3c338cf0, 0x3c340d0d, 0x3c348d37, 0x3c350d52, 0x3c358d78, - 0x3c360d91, 0x3c368da9, 0x3c370dba, 0x3c378dc8, 0x3c380dd5, 0x3c388de9, - 0x3c390c7f, 0x3c398e0c, 0x3c3a0e20, 0x3c3a890f, 0x3c3b0e30, 0x3c3b8e4b, - 0x3c3c0e5d, 0x3c3c8e90, 0x3c3d0e9a, 0x3c3d8eae, 0x3c3e0ebc, 0x3c3e8ee1, - 0x3c3f0ca8, 0x3c3f8eca, 0x3c4000ac, 0x3c4080ea, 0x3c410d28, 0x3c418d67, - 0x3c420e73, 0x3c428dfd, 0x40321971, 0x40329987, 0x403319b5, 0x403399bf, - 0x403419d6, 0x403499f4, 0x40351a04, 0x40359a16, 0x40361a23, 0x40369a2f, - 0x40371a44, 0x40379a56, 0x40381a61, 0x40389a73, 0x40390f27, 0x40399a83, - 0x403a1a96, 0x403a9ab7, 0x403b1ac8, 0x403b9ad8, 0x403c0064, 0x403c8083, - 0x403d1b5c, 0x403d9b72, 0x403e1b81, 0x403e9bb9, 0x403f1bd3, 0x403f9bfb, - 0x40401c10, 0x40409c24, 0x40411c41, 0x40419c5c, 0x40421c75, 0x40429c88, - 0x40431c9c, 0x40439cb4, 0x40441ccb, 0x404480ac, 0x40451ce0, 0x40459cf2, - 0x40461d16, 0x40469d36, 0x40471d44, 0x40479d6b, 0x40481ddc, 0x40489e0f, - 0x40491e26, 0x40499e40, 0x404a1e57, 0x404a9e75, 0x404b1e8d, 0x404b9ea4, - 0x404c1eba, 0x404c9ecc, 0x404d1eed, 0x404d9f26, 0x404e1f3a, 0x404e9f47, - 0x404f1f8e, 0x404f9fd4, 0x4050202b, 0x4050a03f, 0x40512072, 0x40522082, - 0x4052a0a6, 0x405320be, 0x4053a0d1, 0x405420e6, 0x4054a109, 0x40552117, - 0x4055a154, 0x40562161, 0x4056a17a, 0x40572192, 0x4057a1a5, 0x405821ba, - 0x4058a1e1, 0x40592210, 0x4059a23d, 0x405a2251, 0x405aa261, 0x405b2279, - 0x405ba28a, 0x405c229d, 0x405ca2dc, 0x405d22e9, 0x405da30e, 0x405e234c, - 0x405e8ab3, 0x405f236d, 0x405fa37a, 0x40602388, 0x4060a3aa, 0x4061240b, - 0x4061a443, 0x4062245a, 0x4062a46b, 0x40632490, 0x4063a4a5, 0x406424bc, - 0x4064a4e8, 0x40652503, 0x4065a51a, 0x40662532, 0x4066a55c, 0x40672587, - 0x4067a5cc, 0x40682614, 0x4068a635, 0x40692667, 0x4069a695, 0x406a26b6, - 0x406aa6d6, 0x406b285e, 0x406ba881, 0x406c2897, 0x406cab3a, 0x406d2b69, - 
0x406dab91, 0x406e2bbf, 0x406eac0c, 0x406f2c47, 0x406fac7f, 0x40702c92, - 0x4070acaf, 0x40710802, 0x4071acc1, 0x40722cd4, 0x4072ad0a, 0x40732d22, - 0x407394fa, 0x40742d36, 0x4074ad50, 0x40752d61, 0x4075ad75, 0x40762d83, - 0x407692be, 0x40772da8, 0x4077adca, 0x40782de5, 0x4078ae1e, 0x40792e35, - 0x4079ae4b, 0x407a2e77, 0x407aae8a, 0x407b2e9f, 0x407baeb1, 0x407c2ee2, - 0x407caeeb, 0x407d2650, 0x407d9fe4, 0x407e2dfa, 0x407ea1f1, 0x407f1d58, - 0x407f9afe, 0x40801f9e, 0x40809d80, 0x40812094, 0x40819f78, 0x40822baa, - 0x40829ae4, 0x408321cc, 0x4083a4cd, 0x40841d94, 0x4084a229, 0x408522ae, - 0x4085a3d2, 0x4086232e, 0x40869ffe, 0x40872bf0, 0x4087a420, 0x40881b45, - 0x4088a5df, 0x40891b94, 0x40899b21, 0x408a28cf, 0x408a9912, 0x408b2ec6, - 0x408bac5c, 0x408c22be, 0x408c992e, 0x408d1df5, 0x408d9dc6, 0x408e1f0f, - 0x408ea134, 0x408f25f3, 0x408fa3ee, 0x409025a8, 0x4090a300, 0x409128b7, - 0x40919954, 0x40921be1, 0x4092ac2b, 0x40932ced, 0x4093a00f, 0x40941da8, - 0x4094a8e8, 0x4095247c, 0x4095ae57, 0x40962bd7, 0x40969fb7, 0x4097205a, - 0x40979f5e, 0x41f42789, 0x41f9281b, 0x41fe270e, 0x41fea92b, 0x41ff2a1c, - 0x420327a2, 0x420827c4, 0x4208a800, 0x420926f2, 0x4209a83a, 0x420a2749, - 0x420aa729, 0x420b2769, 0x420ba7e2, 0x420c2a38, 0x420ca8f8, 0x420d2912, - 0x420da949, 0x42122963, 0x421729ff, 0x4217a9a5, 0x421c29c7, 0x421f2982, - 0x42212a4f, 0x422629e2, 0x422b2b1e, 0x422baacc, 0x422c2b06, 0x422caa8b, - 0x422d2a6a, 0x422daaeb, 0x422e2ab1, 0x4432072d, 0x4432873c, 0x44330748, - 0x44338756, 0x44340769, 0x4434877a, 0x44350781, 0x4435878b, 0x4436079e, - 0x443687b4, 0x443707c6, 0x443787d3, 0x443807e2, 0x443887ea, 0x44390802, - 0x44398810, 0x443a0823, 0x483212e8, 0x483292fa, 0x48331310, 0x48339329, - 0x4c32134e, 0x4c32935e, 0x4c331371, 0x4c339391, 0x4c3400ac, 0x4c3480ea, - 0x4c35139d, 0x4c3593ab, 0x4c3613c7, 0x4c3693ed, 0x4c3713fc, 0x4c37940a, - 0x4c38141f, 0x4c38942b, 0x4c39144b, 0x4c399475, 0x4c3a148e, 0x4c3a94a7, - 0x4c3b05fd, 0x4c3b94c0, 0x4c3c14d2, 0x4c3c94e1, 0x4c3d14fa, 0x4c3d8c47, - 
0x4c3e1567, 0x4c3e9509, 0x4c3f1589, 0x4c3f92be, 0x4c40151f, 0x4c40933a, - 0x4c411557, 0x4c4193da, 0x4c421543, 0x50323141, 0x5032b150, 0x5033315b, - 0x5033b16b, 0x50343184, 0x5034b19e, 0x503531ac, 0x5035b1c2, 0x503631d4, - 0x5036b1ea, 0x50373203, 0x5037b216, 0x5038322e, 0x5038b23f, 0x50393254, - 0x5039b268, 0x503a3288, 0x503ab29e, 0x503b32b6, 0x503bb2c8, 0x503c32e4, - 0x503cb2fb, 0x503d3314, 0x503db32a, 0x503e3337, 0x503eb34d, 0x503f335f, - 0x503f837b, 0x50403372, 0x5040b382, 0x5041339c, 0x5041b3ab, 0x504233c5, - 0x5042b3e2, 0x504333f2, 0x5043b402, 0x50443411, 0x50448431, 0x50453425, - 0x5045b443, 0x50463456, 0x5046b46c, 0x5047347e, 0x5047b493, 0x504834b9, - 0x5048b4c7, 0x504934da, 0x5049b4ef, 0x504a3505, 0x504ab515, 0x504b3535, - 0x504bb548, 0x504c356b, 0x504cb599, 0x504d35ab, 0x504db5c8, 0x504e35e3, - 0x504eb5ff, 0x504f3611, 0x504fb628, 0x50503637, 0x505086f1, 0x5051364a, - 0x58320f65, 0x68320f27, 0x68328c7f, 0x68330c92, 0x68338f35, 0x68340f45, - 0x683480ea, 0x6c320eed, 0x6c328c36, 0x6c330ef8, 0x6c338f11, 0x74320a1b, - 0x743280ac, 0x74330c47, 0x78320980, 0x78328995, 0x783309a1, 0x78338083, - 0x783409b0, 0x783489c5, 0x783509e4, 0x78358a06, 0x78360a1b, 0x78368a31, - 0x78370a41, 0x78378a62, 0x78380a75, 0x78388a87, 0x78390a94, 0x78398ab3, - 0x783a0ac8, 0x783a8ad6, 0x783b0ae0, 0x783b8af4, 0x783c0b0b, 0x783c8b20, - 0x783d0b37, 0x783d8b4c, 0x783e0aa2, 0x783e8a54, 0x7c3211d7, -}; - -const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); - -const char kOpenSSLReasonStringData[] = - "ASN1_LENGTH_MISMATCH\0" - "AUX_ERROR\0" - "BAD_GET_ASN1_OBJECT_CALL\0" - "BAD_OBJECT_HEADER\0" - "BMPSTRING_IS_WRONG_LENGTH\0" - "BN_LIB\0" - "BOOLEAN_IS_WRONG_LENGTH\0" - "BUFFER_TOO_SMALL\0" - "CONTEXT_NOT_INITIALISED\0" - "DECODE_ERROR\0" - "DEPTH_EXCEEDED\0" - "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\0" - "ENCODE_ERROR\0" - "ERROR_GETTING_TIME\0" - "EXPECTING_AN_ASN1_SEQUENCE\0" - "EXPECTING_AN_INTEGER\0" - "EXPECTING_AN_OBJECT\0" - 
"EXPECTING_A_BOOLEAN\0" - "EXPECTING_A_TIME\0" - "EXPLICIT_LENGTH_MISMATCH\0" - "EXPLICIT_TAG_NOT_CONSTRUCTED\0" - "FIELD_MISSING\0" - "FIRST_NUM_TOO_LARGE\0" - "HEADER_TOO_LONG\0" - "ILLEGAL_BITSTRING_FORMAT\0" - "ILLEGAL_BOOLEAN\0" - "ILLEGAL_CHARACTERS\0" - "ILLEGAL_FORMAT\0" - "ILLEGAL_HEX\0" - "ILLEGAL_IMPLICIT_TAG\0" - "ILLEGAL_INTEGER\0" - "ILLEGAL_NESTED_TAGGING\0" - "ILLEGAL_NULL\0" - "ILLEGAL_NULL_VALUE\0" - "ILLEGAL_OBJECT\0" - "ILLEGAL_OPTIONAL_ANY\0" - "ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE\0" - "ILLEGAL_TAGGED_ANY\0" - "ILLEGAL_TIME_VALUE\0" - "INTEGER_NOT_ASCII_FORMAT\0" - "INTEGER_TOO_LARGE_FOR_LONG\0" - "INVALID_BIT_STRING_BITS_LEFT\0" - "INVALID_BMPSTRING\0" - "INVALID_DIGIT\0" - "INVALID_MODIFIER\0" - "INVALID_NUMBER\0" - "INVALID_OBJECT_ENCODING\0" - "INVALID_SEPARATOR\0" - "INVALID_TIME_FORMAT\0" - "INVALID_UNIVERSALSTRING\0" - "INVALID_UTF8STRING\0" - "LIST_ERROR\0" - "MISSING_ASN1_EOS\0" - "MISSING_EOC\0" - "MISSING_SECOND_NUMBER\0" - "MISSING_VALUE\0" - "MSTRING_NOT_UNIVERSAL\0" - "MSTRING_WRONG_TAG\0" - "NESTED_ASN1_ERROR\0" - "NESTED_ASN1_STRING\0" - "NESTED_TOO_DEEP\0" - "NON_HEX_CHARACTERS\0" - "NOT_ASCII_FORMAT\0" - "NOT_ENOUGH_DATA\0" - "NO_MATCHING_CHOICE_TYPE\0" - "NULL_IS_WRONG_LENGTH\0" - "OBJECT_NOT_ASCII_FORMAT\0" - "ODD_NUMBER_OF_CHARS\0" - "SECOND_NUMBER_TOO_LARGE\0" - "SEQUENCE_LENGTH_MISMATCH\0" - "SEQUENCE_NOT_CONSTRUCTED\0" - "SEQUENCE_OR_SET_NEEDS_CONFIG\0" - "SHORT_LINE\0" - "STREAMING_NOT_SUPPORTED\0" - "STRING_TOO_LONG\0" - "STRING_TOO_SHORT\0" - "TAG_VALUE_TOO_HIGH\0" - "TIME_NOT_ASCII_FORMAT\0" - "TOO_LONG\0" - "TYPE_NOT_CONSTRUCTED\0" - "TYPE_NOT_PRIMITIVE\0" - "UNEXPECTED_EOC\0" - "UNIVERSALSTRING_IS_WRONG_LENGTH\0" - "UNKNOWN_FORMAT\0" - "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\0" - "UNKNOWN_SIGNATURE_ALGORITHM\0" - "UNKNOWN_TAG\0" - "UNSUPPORTED_ANY_DEFINED_BY_TYPE\0" - "UNSUPPORTED_PUBLIC_KEY_TYPE\0" - "UNSUPPORTED_TYPE\0" - "WRONG_PUBLIC_KEY_TYPE\0" - "WRONG_TAG\0" - "WRONG_TYPE\0" - "BAD_FOPEN_MODE\0" - "BROKEN_PIPE\0" - 
"CONNECT_ERROR\0" - "ERROR_SETTING_NBIO\0" - "INVALID_ARGUMENT\0" - "IN_USE\0" - "KEEPALIVE\0" - "NBIO_CONNECT_ERROR\0" - "NO_HOSTNAME_SPECIFIED\0" - "NO_PORT_SPECIFIED\0" - "NO_SUCH_FILE\0" - "NULL_PARAMETER\0" - "SYS_LIB\0" - "UNABLE_TO_CREATE_SOCKET\0" - "UNINITIALIZED\0" - "UNSUPPORTED_METHOD\0" - "WRITE_TO_READ_ONLY_BIO\0" - "ARG2_LT_ARG3\0" - "BAD_ENCODING\0" - "BAD_RECIPROCAL\0" - "BIGNUM_TOO_LONG\0" - "BITS_TOO_SMALL\0" - "CALLED_WITH_EVEN_MODULUS\0" - "DIV_BY_ZERO\0" - "EXPAND_ON_STATIC_BIGNUM_DATA\0" - "INPUT_NOT_REDUCED\0" - "INVALID_INPUT\0" - "INVALID_RANGE\0" - "NEGATIVE_NUMBER\0" - "NOT_A_SQUARE\0" - "NOT_INITIALIZED\0" - "NO_INVERSE\0" - "PRIVATE_KEY_TOO_LARGE\0" - "P_IS_NOT_PRIME\0" - "TOO_MANY_ITERATIONS\0" - "TOO_MANY_TEMPORARY_VARIABLES\0" - "AES_KEY_SETUP_FAILED\0" - "BAD_DECRYPT\0" - "BAD_KEY_LENGTH\0" - "CTRL_NOT_IMPLEMENTED\0" - "CTRL_OPERATION_NOT_IMPLEMENTED\0" - "DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH\0" - "INITIALIZATION_ERROR\0" - "INPUT_NOT_INITIALIZED\0" - "INVALID_AD_SIZE\0" - "INVALID_KEY_LENGTH\0" - "INVALID_NONCE\0" - "INVALID_NONCE_SIZE\0" - "INVALID_OPERATION\0" - "IV_TOO_LARGE\0" - "NO_CIPHER_SET\0" - "NO_DIRECTION_SET\0" - "OUTPUT_ALIASES_INPUT\0" - "TAG_TOO_LARGE\0" - "TOO_LARGE\0" - "UNSUPPORTED_AD_SIZE\0" - "UNSUPPORTED_INPUT_SIZE\0" - "UNSUPPORTED_KEY_SIZE\0" - "UNSUPPORTED_NONCE_SIZE\0" - "UNSUPPORTED_TAG_SIZE\0" - "WRONG_FINAL_BLOCK_LENGTH\0" - "LIST_CANNOT_BE_NULL\0" - "MISSING_CLOSE_SQUARE_BRACKET\0" - "MISSING_EQUAL_SIGN\0" - "NO_CLOSE_BRACE\0" - "UNABLE_TO_CREATE_NEW_SECTION\0" - "VARIABLE_EXPANSION_TOO_LONG\0" - "VARIABLE_HAS_NO_VALUE\0" - "BAD_GENERATOR\0" - "INVALID_PUBKEY\0" - "MODULUS_TOO_LARGE\0" - "NO_PRIVATE_VALUE\0" - "UNKNOWN_HASH\0" - "BAD_Q_VALUE\0" - "BAD_VERSION\0" - "INVALID_PARAMETERS\0" - "MISSING_PARAMETERS\0" - "NEED_NEW_SETUP_VALUES\0" - "BIGNUM_OUT_OF_RANGE\0" - "COORDINATES_OUT_OF_RANGE\0" - "D2I_ECPKPARAMETERS_FAILURE\0" - "EC_GROUP_NEW_BY_NAME_FAILURE\0" - "GROUP2PKPARAMETERS_FAILURE\0" - 
"GROUP_MISMATCH\0" - "I2D_ECPKPARAMETERS_FAILURE\0" - "INCOMPATIBLE_OBJECTS\0" - "INVALID_COFACTOR\0" - "INVALID_COMPRESSED_POINT\0" - "INVALID_COMPRESSION_BIT\0" - "INVALID_ENCODING\0" - "INVALID_FIELD\0" - "INVALID_FORM\0" - "INVALID_GROUP_ORDER\0" - "INVALID_PRIVATE_KEY\0" - "INVALID_SCALAR\0" - "MISSING_PRIVATE_KEY\0" - "NON_NAMED_CURVE\0" - "PKPARAMETERS2GROUP_FAILURE\0" - "POINT_AT_INFINITY\0" - "POINT_IS_NOT_ON_CURVE\0" - "PUBLIC_KEY_VALIDATION_FAILED\0" - "SLOT_FULL\0" - "UNDEFINED_GENERATOR\0" - "UNKNOWN_GROUP\0" - "UNKNOWN_ORDER\0" - "WRONG_CURVE_PARAMETERS\0" - "WRONG_ORDER\0" - "KDF_FAILED\0" - "POINT_ARITHMETIC_FAILURE\0" - "UNKNOWN_DIGEST_LENGTH\0" - "BAD_SIGNATURE\0" - "NOT_IMPLEMENTED\0" - "RANDOM_NUMBER_GENERATION_FAILED\0" - "OPERATION_NOT_SUPPORTED\0" - "COMMAND_NOT_SUPPORTED\0" - "DIFFERENT_KEY_TYPES\0" - "DIFFERENT_PARAMETERS\0" - "EXPECTING_AN_EC_KEY_KEY\0" - "EXPECTING_AN_RSA_KEY\0" - "EXPECTING_A_DSA_KEY\0" - "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\0" - "INVALID_DIGEST_LENGTH\0" - "INVALID_DIGEST_TYPE\0" - "INVALID_KEYBITS\0" - "INVALID_MGF1_MD\0" - "INVALID_PADDING_MODE\0" - "INVALID_PEER_KEY\0" - "INVALID_PSS_SALTLEN\0" - "INVALID_SIGNATURE\0" - "KEYS_NOT_SET\0" - "MEMORY_LIMIT_EXCEEDED\0" - "NOT_A_PRIVATE_KEY\0" - "NOT_XOF_OR_INVALID_LENGTH\0" - "NO_DEFAULT_DIGEST\0" - "NO_KEY_SET\0" - "NO_MDC2_SUPPORT\0" - "NO_NID_FOR_CURVE\0" - "NO_OPERATION_SET\0" - "NO_PARAMETERS_SET\0" - "OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE\0" - "OPERATON_NOT_INITIALIZED\0" - "UNKNOWN_PUBLIC_KEY_TYPE\0" - "UNSUPPORTED_ALGORITHM\0" - "OUTPUT_TOO_LARGE\0" - "INVALID_OID_STRING\0" - "UNKNOWN_NID\0" - "BAD_BASE64_DECODE\0" - "BAD_END_LINE\0" - "BAD_IV_CHARS\0" - "BAD_PASSWORD_READ\0" - "CIPHER_IS_NULL\0" - "ERROR_CONVERTING_PRIVATE_KEY\0" - "NOT_DEK_INFO\0" - "NOT_ENCRYPTED\0" - "NOT_PROC_TYPE\0" - "NO_START_LINE\0" - "READ_KEY\0" - "SHORT_HEADER\0" - "UNSUPPORTED_CIPHER\0" - "UNSUPPORTED_ENCRYPTION\0" - "BAD_PKCS7_VERSION\0" - "NOT_PKCS7_SIGNED_DATA\0" - 
"NO_CERTIFICATES_INCLUDED\0" - "NO_CRLS_INCLUDED\0" - "BAD_ITERATION_COUNT\0" - "BAD_PKCS12_DATA\0" - "BAD_PKCS12_VERSION\0" - "CIPHER_HAS_NO_OBJECT_IDENTIFIER\0" - "CRYPT_ERROR\0" - "ENCRYPT_ERROR\0" - "ERROR_SETTING_CIPHER_PARAMS\0" - "INCORRECT_PASSWORD\0" - "INVALID_CHARACTERS\0" - "KEYGEN_FAILURE\0" - "KEY_GEN_ERROR\0" - "METHOD_NOT_SUPPORTED\0" - "MISSING_MAC\0" - "MULTIPLE_PRIVATE_KEYS_IN_PKCS12\0" - "PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED\0" - "PKCS12_TOO_DEEPLY_NESTED\0" - "PRIVATE_KEY_DECODE_ERROR\0" - "PRIVATE_KEY_ENCODE_ERROR\0" - "UNKNOWN_ALGORITHM\0" - "UNKNOWN_CIPHER\0" - "UNKNOWN_CIPHER_ALGORITHM\0" - "UNKNOWN_DIGEST\0" - "UNSUPPORTED_KEYLENGTH\0" - "UNSUPPORTED_KEY_DERIVATION_FUNCTION\0" - "UNSUPPORTED_OPTIONS\0" - "UNSUPPORTED_PRF\0" - "UNSUPPORTED_PRIVATE_KEY_ALGORITHM\0" - "UNSUPPORTED_SALT_TYPE\0" - "BAD_E_VALUE\0" - "BAD_FIXED_HEADER_DECRYPT\0" - "BAD_PAD_BYTE_COUNT\0" - "BAD_RSA_PARAMETERS\0" - "BLOCK_TYPE_IS_NOT_01\0" - "BLOCK_TYPE_IS_NOT_02\0" - "BN_NOT_INITIALIZED\0" - "CANNOT_RECOVER_MULTI_PRIME_KEY\0" - "CRT_PARAMS_ALREADY_GIVEN\0" - "CRT_VALUES_INCORRECT\0" - "DATA_LEN_NOT_EQUAL_TO_MOD_LEN\0" - "DATA_TOO_LARGE\0" - "DATA_TOO_LARGE_FOR_KEY_SIZE\0" - "DATA_TOO_LARGE_FOR_MODULUS\0" - "DATA_TOO_SMALL\0" - "DATA_TOO_SMALL_FOR_KEY_SIZE\0" - "DIGEST_TOO_BIG_FOR_RSA_KEY\0" - "D_E_NOT_CONGRUENT_TO_1\0" - "D_OUT_OF_RANGE\0" - "EMPTY_PUBLIC_KEY\0" - "FIRST_OCTET_INVALID\0" - "INCONSISTENT_SET_OF_CRT_VALUES\0" - "INTERNAL_ERROR\0" - "INVALID_MESSAGE_LENGTH\0" - "KEY_SIZE_TOO_SMALL\0" - "LAST_OCTET_INVALID\0" - "MUST_HAVE_AT_LEAST_TWO_PRIMES\0" - "NO_PUBLIC_EXPONENT\0" - "NULL_BEFORE_BLOCK_MISSING\0" - "N_NOT_EQUAL_P_Q\0" - "OAEP_DECODING_ERROR\0" - "ONLY_ONE_OF_P_Q_GIVEN\0" - "OUTPUT_BUFFER_TOO_SMALL\0" - "PADDING_CHECK_FAILED\0" - "PKCS_DECODING_ERROR\0" - "SLEN_CHECK_FAILED\0" - "SLEN_RECOVERY_FAILED\0" - "UNKNOWN_ALGORITHM_TYPE\0" - "UNKNOWN_PADDING_TYPE\0" - "VALUE_MISSING\0" - "WRONG_SIGNATURE_LENGTH\0" - "ALPN_MISMATCH_ON_EARLY_DATA\0" - 
"APPLICATION_DATA_INSTEAD_OF_HANDSHAKE\0" - "APPLICATION_DATA_ON_SHUTDOWN\0" - "APP_DATA_IN_HANDSHAKE\0" - "ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT\0" - "BAD_ALERT\0" - "BAD_CHANGE_CIPHER_SPEC\0" - "BAD_DATA_RETURNED_BY_CALLBACK\0" - "BAD_DH_P_LENGTH\0" - "BAD_DIGEST_LENGTH\0" - "BAD_ECC_CERT\0" - "BAD_ECPOINT\0" - "BAD_HANDSHAKE_RECORD\0" - "BAD_HELLO_REQUEST\0" - "BAD_LENGTH\0" - "BAD_PACKET_LENGTH\0" - "BAD_RSA_ENCRYPT\0" - "BAD_SRTP_MKI_VALUE\0" - "BAD_SRTP_PROTECTION_PROFILE_LIST\0" - "BAD_SSL_FILETYPE\0" - "BAD_WRITE_RETRY\0" - "BIO_NOT_SET\0" - "BLOCK_CIPHER_PAD_IS_WRONG\0" - "BUFFERED_MESSAGES_ON_CIPHER_CHANGE\0" - "CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD\0" - "CANNOT_PARSE_LEAF_CERT\0" - "CA_DN_LENGTH_MISMATCH\0" - "CA_DN_TOO_LONG\0" - "CCS_RECEIVED_EARLY\0" - "CERTIFICATE_AND_PRIVATE_KEY_MISMATCH\0" - "CERTIFICATE_VERIFY_FAILED\0" - "CERT_CB_ERROR\0" - "CERT_DECOMPRESSION_FAILED\0" - "CERT_LENGTH_MISMATCH\0" - "CHANNEL_ID_NOT_P256\0" - "CHANNEL_ID_SIGNATURE_INVALID\0" - "CIPHER_OR_HASH_UNAVAILABLE\0" - "CLIENTHELLO_PARSE_FAILED\0" - "CLIENTHELLO_TLSEXT\0" - "CONNECTION_REJECTED\0" - "CONNECTION_TYPE_NOT_SET\0" - "CUSTOM_EXTENSION_ERROR\0" - "DATA_LENGTH_TOO_LONG\0" - "DECRYPTION_FAILED\0" - "DECRYPTION_FAILED_OR_BAD_RECORD_MAC\0" - "DH_PUBLIC_VALUE_LENGTH_IS_WRONG\0" - "DH_P_TOO_LONG\0" - "DIGEST_CHECK_FAILED\0" - "DOWNGRADE_DETECTED\0" - "DTLS_MESSAGE_TOO_BIG\0" - "DUPLICATE_EXTENSION\0" - "DUPLICATE_KEY_SHARE\0" - "DUPLICATE_SIGNATURE_ALGORITHM\0" - "EARLY_DATA_NOT_IN_USE\0" - "ECC_CERT_NOT_FOR_SIGNING\0" - "EMPTY_HELLO_RETRY_REQUEST\0" - "EMS_STATE_INCONSISTENT\0" - "ENCRYPTED_LENGTH_TOO_LONG\0" - "ERROR_ADDING_EXTENSION\0" - "ERROR_IN_RECEIVED_CIPHER_LIST\0" - "ERROR_PARSING_EXTENSION\0" - "EXCESSIVE_MESSAGE_SIZE\0" - "EXTRA_DATA_IN_MESSAGE\0" - "FRAGMENT_MISMATCH\0" - "GOT_NEXT_PROTO_WITHOUT_EXTENSION\0" - "HANDSHAKE_FAILURE_ON_CLIENT_HELLO\0" - "HANDSHAKE_NOT_COMPLETE\0" - "HTTPS_PROXY_REQUEST\0" - "HTTP_REQUEST\0" - "INAPPROPRIATE_FALLBACK\0" - 
"INCONSISTENT_CLIENT_HELLO\0" - "INVALID_ALPN_PROTOCOL\0" - "INVALID_COMMAND\0" - "INVALID_COMPRESSION_LIST\0" - "INVALID_DELEGATED_CREDENTIAL\0" - "INVALID_MESSAGE\0" - "INVALID_OUTER_RECORD_TYPE\0" - "INVALID_SCT_LIST\0" - "INVALID_SIGNATURE_ALGORITHM\0" - "INVALID_SSL_SESSION\0" - "INVALID_TICKET_KEYS_LENGTH\0" - "KEY_USAGE_BIT_INCORRECT\0" - "LENGTH_MISMATCH\0" - "MISSING_EXTENSION\0" - "MISSING_KEY_SHARE\0" - "MISSING_RSA_CERTIFICATE\0" - "MISSING_TMP_DH_KEY\0" - "MISSING_TMP_ECDH_KEY\0" - "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\0" - "MTU_TOO_SMALL\0" - "NEGOTIATED_BOTH_NPN_AND_ALPN\0" - "NEGOTIATED_TB_WITHOUT_EMS_OR_RI\0" - "NESTED_GROUP\0" - "NO_CERTIFICATES_RETURNED\0" - "NO_CERTIFICATE_ASSIGNED\0" - "NO_CERTIFICATE_SET\0" - "NO_CIPHERS_AVAILABLE\0" - "NO_CIPHERS_PASSED\0" - "NO_CIPHERS_SPECIFIED\0" - "NO_CIPHER_MATCH\0" - "NO_COMMON_SIGNATURE_ALGORITHMS\0" - "NO_COMPRESSION_SPECIFIED\0" - "NO_GROUPS_SPECIFIED\0" - "NO_METHOD_SPECIFIED\0" - "NO_P256_SUPPORT\0" - "NO_PRIVATE_KEY_ASSIGNED\0" - "NO_RENEGOTIATION\0" - "NO_REQUIRED_DIGEST\0" - "NO_SHARED_CIPHER\0" - "NO_SHARED_GROUP\0" - "NO_SUPPORTED_VERSIONS_ENABLED\0" - "NULL_SSL_CTX\0" - "NULL_SSL_METHOD_PASSED\0" - "OCSP_CB_ERROR\0" - "OLD_SESSION_CIPHER_NOT_RETURNED\0" - "OLD_SESSION_PRF_HASH_MISMATCH\0" - "OLD_SESSION_VERSION_NOT_RETURNED\0" - "PARSE_TLSEXT\0" - "PATH_TOO_LONG\0" - "PEER_DID_NOT_RETURN_A_CERTIFICATE\0" - "PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE\0" - "PRE_SHARED_KEY_MUST_BE_LAST\0" - "PRIVATE_KEY_OPERATION_FAILED\0" - "PROTOCOL_IS_SHUTDOWN\0" - "PSK_IDENTITY_BINDER_COUNT_MISMATCH\0" - "PSK_IDENTITY_NOT_FOUND\0" - "PSK_NO_CLIENT_CB\0" - "PSK_NO_SERVER_CB\0" - "QUIC_INTERNAL_ERROR\0" - "READ_TIMEOUT_EXPIRED\0" - "RECORD_LENGTH_MISMATCH\0" - "RECORD_TOO_LARGE\0" - "RENEGOTIATION_EMS_MISMATCH\0" - "RENEGOTIATION_ENCODING_ERR\0" - "RENEGOTIATION_MISMATCH\0" - "REQUIRED_CIPHER_MISSING\0" - "RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION\0" - "RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION\0" - 
"SCSV_RECEIVED_WHEN_RENEGOTIATING\0" - "SECOND_SERVERHELLO_VERSION_MISMATCH\0" - "SERVERHELLO_TLSEXT\0" - "SERVER_CERT_CHANGED\0" - "SERVER_ECHOED_INVALID_SESSION_ID\0" - "SESSION_ID_CONTEXT_UNINITIALIZED\0" - "SESSION_MAY_NOT_BE_CREATED\0" - "SHUTDOWN_WHILE_IN_INIT\0" - "SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER\0" - "SRTP_COULD_NOT_ALLOCATE_PROFILES\0" - "SRTP_UNKNOWN_PROTECTION_PROFILE\0" - "SSL3_EXT_INVALID_SERVERNAME\0" - "SSLV3_ALERT_BAD_CERTIFICATE\0" - "SSLV3_ALERT_BAD_RECORD_MAC\0" - "SSLV3_ALERT_CERTIFICATE_EXPIRED\0" - "SSLV3_ALERT_CERTIFICATE_REVOKED\0" - "SSLV3_ALERT_CERTIFICATE_UNKNOWN\0" - "SSLV3_ALERT_CLOSE_NOTIFY\0" - "SSLV3_ALERT_DECOMPRESSION_FAILURE\0" - "SSLV3_ALERT_HANDSHAKE_FAILURE\0" - "SSLV3_ALERT_ILLEGAL_PARAMETER\0" - "SSLV3_ALERT_NO_CERTIFICATE\0" - "SSLV3_ALERT_UNEXPECTED_MESSAGE\0" - "SSLV3_ALERT_UNSUPPORTED_CERTIFICATE\0" - "SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION\0" - "SSL_HANDSHAKE_FAILURE\0" - "SSL_SESSION_ID_CONTEXT_TOO_LONG\0" - "SSL_SESSION_ID_TOO_LONG\0" - "TICKET_ENCRYPTION_FAILED\0" - "TLS13_DOWNGRADE\0" - "TLSV1_ALERT_ACCESS_DENIED\0" - "TLSV1_ALERT_DECODE_ERROR\0" - "TLSV1_ALERT_DECRYPTION_FAILED\0" - "TLSV1_ALERT_DECRYPT_ERROR\0" - "TLSV1_ALERT_EXPORT_RESTRICTION\0" - "TLSV1_ALERT_INAPPROPRIATE_FALLBACK\0" - "TLSV1_ALERT_INSUFFICIENT_SECURITY\0" - "TLSV1_ALERT_INTERNAL_ERROR\0" - "TLSV1_ALERT_NO_RENEGOTIATION\0" - "TLSV1_ALERT_PROTOCOL_VERSION\0" - "TLSV1_ALERT_RECORD_OVERFLOW\0" - "TLSV1_ALERT_UNKNOWN_CA\0" - "TLSV1_ALERT_USER_CANCELLED\0" - "TLSV1_BAD_CERTIFICATE_HASH_VALUE\0" - "TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\0" - "TLSV1_CERTIFICATE_REQUIRED\0" - "TLSV1_CERTIFICATE_UNOBTAINABLE\0" - "TLSV1_UNKNOWN_PSK_IDENTITY\0" - "TLSV1_UNRECOGNIZED_NAME\0" - "TLSV1_UNSUPPORTED_EXTENSION\0" - "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\0" - "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\0" - "TOO_MANY_EMPTY_FRAGMENTS\0" - "TOO_MANY_KEY_UPDATES\0" - "TOO_MANY_WARNING_ALERTS\0" - "TOO_MUCH_READ_EARLY_DATA\0" - 
"TOO_MUCH_SKIPPED_EARLY_DATA\0" - "UNABLE_TO_FIND_ECDH_PARAMETERS\0" - "UNCOMPRESSED_CERT_TOO_LARGE\0" - "UNEXPECTED_EXTENSION\0" - "UNEXPECTED_EXTENSION_ON_EARLY_DATA\0" - "UNEXPECTED_MESSAGE\0" - "UNEXPECTED_OPERATOR_IN_GROUP\0" - "UNEXPECTED_RECORD\0" - "UNKNOWN_ALERT_TYPE\0" - "UNKNOWN_CERTIFICATE_TYPE\0" - "UNKNOWN_CERT_COMPRESSION_ALG\0" - "UNKNOWN_CIPHER_RETURNED\0" - "UNKNOWN_CIPHER_TYPE\0" - "UNKNOWN_KEY_EXCHANGE_TYPE\0" - "UNKNOWN_PROTOCOL\0" - "UNKNOWN_SSL_VERSION\0" - "UNKNOWN_STATE\0" - "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\0" - "UNSUPPORTED_COMPRESSION_ALGORITHM\0" - "UNSUPPORTED_ELLIPTIC_CURVE\0" - "UNSUPPORTED_PROTOCOL\0" - "UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY\0" - "WRONG_CERTIFICATE_TYPE\0" - "WRONG_CIPHER_RETURNED\0" - "WRONG_CURVE\0" - "WRONG_ENCRYPTION_LEVEL_RECEIVED\0" - "WRONG_MESSAGE_TYPE\0" - "WRONG_SIGNATURE_TYPE\0" - "WRONG_SSL_VERSION\0" - "WRONG_VERSION_NUMBER\0" - "WRONG_VERSION_ON_EARLY_DATA\0" - "X509_LIB\0" - "X509_VERIFICATION_SETUP_PROBLEMS\0" - "AKID_MISMATCH\0" - "BAD_X509_FILETYPE\0" - "BASE64_DECODE_ERROR\0" - "CANT_CHECK_DH_KEY\0" - "CERT_ALREADY_IN_HASH_TABLE\0" - "CRL_ALREADY_DELTA\0" - "CRL_VERIFY_FAILURE\0" - "IDP_MISMATCH\0" - "INVALID_DIRECTORY\0" - "INVALID_FIELD_NAME\0" - "INVALID_PARAMETER\0" - "INVALID_PSS_PARAMETERS\0" - "INVALID_TRUST\0" - "ISSUER_MISMATCH\0" - "KEY_TYPE_MISMATCH\0" - "KEY_VALUES_MISMATCH\0" - "LOADING_CERT_DIR\0" - "LOADING_DEFAULTS\0" - "NAME_TOO_LONG\0" - "NEWER_CRL_NOT_NEWER\0" - "NO_CERT_SET_FOR_US_TO_VERIFY\0" - "NO_CRL_NUMBER\0" - "PUBLIC_KEY_DECODE_ERROR\0" - "PUBLIC_KEY_ENCODE_ERROR\0" - "SHOULD_RETRY\0" - "SIGNATURE_ALGORITHM_MISMATCH\0" - "UNKNOWN_KEY_TYPE\0" - "UNKNOWN_PURPOSE_ID\0" - "UNKNOWN_TRUST_ID\0" - "WRONG_LOOKUP_TYPE\0" - "BAD_IP_ADDRESS\0" - "BAD_OBJECT\0" - "BN_DEC2BN_ERROR\0" - "BN_TO_ASN1_INTEGER_ERROR\0" - "CANNOT_FIND_FREE_FUNCTION\0" - "DIRNAME_ERROR\0" - "DISTPOINT_ALREADY_SET\0" - "DUPLICATE_ZONE_ID\0" - "ERROR_CONVERTING_ZONE\0" - "ERROR_CREATING_EXTENSION\0" - 
"ERROR_IN_EXTENSION\0" - "EXPECTED_A_SECTION_NAME\0" - "EXTENSION_EXISTS\0" - "EXTENSION_NAME_ERROR\0" - "EXTENSION_NOT_FOUND\0" - "EXTENSION_SETTING_NOT_SUPPORTED\0" - "EXTENSION_VALUE_ERROR\0" - "ILLEGAL_EMPTY_EXTENSION\0" - "ILLEGAL_HEX_DIGIT\0" - "INCORRECT_POLICY_SYNTAX_TAG\0" - "INVALID_BOOLEAN_STRING\0" - "INVALID_EXTENSION_STRING\0" - "INVALID_MULTIPLE_RDNS\0" - "INVALID_NAME\0" - "INVALID_NULL_ARGUMENT\0" - "INVALID_NULL_NAME\0" - "INVALID_NULL_VALUE\0" - "INVALID_NUMBERS\0" - "INVALID_OBJECT_IDENTIFIER\0" - "INVALID_OPTION\0" - "INVALID_POLICY_IDENTIFIER\0" - "INVALID_PROXY_POLICY_SETTING\0" - "INVALID_PURPOSE\0" - "INVALID_SECTION\0" - "INVALID_SYNTAX\0" - "ISSUER_DECODE_ERROR\0" - "NEED_ORGANIZATION_AND_NUMBERS\0" - "NO_CONFIG_DATABASE\0" - "NO_ISSUER_CERTIFICATE\0" - "NO_ISSUER_DETAILS\0" - "NO_POLICY_IDENTIFIER\0" - "NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED\0" - "NO_PUBLIC_KEY\0" - "NO_SUBJECT_DETAILS\0" - "ODD_NUMBER_OF_DIGITS\0" - "OPERATION_NOT_DEFINED\0" - "OTHERNAME_ERROR\0" - "POLICY_LANGUAGE_ALREADY_DEFINED\0" - "POLICY_PATH_LENGTH\0" - "POLICY_PATH_LENGTH_ALREADY_DEFINED\0" - "POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY\0" - "SECTION_NOT_FOUND\0" - "UNABLE_TO_GET_ISSUER_DETAILS\0" - "UNABLE_TO_GET_ISSUER_KEYID\0" - "UNKNOWN_BIT_STRING_ARGUMENT\0" - "UNKNOWN_EXTENSION\0" - "UNKNOWN_EXTENSION_NAME\0" - "UNKNOWN_OPTION\0" - "UNSUPPORTED_OPTION\0" - "USER_TOO_LONG\0" - ""; diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/chacha/chacha-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/chacha/chacha-armv8.S deleted file mode 100644 index b14466ddd7..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/chacha/chacha-armv8.S +++ /dev/null @@ -1,1982 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - - - -.section __TEXT,__const - -.align 5 -Lsigma: -.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral -Lone: -.long 1,0,0,0 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 - -.text - -.globl _ChaCha20_ctr32 -.private_extern _ChaCha20_ctr32 - -.align 5 -_ChaCha20_ctr32: - cbz x2,Labort -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x5,:pg_hi21_nc:_OPENSSL_armcap_P -#else - adrp x5,_OPENSSL_armcap_P@PAGE -#endif - cmp x2,#192 - b.lo Lshort - ldr w17,[x5,_OPENSSL_armcap_P@PAGEOFF] - tst w17,#ARMV7_NEON - b.ne ChaCha20_neon - -Lshort: - stp x29,x30,[sp,#-96]! 
- add x29,sp,#0 - - adrp x5,Lsigma@PAGE - add x5,x5,Lsigma@PAGEOFF - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#64 - - ldp x22,x23,[x5] // load sigma - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ldp x28,x30,[x4] // load counter -#ifdef __ARMEB__ - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - -Loop_outer: - mov w5,w22 // unpack key block - lsr x6,x22,#32 - mov w7,w23 - lsr x8,x23,#32 - mov w9,w24 - lsr x10,x24,#32 - mov w11,w25 - lsr x12,x25,#32 - mov w13,w26 - lsr x14,x26,#32 - mov w15,w27 - lsr x16,x27,#32 - mov w17,w28 - lsr x19,x28,#32 - mov w20,w30 - lsr x21,x30,#32 - - mov x4,#10 - subs x2,x2,#64 -Loop: - sub x4,x4,#1 - add w5,w5,w9 - add w6,w6,w10 - add w7,w7,w11 - add w8,w8,w12 - eor w17,w17,w5 - eor w19,w19,w6 - eor w20,w20,w7 - eor w21,w21,w8 - ror w17,w17,#16 - ror w19,w19,#16 - ror w20,w20,#16 - ror w21,w21,#16 - add w13,w13,w17 - add w14,w14,w19 - add w15,w15,w20 - add w16,w16,w21 - eor w9,w9,w13 - eor w10,w10,w14 - eor w11,w11,w15 - eor w12,w12,w16 - ror w9,w9,#20 - ror w10,w10,#20 - ror w11,w11,#20 - ror w12,w12,#20 - add w5,w5,w9 - add w6,w6,w10 - add w7,w7,w11 - add w8,w8,w12 - eor w17,w17,w5 - eor w19,w19,w6 - eor w20,w20,w7 - eor w21,w21,w8 - ror w17,w17,#24 - ror w19,w19,#24 - ror w20,w20,#24 - ror w21,w21,#24 - add w13,w13,w17 - add w14,w14,w19 - add w15,w15,w20 - add w16,w16,w21 - eor w9,w9,w13 - eor w10,w10,w14 - eor w11,w11,w15 - eor w12,w12,w16 - ror w9,w9,#25 - ror w10,w10,#25 - ror w11,w11,#25 - ror w12,w12,#25 - add w5,w5,w10 - add w6,w6,w11 - add w7,w7,w12 - add w8,w8,w9 - eor w21,w21,w5 - eor w17,w17,w6 - eor w19,w19,w7 - eor w20,w20,w8 - ror w21,w21,#16 - ror w17,w17,#16 - ror w19,w19,#16 - ror w20,w20,#16 - add w15,w15,w21 - add w16,w16,w17 - add w13,w13,w19 - add w14,w14,w20 - eor w10,w10,w15 - eor w11,w11,w16 - eor w12,w12,w13 - eor w9,w9,w14 - ror w10,w10,#20 - ror 
w11,w11,#20 - ror w12,w12,#20 - ror w9,w9,#20 - add w5,w5,w10 - add w6,w6,w11 - add w7,w7,w12 - add w8,w8,w9 - eor w21,w21,w5 - eor w17,w17,w6 - eor w19,w19,w7 - eor w20,w20,w8 - ror w21,w21,#24 - ror w17,w17,#24 - ror w19,w19,#24 - ror w20,w20,#24 - add w15,w15,w21 - add w16,w16,w17 - add w13,w13,w19 - add w14,w14,w20 - eor w10,w10,w15 - eor w11,w11,w16 - eor w12,w12,w13 - eor w9,w9,w14 - ror w10,w10,#25 - ror w11,w11,#25 - ror w12,w12,#25 - ror w9,w9,#25 - cbnz x4,Loop - - add w5,w5,w22 // accumulate key block - add x6,x6,x22,lsr#32 - add w7,w7,w23 - add x8,x8,x23,lsr#32 - add w9,w9,w24 - add x10,x10,x24,lsr#32 - add w11,w11,w25 - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add x21,x21,x30,lsr#32 - - b.lo Ltail - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#1 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - - b.hi Loop_outer - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 -Labort: - ret - -.align 4 -Ltail: - add x2,x2,#64 -Less_than_64: - sub x0,x0,#1 - add x1,x1,x2 - add x0,x0,x2 - add x4,sp,x2 - neg x2,x2 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - add x9,x9,x10,lsl#32 - 
add x11,x11,x12,lsl#32 - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - stp x5,x7,[sp,#0] - stp x9,x11,[sp,#16] - stp x13,x15,[sp,#32] - stp x17,x20,[sp,#48] - -Loop_tail: - ldrb w10,[x1,x2] - ldrb w11,[x4,x2] - add x2,x2,#1 - eor w10,w10,w11 - strb w10,[x0,x2] - cbnz x2,Loop_tail - - stp xzr,xzr,[sp,#0] - stp xzr,xzr,[sp,#16] - stp xzr,xzr,[sp,#32] - stp xzr,xzr,[sp,#48] - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret - - - -.align 5 -ChaCha20_neon: - stp x29,x30,[sp,#-96]! - add x29,sp,#0 - - adrp x5,Lsigma@PAGE - add x5,x5,Lsigma@PAGEOFF - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - cmp x2,#512 - b.hs L512_or_more_neon - - sub sp,sp,#64 - - ldp x22,x23,[x5] // load sigma - ld1 {v24.4s},[x5],#16 - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ld1 {v25.4s,v26.4s},[x3] - ldp x28,x30,[x4] // load counter - ld1 {v27.4s},[x4] - ld1 {v31.4s},[x5] -#ifdef __ARMEB__ - rev64 v24.4s,v24.4s - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - add v27.4s,v27.4s,v31.4s // += 1 - add v28.4s,v27.4s,v31.4s - add v29.4s,v28.4s,v31.4s - shl v31.4s,v31.4s,#2 // 1 -> 4 - -Loop_outer_neon: - mov w5,w22 // unpack key block - lsr x6,x22,#32 - mov v0.16b,v24.16b - mov w7,w23 - lsr x8,x23,#32 - mov v4.16b,v24.16b - mov w9,w24 - lsr x10,x24,#32 - mov v16.16b,v24.16b - mov w11,w25 - mov v1.16b,v25.16b - lsr x12,x25,#32 - mov v5.16b,v25.16b - mov w13,w26 - mov v17.16b,v25.16b - lsr x14,x26,#32 - mov v3.16b,v27.16b - mov w15,w27 - mov v7.16b,v28.16b - lsr x16,x27,#32 - mov v19.16b,v29.16b - mov w17,w28 - mov v2.16b,v26.16b - lsr x19,x28,#32 - mov 
v6.16b,v26.16b - mov w20,w30 - mov v18.16b,v26.16b - lsr x21,x30,#32 - - mov x4,#10 - subs x2,x2,#256 -Loop_neon: - sub x4,x4,#1 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v16.4s,v16.4s,v17.4s - add w7,w7,w11 - eor v3.16b,v3.16b,v0.16b - add w8,w8,w12 - eor v7.16b,v7.16b,v4.16b - eor w17,w17,w5 - eor v19.16b,v19.16b,v16.16b - eor w19,w19,w6 - rev32 v3.8h,v3.8h - eor w20,w20,w7 - rev32 v7.8h,v7.8h - eor w21,w21,w8 - rev32 v19.8h,v19.8h - ror w17,w17,#16 - add v2.4s,v2.4s,v3.4s - ror w19,w19,#16 - add v6.4s,v6.4s,v7.4s - ror w20,w20,#16 - add v18.4s,v18.4s,v19.4s - ror w21,w21,#16 - eor v20.16b,v1.16b,v2.16b - add w13,w13,w17 - eor v21.16b,v5.16b,v6.16b - add w14,w14,w19 - eor v22.16b,v17.16b,v18.16b - add w15,w15,w20 - ushr v1.4s,v20.4s,#20 - add w16,w16,w21 - ushr v5.4s,v21.4s,#20 - eor w9,w9,w13 - ushr v17.4s,v22.4s,#20 - eor w10,w10,w14 - sli v1.4s,v20.4s,#12 - eor w11,w11,w15 - sli v5.4s,v21.4s,#12 - eor w12,w12,w16 - sli v17.4s,v22.4s,#12 - ror w9,w9,#20 - add v0.4s,v0.4s,v1.4s - ror w10,w10,#20 - add v4.4s,v4.4s,v5.4s - ror w11,w11,#20 - add v16.4s,v16.4s,v17.4s - ror w12,w12,#20 - eor v20.16b,v3.16b,v0.16b - add w5,w5,w9 - eor v21.16b,v7.16b,v4.16b - add w6,w6,w10 - eor v22.16b,v19.16b,v16.16b - add w7,w7,w11 - ushr v3.4s,v20.4s,#24 - add w8,w8,w12 - ushr v7.4s,v21.4s,#24 - eor w17,w17,w5 - ushr v19.4s,v22.4s,#24 - eor w19,w19,w6 - sli v3.4s,v20.4s,#8 - eor w20,w20,w7 - sli v7.4s,v21.4s,#8 - eor w21,w21,w8 - sli v19.4s,v22.4s,#8 - ror w17,w17,#24 - add v2.4s,v2.4s,v3.4s - ror w19,w19,#24 - add v6.4s,v6.4s,v7.4s - ror w20,w20,#24 - add v18.4s,v18.4s,v19.4s - ror w21,w21,#24 - eor v20.16b,v1.16b,v2.16b - add w13,w13,w17 - eor v21.16b,v5.16b,v6.16b - add w14,w14,w19 - eor v22.16b,v17.16b,v18.16b - add w15,w15,w20 - ushr v1.4s,v20.4s,#25 - add w16,w16,w21 - ushr v5.4s,v21.4s,#25 - eor w9,w9,w13 - ushr v17.4s,v22.4s,#25 - eor w10,w10,w14 - sli v1.4s,v20.4s,#7 - eor w11,w11,w15 - sli v5.4s,v21.4s,#7 - eor w12,w12,w16 - sli 
v17.4s,v22.4s,#7 - ror w9,w9,#25 - ext v2.16b,v2.16b,v2.16b,#8 - ror w10,w10,#25 - ext v6.16b,v6.16b,v6.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w10 - add v4.4s,v4.4s,v5.4s - add w6,w6,w11 - add v16.4s,v16.4s,v17.4s - add w7,w7,w12 - eor v3.16b,v3.16b,v0.16b - add w8,w8,w9 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w5 - eor v19.16b,v19.16b,v16.16b - eor w17,w17,w6 - rev32 v3.8h,v3.8h - eor w19,w19,w7 - rev32 v7.8h,v7.8h - eor w20,w20,w8 - rev32 v19.8h,v19.8h - ror w21,w21,#16 - add v2.4s,v2.4s,v3.4s - ror w17,w17,#16 - add v6.4s,v6.4s,v7.4s - ror w19,w19,#16 - add v18.4s,v18.4s,v19.4s - ror w20,w20,#16 - eor v20.16b,v1.16b,v2.16b - add w15,w15,w21 - eor v21.16b,v5.16b,v6.16b - add w16,w16,w17 - eor v22.16b,v17.16b,v18.16b - add w13,w13,w19 - ushr v1.4s,v20.4s,#20 - add w14,w14,w20 - ushr v5.4s,v21.4s,#20 - eor w10,w10,w15 - ushr v17.4s,v22.4s,#20 - eor w11,w11,w16 - sli v1.4s,v20.4s,#12 - eor w12,w12,w13 - sli v5.4s,v21.4s,#12 - eor w9,w9,w14 - sli v17.4s,v22.4s,#12 - ror w10,w10,#20 - add v0.4s,v0.4s,v1.4s - ror w11,w11,#20 - add v4.4s,v4.4s,v5.4s - ror w12,w12,#20 - add v16.4s,v16.4s,v17.4s - ror w9,w9,#20 - eor v20.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v21.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v22.16b,v19.16b,v16.16b - add w7,w7,w12 - ushr v3.4s,v20.4s,#24 - add w8,w8,w9 - ushr v7.4s,v21.4s,#24 - eor w21,w21,w5 - ushr v19.4s,v22.4s,#24 - eor w17,w17,w6 - sli v3.4s,v20.4s,#8 - eor w19,w19,w7 - sli v7.4s,v21.4s,#8 - eor w20,w20,w8 - sli v19.4s,v22.4s,#8 - ror w21,w21,#24 - add v2.4s,v2.4s,v3.4s - ror w17,w17,#24 - add v6.4s,v6.4s,v7.4s - ror w19,w19,#24 - add v18.4s,v18.4s,v19.4s - ror w20,w20,#24 - eor v20.16b,v1.16b,v2.16b - add w15,w15,w21 - eor v21.16b,v5.16b,v6.16b - add w16,w16,w17 - eor 
v22.16b,v17.16b,v18.16b - add w13,w13,w19 - ushr v1.4s,v20.4s,#25 - add w14,w14,w20 - ushr v5.4s,v21.4s,#25 - eor w10,w10,w15 - ushr v17.4s,v22.4s,#25 - eor w11,w11,w16 - sli v1.4s,v20.4s,#7 - eor w12,w12,w13 - sli v5.4s,v21.4s,#7 - eor w9,w9,w14 - sli v17.4s,v22.4s,#7 - ror w10,w10,#25 - ext v2.16b,v2.16b,v2.16b,#8 - ror w11,w11,#25 - ext v6.16b,v6.16b,v6.16b,#8 - ror w12,w12,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - cbnz x4,Loop_neon - - add w5,w5,w22 // accumulate key block - add v0.4s,v0.4s,v24.4s - add x6,x6,x22,lsr#32 - add v4.4s,v4.4s,v24.4s - add w7,w7,w23 - add v16.4s,v16.4s,v24.4s - add x8,x8,x23,lsr#32 - add v2.4s,v2.4s,v26.4s - add w9,w9,w24 - add v6.4s,v6.4s,v26.4s - add x10,x10,x24,lsr#32 - add v18.4s,v18.4s,v26.4s - add w11,w11,w25 - add v3.4s,v3.4s,v27.4s - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add v7.4s,v7.4s,v28.4s - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add v19.4s,v19.4s,v29.4s - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add v1.4s,v1.4s,v25.4s - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add v5.4s,v5.4s,v25.4s - add x21,x21,x30,lsr#32 - add v17.4s,v17.4s,v25.4s - - b.lo Ltail_neon - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor v0.16b,v0.16b,v20.16b - eor x15,x15,x16 - eor v1.16b,v1.16b,v21.16b - eor 
x17,x17,x19 - eor v2.16b,v2.16b,v22.16b - eor x20,x20,x21 - eor v3.16b,v3.16b,v23.16b - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#4 // increment counter - stp x9,x11,[x0,#16] - add v27.4s,v27.4s,v31.4s // += 4 - stp x13,x15,[x0,#32] - add v28.4s,v28.4s,v31.4s - stp x17,x20,[x0,#48] - add v29.4s,v29.4s,v31.4s - add x0,x0,#64 - - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 - - eor v4.16b,v4.16b,v20.16b - eor v5.16b,v5.16b,v21.16b - eor v6.16b,v6.16b,v22.16b - eor v7.16b,v7.16b,v23.16b - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - - eor v16.16b,v16.16b,v0.16b - eor v17.16b,v17.16b,v1.16b - eor v18.16b,v18.16b,v2.16b - eor v19.16b,v19.16b,v3.16b - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 - - b.hi Loop_outer_neon - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret - -Ltail_neon: - add x2,x2,#256 - cmp x2,#64 - b.lo Less_than_64 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#4 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - b.eq Ldone_neon - sub x2,x2,#64 - cmp x2,#64 - b.lo Less_than_128 - - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor v0.16b,v0.16b,v20.16b - eor v1.16b,v1.16b,v21.16b - eor 
v2.16b,v2.16b,v22.16b - eor v3.16b,v3.16b,v23.16b - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - b.eq Ldone_neon - sub x2,x2,#64 - cmp x2,#64 - b.lo Less_than_192 - - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor v4.16b,v4.16b,v20.16b - eor v5.16b,v5.16b,v21.16b - eor v6.16b,v6.16b,v22.16b - eor v7.16b,v7.16b,v23.16b - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - b.eq Ldone_neon - sub x2,x2,#64 - - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] - b Last_neon - -Less_than_128: - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] - b Last_neon -Less_than_192: - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] - b Last_neon - -.align 4 -Last_neon: - sub x0,x0,#1 - add x1,x1,x2 - add x0,x0,x2 - add x4,sp,x2 - neg x2,x2 - -Loop_tail_neon: - ldrb w10,[x1,x2] - ldrb w11,[x4,x2] - add x2,x2,#1 - eor w10,w10,w11 - strb w10,[x0,x2] - cbnz x2,Loop_tail_neon - - stp xzr,xzr,[sp,#0] - stp xzr,xzr,[sp,#16] - stp xzr,xzr,[sp,#32] - stp xzr,xzr,[sp,#48] - -Ldone_neon: - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret - - -.align 5 -ChaCha20_512_neon: - stp x29,x30,[sp,#-96]! 
- add x29,sp,#0 - - adrp x5,Lsigma@PAGE - add x5,x5,Lsigma@PAGEOFF - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - -L512_or_more_neon: - sub sp,sp,#128+64 - - ldp x22,x23,[x5] // load sigma - ld1 {v24.4s},[x5],#16 - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ld1 {v25.4s,v26.4s},[x3] - ldp x28,x30,[x4] // load counter - ld1 {v27.4s},[x4] - ld1 {v31.4s},[x5] -#ifdef __ARMEB__ - rev64 v24.4s,v24.4s - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - add v27.4s,v27.4s,v31.4s // += 1 - stp q24,q25,[sp,#0] // off-load key block, invariant part - add v27.4s,v27.4s,v31.4s // not typo - str q26,[sp,#32] - add v28.4s,v27.4s,v31.4s - add v29.4s,v28.4s,v31.4s - add v30.4s,v29.4s,v31.4s - shl v31.4s,v31.4s,#2 // 1 -> 4 - - stp d8,d9,[sp,#128+0] // meet ABI requirements - stp d10,d11,[sp,#128+16] - stp d12,d13,[sp,#128+32] - stp d14,d15,[sp,#128+48] - - sub x2,x2,#512 // not typo - -Loop_outer_512_neon: - mov v0.16b,v24.16b - mov v4.16b,v24.16b - mov v8.16b,v24.16b - mov v12.16b,v24.16b - mov v16.16b,v24.16b - mov v20.16b,v24.16b - mov v1.16b,v25.16b - mov w5,w22 // unpack key block - mov v5.16b,v25.16b - lsr x6,x22,#32 - mov v9.16b,v25.16b - mov w7,w23 - mov v13.16b,v25.16b - lsr x8,x23,#32 - mov v17.16b,v25.16b - mov w9,w24 - mov v21.16b,v25.16b - lsr x10,x24,#32 - mov v3.16b,v27.16b - mov w11,w25 - mov v7.16b,v28.16b - lsr x12,x25,#32 - mov v11.16b,v29.16b - mov w13,w26 - mov v15.16b,v30.16b - lsr x14,x26,#32 - mov v2.16b,v26.16b - mov w15,w27 - mov v6.16b,v26.16b - lsr x16,x27,#32 - add v19.4s,v3.4s,v31.4s // +4 - mov w17,w28 - add v23.4s,v7.4s,v31.4s // +4 - lsr x19,x28,#32 - mov v10.16b,v26.16b - mov w20,w30 - mov v14.16b,v26.16b - lsr x21,x30,#32 - mov v18.16b,v26.16b - stp q27,q28,[sp,#48] // off-load key block, variable part - mov v22.16b,v26.16b - str q29,[sp,#80] - - mov x4,#5 - subs x2,x2,#512 -Loop_upper_neon: - sub 
x4,x4,#1 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add 
v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext 
v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v11.16b,v11.16b,v11.16b,#12 - ext v15.16b,v15.16b,v15.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v23.16b,v23.16b,v23.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v9.16b,v9.16b,v9.16b,#4 - ext v13.16b,v13.16b,v13.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - ext v21.16b,v21.16b,v21.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror 
w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr 
v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v11.16b,v11.16b,v11.16b,#4 - ext v15.16b,v15.16b,v15.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v23.16b,v23.16b,v23.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v9.16b,v9.16b,v9.16b,#12 - ext v13.16b,v13.16b,v13.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - ext v21.16b,v21.16b,v21.16b,#12 - cbnz x4,Loop_upper_neon - - add w5,w5,w22 // accumulate key block - add x6,x6,x22,lsr#32 - add w7,w7,w23 - add x8,x8,x23,lsr#32 - add w9,w9,w24 - add x10,x10,x24,lsr#32 - add w11,w11,w25 - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add x21,x21,x30,lsr#32 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 
- eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#1 // increment counter - mov w5,w22 // unpack key block - lsr x6,x22,#32 - stp x9,x11,[x0,#16] - mov w7,w23 - lsr x8,x23,#32 - stp x13,x15,[x0,#32] - mov w9,w24 - lsr x10,x24,#32 - stp x17,x20,[x0,#48] - add x0,x0,#64 - mov w11,w25 - lsr x12,x25,#32 - mov w13,w26 - lsr x14,x26,#32 - mov w15,w27 - lsr x16,x27,#32 - mov w17,w28 - lsr x19,x28,#32 - mov w20,w30 - lsr x21,x30,#32 - - mov x4,#5 -Loop_lower_neon: - sub x4,x4,#1 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror 
w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr 
v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v11.16b,v11.16b,v11.16b,#12 - ext v15.16b,v15.16b,v15.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v23.16b,v23.16b,v23.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v9.16b,v9.16b,v9.16b,#4 - ext v13.16b,v13.16b,v13.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - ext v21.16b,v21.16b,v21.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add 
v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - 
eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v11.16b,v11.16b,v11.16b,#4 - ext v15.16b,v15.16b,v15.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v23.16b,v23.16b,v23.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v9.16b,v9.16b,v9.16b,#12 - ext v13.16b,v13.16b,v13.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - ext v21.16b,v21.16b,v21.16b,#12 - cbnz x4,Loop_lower_neon - - add w5,w5,w22 // accumulate key block - ldp q24,q25,[sp,#0] - add x6,x6,x22,lsr#32 - ldp q26,q27,[sp,#32] - add w7,w7,w23 - ldp q28,q29,[sp,#64] - add x8,x8,x23,lsr#32 - add v0.4s,v0.4s,v24.4s - add w9,w9,w24 - add v4.4s,v4.4s,v24.4s - add x10,x10,x24,lsr#32 - add v8.4s,v8.4s,v24.4s 
- add w11,w11,w25 - add v12.4s,v12.4s,v24.4s - add x12,x12,x25,lsr#32 - add v16.4s,v16.4s,v24.4s - add w13,w13,w26 - add v20.4s,v20.4s,v24.4s - add x14,x14,x26,lsr#32 - add v2.4s,v2.4s,v26.4s - add w15,w15,w27 - add v6.4s,v6.4s,v26.4s - add x16,x16,x27,lsr#32 - add v10.4s,v10.4s,v26.4s - add w17,w17,w28 - add v14.4s,v14.4s,v26.4s - add x19,x19,x28,lsr#32 - add v18.4s,v18.4s,v26.4s - add w20,w20,w30 - add v22.4s,v22.4s,v26.4s - add x21,x21,x30,lsr#32 - add v19.4s,v19.4s,v31.4s // +4 - add x5,x5,x6,lsl#32 // pack - add v23.4s,v23.4s,v31.4s // +4 - add x7,x7,x8,lsl#32 - add v3.4s,v3.4s,v27.4s - ldp x6,x8,[x1,#0] // load input - add v7.4s,v7.4s,v28.4s - add x9,x9,x10,lsl#32 - add v11.4s,v11.4s,v29.4s - add x11,x11,x12,lsl#32 - add v15.4s,v15.4s,v30.4s - ldp x10,x12,[x1,#16] - add v19.4s,v19.4s,v27.4s - add x13,x13,x14,lsl#32 - add v23.4s,v23.4s,v28.4s - add x15,x15,x16,lsl#32 - add v1.4s,v1.4s,v25.4s - ldp x14,x16,[x1,#32] - add v5.4s,v5.4s,v25.4s - add x17,x17,x19,lsl#32 - add v9.4s,v9.4s,v25.4s - add x20,x20,x21,lsl#32 - add v13.4s,v13.4s,v25.4s - ldp x19,x21,[x1,#48] - add v17.4s,v17.4s,v25.4s - add x1,x1,#64 - add v21.4s,v21.4s,v25.4s - -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor v0.16b,v0.16b,v24.16b - eor x15,x15,x16 - eor v1.16b,v1.16b,v25.16b - eor x17,x17,x19 - eor v2.16b,v2.16b,v26.16b - eor x20,x20,x21 - eor v3.16b,v3.16b,v27.16b - ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#7 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - - ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 - eor v4.16b,v4.16b,v24.16b - eor v5.16b,v5.16b,v25.16b - eor v6.16b,v6.16b,v26.16b - eor v7.16b,v7.16b,v27.16b - 
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - eor v8.16b,v8.16b,v0.16b - ldp q24,q25,[sp,#0] - eor v9.16b,v9.16b,v1.16b - ldp q26,q27,[sp,#32] - eor v10.16b,v10.16b,v2.16b - eor v11.16b,v11.16b,v3.16b - st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 - - ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 - eor v12.16b,v12.16b,v4.16b - eor v13.16b,v13.16b,v5.16b - eor v14.16b,v14.16b,v6.16b - eor v15.16b,v15.16b,v7.16b - st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 - - ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 - eor v16.16b,v16.16b,v8.16b - eor v17.16b,v17.16b,v9.16b - eor v18.16b,v18.16b,v10.16b - eor v19.16b,v19.16b,v11.16b - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 - - shl v0.4s,v31.4s,#1 // 4 -> 8 - eor v20.16b,v20.16b,v12.16b - eor v21.16b,v21.16b,v13.16b - eor v22.16b,v22.16b,v14.16b - eor v23.16b,v23.16b,v15.16b - st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 - - add v27.4s,v27.4s,v0.4s // += 8 - add v28.4s,v28.4s,v0.4s - add v29.4s,v29.4s,v0.4s - add v30.4s,v30.4s,v0.4s - - b.hs Loop_outer_512_neon - - adds x2,x2,#512 - ushr v0.4s,v31.4s,#2 // 4 -> 1 - - ldp d8,d9,[sp,#128+0] // meet ABI requirements - ldp d10,d11,[sp,#128+16] - ldp d12,d13,[sp,#128+32] - ldp d14,d15,[sp,#128+48] - - stp q24,q31,[sp,#0] // wipe off-load area - stp q24,q31,[sp,#32] - stp q24,q31,[sp,#64] - - b.eq Ldone_512_neon - - cmp x2,#192 - sub v27.4s,v27.4s,v0.4s // -= 1 - sub v28.4s,v28.4s,v0.4s - sub v29.4s,v29.4s,v0.4s - add sp,sp,#128 - b.hs Loop_outer_neon - - eor v25.16b,v25.16b,v25.16b - eor v26.16b,v26.16b,v26.16b - eor v27.16b,v27.16b,v27.16b - eor v28.16b,v28.16b,v28.16b - eor v29.16b,v29.16b,v29.16b - eor v30.16b,v30.16b,v30.16b - b Loop_outer - -Ldone_512_neon: - ldp x19,x20,[x29,#16] - add sp,sp,#128+64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret - -#endif // !OPENSSL_NO_ASM diff --git 
a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S deleted file mode 100644 index dc2d6e432c..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S +++ /dev/null @@ -1,772 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -#if __ARM_MAX_ARCH__>=7 -.text - -.section __TEXT,__const -.align 5 -Lrcon: -.long 0x01,0x01,0x01,0x01 -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat -.long 0x1b,0x1b,0x1b,0x1b - -.text - -.globl _aes_hw_set_encrypt_key -.private_extern _aes_hw_set_encrypt_key - -.align 5 -_aes_hw_set_encrypt_key: -Lenc_key: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - mov x3,#-1 - cmp x0,#0 - b.eq Lenc_key_abort - cmp x2,#0 - b.eq Lenc_key_abort - mov x3,#-2 - cmp w1,#128 - b.lt Lenc_key_abort - cmp w1,#256 - b.gt Lenc_key_abort - tst w1,#0x3f - b.ne Lenc_key_abort - - adrp x3,Lrcon@PAGE - add x3,x3,Lrcon@PAGEOFF - cmp w1,#192 - - eor v0.16b,v0.16b,v0.16b - ld1 {v3.16b},[x0],#16 - mov w1,#8 // reuse w1 - ld1 {v1.4s,v2.4s},[x3],#32 - - b.lt Loop128 - b.eq L192 - b L256 - -.align 4 -Loop128: - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - b.ne Loop128 - - ld1 {v1.4s},[x3] - - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - eor v3.16b,v3.16b,v6.16b - st1 {v3.4s},[x2] - add x2,x2,#0x50 - - mov w12,#10 - b Ldone - -.align 4 -L192: - ld1 {v4.8b},[x0],#8 - movi v6.16b,#8 // borrow v6.16b - st1 {v3.4s},[x2],#16 - sub v2.16b,v2.16b,v6.16b // adjust the mask - -Loop192: - tbl v6.16b,{v4.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v4.8b},[x2],#8 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - - dup v5.4s,v3.s[3] - eor v5.16b,v5.16b,v4.16b - eor 
v6.16b,v6.16b,v1.16b - ext v4.16b,v0.16b,v4.16b,#12 - shl v1.16b,v1.16b,#1 - eor v4.16b,v4.16b,v5.16b - eor v3.16b,v3.16b,v6.16b - eor v4.16b,v4.16b,v6.16b - st1 {v3.4s},[x2],#16 - b.ne Loop192 - - mov w12,#12 - add x2,x2,#0x20 - b Ldone - -.align 4 -L256: - ld1 {v4.16b},[x0] - mov w1,#7 - mov w12,#14 - st1 {v3.4s},[x2],#16 - -Loop256: - tbl v6.16b,{v4.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v4.4s},[x2],#16 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - st1 {v3.4s},[x2],#16 - b.eq Ldone - - dup v6.4s,v3.s[3] // just splat - ext v5.16b,v0.16b,v4.16b,#12 - aese v6.16b,v0.16b - - eor v4.16b,v4.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v4.16b,v4.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v4.16b,v4.16b,v5.16b - - eor v4.16b,v4.16b,v6.16b - b Loop256 - -Ldone: - str w12,[x2] - mov x3,#0 - -Lenc_key_abort: - mov x0,x3 // return value - ldr x29,[sp],#16 - ret - - -.globl _aes_hw_set_decrypt_key -.private_extern _aes_hw_set_decrypt_key - -.align 5 -_aes_hw_set_decrypt_key: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - bl Lenc_key - - cmp x0,#0 - b.ne Ldec_key_abort - - sub x2,x2,#240 // restore original x2 - mov x4,#-16 - add x0,x2,x12,lsl#4 // end of key schedule - - ld1 {v0.4s},[x2] - ld1 {v1.4s},[x0] - st1 {v0.4s},[x0],x4 - st1 {v1.4s},[x2],#16 - -Loop_imc: - ld1 {v0.4s},[x2] - ld1 {v1.4s},[x0] - aesimc v0.16b,v0.16b - aesimc v1.16b,v1.16b - st1 {v0.4s},[x0],x4 - st1 {v1.4s},[x2],#16 - cmp x0,x2 - b.hi Loop_imc - - ld1 {v0.4s},[x2] - aesimc v0.16b,v0.16b - st1 {v0.4s},[x0] - - eor x0,x0,x0 // return value -Ldec_key_abort: - ldp x29,x30,[sp],#16 - ret - -.globl _aes_hw_encrypt -.private_extern _aes_hw_encrypt - -.align 5 -_aes_hw_encrypt: - ldr w3,[x2,#240] - ld1 {v0.4s},[x2],#16 - ld1 {v2.16b},[x0] - sub w3,w3,#2 - ld1 {v1.4s},[x2],#16 - -Loop_enc: - aese v2.16b,v0.16b - aesmc v2.16b,v2.16b - ld1 {v0.4s},[x2],#16 - subs w3,w3,#2 - aese v2.16b,v1.16b - aesmc v2.16b,v2.16b - ld1 {v1.4s},[x2],#16 - b.gt Loop_enc - - aese v2.16b,v0.16b - aesmc v2.16b,v2.16b - ld1 {v0.4s},[x2] - aese v2.16b,v1.16b - eor v2.16b,v2.16b,v0.16b - - st1 {v2.16b},[x1] - ret - -.globl _aes_hw_decrypt -.private_extern _aes_hw_decrypt - -.align 5 -_aes_hw_decrypt: - ldr w3,[x2,#240] - ld1 {v0.4s},[x2],#16 - ld1 {v2.16b},[x0] - sub w3,w3,#2 - ld1 {v1.4s},[x2],#16 - -Loop_dec: - aesd v2.16b,v0.16b - aesimc v2.16b,v2.16b - ld1 {v0.4s},[x2],#16 - subs w3,w3,#2 - aesd v2.16b,v1.16b - aesimc v2.16b,v2.16b - ld1 {v1.4s},[x2],#16 - b.gt Loop_dec - - aesd v2.16b,v0.16b - aesimc v2.16b,v2.16b - ld1 {v0.4s},[x2] - aesd v2.16b,v1.16b - eor v2.16b,v2.16b,v0.16b - - st1 {v2.16b},[x1] - ret - -.globl _aes_hw_cbc_encrypt -.private_extern _aes_hw_cbc_encrypt - -.align 5 -_aes_hw_cbc_encrypt: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - subs x2,x2,#16 - mov x8,#16 - b.lo Lcbc_abort - csel x8,xzr,x8,eq - - cmp w5,#0 // en- or decrypting? - ldr w5,[x3,#240] - and x2,x2,#-16 - ld1 {v6.16b},[x4] - ld1 {v0.16b},[x0],x8 - - ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
- sub w5,w5,#6 - add x7,x3,x5,lsl#4 // pointer to last 7 round keys - sub w5,w5,#2 - ld1 {v18.4s,v19.4s},[x7],#32 - ld1 {v20.4s,v21.4s},[x7],#32 - ld1 {v22.4s,v23.4s},[x7],#32 - ld1 {v7.4s},[x7] - - add x7,x3,#32 - mov w6,w5 - b.eq Lcbc_dec - - cmp w5,#2 - eor v0.16b,v0.16b,v6.16b - eor v5.16b,v16.16b,v7.16b - b.eq Lcbc_enc128 - - ld1 {v2.4s,v3.4s},[x7] - add x7,x3,#16 - add x6,x3,#16*4 - add x12,x3,#16*5 - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - add x14,x3,#16*6 - add x3,x3,#16*7 - b Lenter_cbc_enc - -.align 4 -Loop_cbc_enc: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - st1 {v6.16b},[x1],#16 -Lenter_cbc_enc: - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v0.16b,v2.16b - aesmc v0.16b,v0.16b - ld1 {v16.4s},[x6] - cmp w5,#4 - aese v0.16b,v3.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x12] - b.eq Lcbc_enc192 - - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - ld1 {v16.4s},[x14] - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x3] - nop - -Lcbc_enc192: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - subs x2,x2,#16 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - csel x8,xzr,x8,eq - aese v0.16b,v18.16b - aesmc v0.16b,v0.16b - aese v0.16b,v19.16b - aesmc v0.16b,v0.16b - ld1 {v16.16b},[x0],x8 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - eor v16.16b,v16.16b,v5.16b - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x7] // re-pre-load rndkey[1] - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - aese v0.16b,v23.16b - eor v6.16b,v0.16b,v7.16b - b.hs Loop_cbc_enc - - st1 {v6.16b},[x1],#16 - b Lcbc_done - -.align 5 -Lcbc_enc128: - ld1 {v2.4s,v3.4s},[x7] - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - b Lenter_cbc_enc128 -Loop_cbc_enc128: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - st1 {v6.16b},[x1],#16 -Lenter_cbc_enc128: - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - subs x2,x2,#16 - aese v0.16b,v2.16b - aesmc v0.16b,v0.16b - csel x8,xzr,x8,eq - aese v0.16b,v3.16b - aesmc v0.16b,v0.16b - aese v0.16b,v18.16b - aesmc v0.16b,v0.16b - aese v0.16b,v19.16b - aesmc 
v0.16b,v0.16b - ld1 {v16.16b},[x0],x8 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - eor v16.16b,v16.16b,v5.16b - aese v0.16b,v23.16b - eor v6.16b,v0.16b,v7.16b - b.hs Loop_cbc_enc128 - - st1 {v6.16b},[x1],#16 - b Lcbc_done -.align 5 -Lcbc_dec: - ld1 {v18.16b},[x0],#16 - subs x2,x2,#32 // bias - add w6,w5,#2 - orr v3.16b,v0.16b,v0.16b - orr v1.16b,v0.16b,v0.16b - orr v19.16b,v18.16b,v18.16b - b.lo Lcbc_dec_tail - - orr v1.16b,v18.16b,v18.16b - ld1 {v18.16b},[x0],#16 - orr v2.16b,v0.16b,v0.16b - orr v3.16b,v1.16b,v1.16b - orr v19.16b,v18.16b,v18.16b - -Loop3x_cbc_dec: - aesd v0.16b,v16.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aesd v0.16b,v17.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt Loop3x_cbc_dec - - aesd v0.16b,v16.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - eor v4.16b,v6.16b,v7.16b - subs x2,x2,#0x30 - eor v5.16b,v2.16b,v7.16b - csel x6,x2,x6,lo // x6, w6, is zero at this point - aesd v0.16b,v17.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - eor v17.16b,v3.16b,v7.16b - add x0,x0,x6 // x0 is adjusted in such way that - // at exit from the loop v1.16b-v18.16b - // are loaded with last "words" - orr v6.16b,v19.16b,v19.16b - mov x7,x3 - aesd v0.16b,v20.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v20.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v20.16b - aesimc v18.16b,v18.16b - ld1 {v2.16b},[x0],#16 - aesd v0.16b,v21.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v21.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v21.16b - aesimc v18.16b,v18.16b - ld1 {v3.16b},[x0],#16 - aesd v0.16b,v22.16b - aesimc v0.16b,v0.16b - aesd 
v1.16b,v22.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v22.16b - aesimc v18.16b,v18.16b - ld1 {v19.16b},[x0],#16 - aesd v0.16b,v23.16b - aesd v1.16b,v23.16b - aesd v18.16b,v23.16b - ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] - add w6,w5,#2 - eor v4.16b,v4.16b,v0.16b - eor v5.16b,v5.16b,v1.16b - eor v18.16b,v18.16b,v17.16b - ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] - st1 {v4.16b},[x1],#16 - orr v0.16b,v2.16b,v2.16b - st1 {v5.16b},[x1],#16 - orr v1.16b,v3.16b,v3.16b - st1 {v18.16b},[x1],#16 - orr v18.16b,v19.16b,v19.16b - b.hs Loop3x_cbc_dec - - cmn x2,#0x30 - b.eq Lcbc_done - nop - -Lcbc_dec_tail: - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt Lcbc_dec_tail - - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - aesd v1.16b,v20.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v20.16b - aesimc v18.16b,v18.16b - cmn x2,#0x20 - aesd v1.16b,v21.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v21.16b - aesimc v18.16b,v18.16b - eor v5.16b,v6.16b,v7.16b - aesd v1.16b,v22.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v22.16b - aesimc v18.16b,v18.16b - eor v17.16b,v3.16b,v7.16b - aesd v1.16b,v23.16b - aesd v18.16b,v23.16b - b.eq Lcbc_dec_one - eor v5.16b,v5.16b,v1.16b - eor v17.16b,v17.16b,v18.16b - orr v6.16b,v19.16b,v19.16b - st1 {v5.16b},[x1],#16 - st1 {v17.16b},[x1],#16 - b Lcbc_done - -Lcbc_dec_one: - eor v5.16b,v5.16b,v18.16b - orr v6.16b,v19.16b,v19.16b - st1 {v5.16b},[x1],#16 - -Lcbc_done: - st1 {v6.16b},[x4] -Lcbc_abort: - ldr x29,[sp],#16 - ret - -.globl _aes_hw_ctr32_encrypt_blocks -.private_extern _aes_hw_ctr32_encrypt_blocks - -.align 5 -_aes_hw_ctr32_encrypt_blocks: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - ldr w5,[x3,#240] - - ldr w8, [x4, #12] - ld1 {v0.4s},[x4] - - ld1 {v16.4s,v17.4s},[x3] // load key schedule... - sub w5,w5,#4 - mov x12,#16 - cmp x2,#2 - add x7,x3,x5,lsl#4 // pointer to last 5 round keys - sub w5,w5,#2 - ld1 {v20.4s,v21.4s},[x7],#32 - ld1 {v22.4s,v23.4s},[x7],#32 - ld1 {v7.4s},[x7] - add x7,x3,#32 - mov w6,w5 - csel x12,xzr,x12,lo -#ifndef __ARMEB__ - rev w8, w8 -#endif - orr v1.16b,v0.16b,v0.16b - add w10, w8, #1 - orr v18.16b,v0.16b,v0.16b - add w8, w8, #2 - orr v6.16b,v0.16b,v0.16b - rev w10, w10 - mov v1.s[3],w10 - b.ls Lctr32_tail - rev w12, w8 - sub x2,x2,#3 // bias - mov v18.s[3],w12 - b Loop3x_ctr32 - -.align 4 -Loop3x_ctr32: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - aese v18.16b,v16.16b - aesmc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - aese v18.16b,v17.16b - aesmc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt Loop3x_ctr32 - - aese v0.16b,v16.16b - aesmc v4.16b,v0.16b - aese v1.16b,v16.16b - aesmc v5.16b,v1.16b - ld1 {v2.16b},[x0],#16 - orr v0.16b,v6.16b,v6.16b - aese v18.16b,v16.16b - aesmc v18.16b,v18.16b - ld1 {v3.16b},[x0],#16 - orr v1.16b,v6.16b,v6.16b - aese v4.16b,v17.16b - aesmc v4.16b,v4.16b - aese v5.16b,v17.16b - aesmc v5.16b,v5.16b - ld1 {v19.16b},[x0],#16 - mov x7,x3 - aese v18.16b,v17.16b - aesmc v17.16b,v18.16b - orr v18.16b,v6.16b,v6.16b - add w9,w8,#1 - aese v4.16b,v20.16b - aesmc v4.16b,v4.16b - aese v5.16b,v20.16b - aesmc v5.16b,v5.16b - eor v2.16b,v2.16b,v7.16b - add w10,w8,#2 - aese v17.16b,v20.16b - aesmc v17.16b,v17.16b - eor v3.16b,v3.16b,v7.16b - add w8,w8,#3 - aese v4.16b,v21.16b - aesmc v4.16b,v4.16b - aese v5.16b,v21.16b - aesmc v5.16b,v5.16b - eor v19.16b,v19.16b,v7.16b - rev w9,w9 - aese v17.16b,v21.16b - aesmc v17.16b,v17.16b - mov v0.s[3], w9 - rev w10,w10 - aese v4.16b,v22.16b - aesmc v4.16b,v4.16b - aese v5.16b,v22.16b - aesmc 
v5.16b,v5.16b - mov v1.s[3], w10 - rev w12,w8 - aese v17.16b,v22.16b - aesmc v17.16b,v17.16b - mov v18.s[3], w12 - subs x2,x2,#3 - aese v4.16b,v23.16b - aese v5.16b,v23.16b - aese v17.16b,v23.16b - - eor v2.16b,v2.16b,v4.16b - ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] - st1 {v2.16b},[x1],#16 - eor v3.16b,v3.16b,v5.16b - mov w6,w5 - st1 {v3.16b},[x1],#16 - eor v19.16b,v19.16b,v17.16b - ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] - st1 {v19.16b},[x1],#16 - b.hs Loop3x_ctr32 - - adds x2,x2,#3 - b.eq Lctr32_done - cmp x2,#1 - mov x12,#16 - csel x12,xzr,x12,eq - -Lctr32_tail: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - ld1 {v17.4s},[x7],#16 - b.gt Lctr32_tail - - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - ld1 {v2.16b},[x0],x12 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - aese v1.16b,v20.16b - aesmc v1.16b,v1.16b - ld1 {v3.16b},[x0] - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - aese v1.16b,v21.16b - aesmc v1.16b,v1.16b - eor v2.16b,v2.16b,v7.16b - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - aese v1.16b,v22.16b - aesmc v1.16b,v1.16b - eor v3.16b,v3.16b,v7.16b - aese v0.16b,v23.16b - aese v1.16b,v23.16b - - cmp x2,#1 - eor v2.16b,v2.16b,v0.16b - eor v3.16b,v3.16b,v1.16b - st1 {v2.16b},[x1],#16 - b.eq Lctr32_done - st1 {v3.16b},[x1] - -Lctr32_done: - ldr x29,[sp],#16 - ret - -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/armv8-mont.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/armv8-mont.S deleted file mode 100644 index 3d83f4d8d6..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/armv8-mont.S +++ /dev/null @@ -1,1420 +0,0 @@ -// This file is generated from 
a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl _bn_mul_mont -.private_extern _bn_mul_mont - -.align 5 -_bn_mul_mont: - tst x5,#7 - b.eq __bn_sqr8x_mont - tst x5,#3 - b.eq __bn_mul4x_mont -Lmul_mont: - stp x29,x30,[sp,#-64]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - - ldr x9,[x2],#8 // bp[0] - sub x22,sp,x5,lsl#3 - ldp x7,x8,[x1],#16 // ap[0..1] - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - and x22,x22,#-16 // ABI says so - ldp x13,x14,[x3],#16 // np[0..1] - - mul x6,x7,x9 // ap[0]*bp[0] - sub x21,x5,#16 // j=num-2 - umulh x7,x7,x9 - mul x10,x8,x9 // ap[1]*bp[0] - umulh x11,x8,x9 - - mul x15,x6,x4 // "tp[0]"*n0 - mov sp,x22 // alloca - - // (*) mul x12,x13,x15 // np[0]*m1 - umulh x13,x13,x15 - mul x16,x14,x15 // np[1]*m1 - // (*) adds x12,x12,x6 // discarded - // (*) As for removal of first multiplication and addition - // instructions. The outcome of first addition is - // guaranteed to be zero, which leaves two computationally - // significant outcomes: it either carries or not. Then - // question is when does it carry? Is there alternative - // way to deduce it? If you follow operations, you can - // observe that condition for carry is quite simple: - // x6 being non-zero. So that carry can be calculated - // by adding -1 to x6. That's what next instruction does. 
- subs xzr,x6,#1 // (*) - umulh x17,x14,x15 - adc x13,x13,xzr - cbz x21,L1st_skip - -L1st: - ldr x8,[x1],#8 - adds x6,x10,x7 - sub x21,x21,#8 // j-- - adc x7,x11,xzr - - ldr x14,[x3],#8 - adds x12,x16,x13 - mul x10,x8,x9 // ap[j]*bp[0] - adc x13,x17,xzr - umulh x11,x8,x9 - - adds x12,x12,x6 - mul x16,x14,x15 // np[j]*m1 - adc x13,x13,xzr - umulh x17,x14,x15 - str x12,[x22],#8 // tp[j-1] - cbnz x21,L1st - -L1st_skip: - adds x6,x10,x7 - sub x1,x1,x5 // rewind x1 - adc x7,x11,xzr - - adds x12,x16,x13 - sub x3,x3,x5 // rewind x3 - adc x13,x17,xzr - - adds x12,x12,x6 - sub x20,x5,#8 // i=num-1 - adcs x13,x13,x7 - - adc x19,xzr,xzr // upmost overflow bit - stp x12,x13,[x22] - -Louter: - ldr x9,[x2],#8 // bp[i] - ldp x7,x8,[x1],#16 - ldr x23,[sp] // tp[0] - add x22,sp,#8 - - mul x6,x7,x9 // ap[0]*bp[i] - sub x21,x5,#16 // j=num-2 - umulh x7,x7,x9 - ldp x13,x14,[x3],#16 - mul x10,x8,x9 // ap[1]*bp[i] - adds x6,x6,x23 - umulh x11,x8,x9 - adc x7,x7,xzr - - mul x15,x6,x4 - sub x20,x20,#8 // i-- - - // (*) mul x12,x13,x15 // np[0]*m1 - umulh x13,x13,x15 - mul x16,x14,x15 // np[1]*m1 - // (*) adds x12,x12,x6 - subs xzr,x6,#1 // (*) - umulh x17,x14,x15 - cbz x21,Linner_skip - -Linner: - ldr x8,[x1],#8 - adc x13,x13,xzr - ldr x23,[x22],#8 // tp[j] - adds x6,x10,x7 - sub x21,x21,#8 // j-- - adc x7,x11,xzr - - adds x12,x16,x13 - ldr x14,[x3],#8 - adc x13,x17,xzr - - mul x10,x8,x9 // ap[j]*bp[i] - adds x6,x6,x23 - umulh x11,x8,x9 - adc x7,x7,xzr - - mul x16,x14,x15 // np[j]*m1 - adds x12,x12,x6 - umulh x17,x14,x15 - str x12,[x22,#-16] // tp[j-1] - cbnz x21,Linner - -Linner_skip: - ldr x23,[x22],#8 // tp[j] - adc x13,x13,xzr - adds x6,x10,x7 - sub x1,x1,x5 // rewind x1 - adc x7,x11,xzr - - adds x12,x16,x13 - sub x3,x3,x5 // rewind x3 - adcs x13,x17,x19 - adc x19,xzr,xzr - - adds x6,x6,x23 - adc x7,x7,xzr - - adds x12,x12,x6 - adcs x13,x13,x7 - adc x19,x19,xzr // upmost overflow bit - stp x12,x13,[x22,#-16] - - cbnz x20,Louter - - // Final step. 
We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. - ldr x23,[sp] // tp[0] - add x22,sp,#8 - ldr x14,[x3],#8 // np[0] - subs x21,x5,#8 // j=num-1 and clear borrow - mov x1,x0 -Lsub: - sbcs x8,x23,x14 // tp[j]-np[j] - ldr x23,[x22],#8 - sub x21,x21,#8 // j-- - ldr x14,[x3],#8 - str x8,[x1],#8 // rp[j]=tp[j]-np[j] - cbnz x21,Lsub - - sbcs x8,x23,x14 - sbcs x19,x19,xzr // did it borrow? - str x8,[x1],#8 // rp[num-1] - - ldr x23,[sp] // tp[0] - add x22,sp,#8 - ldr x8,[x0],#8 // rp[0] - sub x5,x5,#8 // num-- - nop -Lcond_copy: - sub x5,x5,#8 // num-- - csel x14,x23,x8,lo // did it borrow? - ldr x23,[x22],#8 - ldr x8,[x0],#8 - str xzr,[x22,#-16] // wipe tp - str x14,[x0,#-16] - cbnz x5,Lcond_copy - - csel x14,x23,x8,lo - str xzr,[x22,#-8] // wipe tp - str x14,[x0,#-8] - - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldr x29,[sp],#64 - ret - - -.align 5 -__bn_sqr8x_mont: - cmp x1,x2 - b.ne __bn_mul4x_mont -Lsqr8x_mont: - stp x29,x30,[sp,#-128]! 
- add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - stp x0,x3,[sp,#96] // offload rp and np - - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - ldp x10,x11,[x1,#8*4] - ldp x12,x13,[x1,#8*6] - - sub x2,sp,x5,lsl#4 - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - mov sp,x2 // alloca - sub x27,x5,#8*8 - b Lsqr8x_zero_start - -Lsqr8x_zero: - sub x27,x27,#8*8 - stp xzr,xzr,[x2,#8*0] - stp xzr,xzr,[x2,#8*2] - stp xzr,xzr,[x2,#8*4] - stp xzr,xzr,[x2,#8*6] -Lsqr8x_zero_start: - stp xzr,xzr,[x2,#8*8] - stp xzr,xzr,[x2,#8*10] - stp xzr,xzr,[x2,#8*12] - stp xzr,xzr,[x2,#8*14] - add x2,x2,#8*16 - cbnz x27,Lsqr8x_zero - - add x3,x1,x5 - add x1,x1,#8*8 - mov x19,xzr - mov x20,xzr - mov x21,xzr - mov x22,xzr - mov x23,xzr - mov x24,xzr - mov x25,xzr - mov x26,xzr - mov x2,sp - str x4,[x29,#112] // offload n0 - - // Multiply everything but a[i]*a[i] -.align 4 -Lsqr8x_outer_loop: - // a[1]a[0] (i) - // a[2]a[0] - // a[3]a[0] - // a[4]a[0] - // a[5]a[0] - // a[6]a[0] - // a[7]a[0] - // a[2]a[1] (ii) - // a[3]a[1] - // a[4]a[1] - // a[5]a[1] - // a[6]a[1] - // a[7]a[1] - // a[3]a[2] (iii) - // a[4]a[2] - // a[5]a[2] - // a[6]a[2] - // a[7]a[2] - // a[4]a[3] (iv) - // a[5]a[3] - // a[6]a[3] - // a[7]a[3] - // a[5]a[4] (v) - // a[6]a[4] - // a[7]a[4] - // a[6]a[5] (vi) - // a[7]a[5] - // a[7]a[6] (vii) - - mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) - mul x15,x8,x6 - mul x16,x9,x6 - mul x17,x10,x6 - adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) - mul x14,x11,x6 - adcs x21,x21,x15 - mul x15,x12,x6 - adcs x22,x22,x16 - mul x16,x13,x6 - adcs x23,x23,x17 - umulh x17,x7,x6 // hi(a[1..7]*a[0]) - adcs x24,x24,x14 - umulh x14,x8,x6 - adcs x25,x25,x15 - umulh x15,x9,x6 - adcs x26,x26,x16 - umulh x16,x10,x6 - stp x19,x20,[x2],#8*2 // t[0..1] - adc x19,xzr,xzr // t[8] - adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) - umulh x17,x11,x6 - adcs x22,x22,x14 - umulh x14,x12,x6 - adcs x23,x23,x15 - umulh x15,x13,x6 - adcs x24,x24,x16 - mul x16,x8,x7 // 
lo(a[2..7]*a[1]) (ii) - adcs x25,x25,x17 - mul x17,x9,x7 - adcs x26,x26,x14 - mul x14,x10,x7 - adc x19,x19,x15 - - mul x15,x11,x7 - adds x22,x22,x16 - mul x16,x12,x7 - adcs x23,x23,x17 - mul x17,x13,x7 - adcs x24,x24,x14 - umulh x14,x8,x7 // hi(a[2..7]*a[1]) - adcs x25,x25,x15 - umulh x15,x9,x7 - adcs x26,x26,x16 - umulh x16,x10,x7 - adcs x19,x19,x17 - umulh x17,x11,x7 - stp x21,x22,[x2],#8*2 // t[2..3] - adc x20,xzr,xzr // t[9] - adds x23,x23,x14 - umulh x14,x12,x7 - adcs x24,x24,x15 - umulh x15,x13,x7 - adcs x25,x25,x16 - mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) - adcs x26,x26,x17 - mul x17,x10,x8 - adcs x19,x19,x14 - mul x14,x11,x8 - adc x20,x20,x15 - - mul x15,x12,x8 - adds x24,x24,x16 - mul x16,x13,x8 - adcs x25,x25,x17 - umulh x17,x9,x8 // hi(a[3..7]*a[2]) - adcs x26,x26,x14 - umulh x14,x10,x8 - adcs x19,x19,x15 - umulh x15,x11,x8 - adcs x20,x20,x16 - umulh x16,x12,x8 - stp x23,x24,[x2],#8*2 // t[4..5] - adc x21,xzr,xzr // t[10] - adds x25,x25,x17 - umulh x17,x13,x8 - adcs x26,x26,x14 - mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) - adcs x19,x19,x15 - mul x15,x11,x9 - adcs x20,x20,x16 - mul x16,x12,x9 - adc x21,x21,x17 - - mul x17,x13,x9 - adds x26,x26,x14 - umulh x14,x10,x9 // hi(a[4..7]*a[3]) - adcs x19,x19,x15 - umulh x15,x11,x9 - adcs x20,x20,x16 - umulh x16,x12,x9 - adcs x21,x21,x17 - umulh x17,x13,x9 - stp x25,x26,[x2],#8*2 // t[6..7] - adc x22,xzr,xzr // t[11] - adds x19,x19,x14 - mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) - adcs x20,x20,x15 - mul x15,x12,x10 - adcs x21,x21,x16 - mul x16,x13,x10 - adc x22,x22,x17 - - umulh x17,x11,x10 // hi(a[5..7]*a[4]) - adds x20,x20,x14 - umulh x14,x12,x10 - adcs x21,x21,x15 - umulh x15,x13,x10 - adcs x22,x22,x16 - mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) - adc x23,xzr,xzr // t[12] - adds x21,x21,x17 - mul x17,x13,x11 - adcs x22,x22,x14 - umulh x14,x12,x11 // hi(a[6..7]*a[5]) - adc x23,x23,x15 - - umulh x15,x13,x11 - adds x22,x22,x16 - mul x16,x13,x12 // lo(a[7]*a[6]) (vii) - adcs x23,x23,x17 - umulh x17,x13,x12 // 
hi(a[7]*a[6]) - adc x24,xzr,xzr // t[13] - adds x23,x23,x14 - sub x27,x3,x1 // done yet? - adc x24,x24,x15 - - adds x24,x24,x16 - sub x14,x3,x5 // rewinded ap - adc x25,xzr,xzr // t[14] - add x25,x25,x17 - - cbz x27,Lsqr8x_outer_break - - mov x4,x6 - ldp x6,x7,[x2,#8*0] - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - adds x19,x19,x6 - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x0,x1 - adcs x26,xzr,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved below - mov x27,#-8*8 - - // a[8]a[0] - // a[9]a[0] - // a[a]a[0] - // a[b]a[0] - // a[c]a[0] - // a[d]a[0] - // a[e]a[0] - // a[f]a[0] - // a[8]a[1] - // a[f]a[1]........................ - // a[8]a[2] - // a[f]a[2]........................ - // a[8]a[3] - // a[f]a[3]........................ - // a[8]a[4] - // a[f]a[4]........................ - // a[8]a[5] - // a[f]a[5]........................ - // a[8]a[6] - // a[f]a[6]........................ - // a[8]a[7] - // a[f]a[7]........................ 
-Lsqr8x_mul: - mul x14,x6,x4 - adc x28,xzr,xzr // carry bit, modulo-scheduled - mul x15,x7,x4 - add x27,x27,#8 - mul x16,x8,x4 - mul x17,x9,x4 - adds x19,x19,x14 - mul x14,x10,x4 - adcs x20,x20,x15 - mul x15,x11,x4 - adcs x21,x21,x16 - mul x16,x12,x4 - adcs x22,x22,x17 - mul x17,x13,x4 - adcs x23,x23,x14 - umulh x14,x6,x4 - adcs x24,x24,x15 - umulh x15,x7,x4 - adcs x25,x25,x16 - umulh x16,x8,x4 - adcs x26,x26,x17 - umulh x17,x9,x4 - adc x28,x28,xzr - str x19,[x2],#8 - adds x19,x20,x14 - umulh x14,x10,x4 - adcs x20,x21,x15 - umulh x15,x11,x4 - adcs x21,x22,x16 - umulh x16,x12,x4 - adcs x22,x23,x17 - umulh x17,x13,x4 - ldr x4,[x0,x27] - adcs x23,x24,x14 - adcs x24,x25,x15 - adcs x25,x26,x16 - adcs x26,x28,x17 - //adc x28,xzr,xzr // moved above - cbnz x27,Lsqr8x_mul - // note that carry flag is guaranteed - // to be zero at this point - cmp x1,x3 // done yet? - b.eq Lsqr8x_break - - ldp x6,x7,[x2,#8*0] - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - adds x19,x19,x6 - ldr x4,[x0,#-8*8] - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x27,#-8*8 - adcs x26,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved above - b Lsqr8x_mul - -.align 4 -Lsqr8x_break: - ldp x6,x7,[x0,#8*0] - add x1,x0,#8*8 - ldp x8,x9,[x0,#8*2] - sub x14,x3,x1 // is it last iteration? 
- ldp x10,x11,[x0,#8*4] - sub x15,x2,x14 - ldp x12,x13,[x0,#8*6] - cbz x14,Lsqr8x_outer_loop - - stp x19,x20,[x2,#8*0] - ldp x19,x20,[x15,#8*0] - stp x21,x22,[x2,#8*2] - ldp x21,x22,[x15,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[x15,#8*4] - stp x25,x26,[x2,#8*6] - mov x2,x15 - ldp x25,x26,[x15,#8*6] - b Lsqr8x_outer_loop - -.align 4 -Lsqr8x_outer_break: - // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] - ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] - ldp x15,x16,[sp,#8*1] - ldp x11,x13,[x14,#8*2] - add x1,x14,#8*4 - ldp x17,x14,[sp,#8*3] - - stp x19,x20,[x2,#8*0] - mul x19,x7,x7 - stp x21,x22,[x2,#8*2] - umulh x7,x7,x7 - stp x23,x24,[x2,#8*4] - mul x8,x9,x9 - stp x25,x26,[x2,#8*6] - mov x2,sp - umulh x9,x9,x9 - adds x20,x7,x15,lsl#1 - extr x15,x16,x15,#63 - sub x27,x5,#8*4 - -Lsqr4x_shift_n_add: - adcs x21,x8,x15 - extr x16,x17,x16,#63 - sub x27,x27,#8*4 - adcs x22,x9,x16 - ldp x15,x16,[x2,#8*5] - mul x10,x11,x11 - ldp x7,x9,[x1],#8*2 - umulh x11,x11,x11 - mul x12,x13,x13 - umulh x13,x13,x13 - extr x17,x14,x17,#63 - stp x19,x20,[x2,#8*0] - adcs x23,x10,x17 - extr x14,x15,x14,#63 - stp x21,x22,[x2,#8*2] - adcs x24,x11,x14 - ldp x17,x14,[x2,#8*7] - extr x15,x16,x15,#63 - adcs x25,x12,x15 - extr x16,x17,x16,#63 - adcs x26,x13,x16 - ldp x15,x16,[x2,#8*9] - mul x6,x7,x7 - ldp x11,x13,[x1],#8*2 - umulh x7,x7,x7 - mul x8,x9,x9 - umulh x9,x9,x9 - stp x23,x24,[x2,#8*4] - extr x17,x14,x17,#63 - stp x25,x26,[x2,#8*6] - add x2,x2,#8*8 - adcs x19,x6,x17 - extr x14,x15,x14,#63 - adcs x20,x7,x14 - ldp x17,x14,[x2,#8*3] - extr x15,x16,x15,#63 - cbnz x27,Lsqr4x_shift_n_add - ldp x1,x4,[x29,#104] // pull np and n0 - - adcs x21,x8,x15 - extr x16,x17,x16,#63 - adcs x22,x9,x16 - ldp x15,x16,[x2,#8*5] - mul x10,x11,x11 - umulh x11,x11,x11 - stp x19,x20,[x2,#8*0] - mul x12,x13,x13 - umulh x13,x13,x13 - stp x21,x22,[x2,#8*2] - extr x17,x14,x17,#63 - adcs x23,x10,x17 - extr x14,x15,x14,#63 - ldp x19,x20,[sp,#8*0] - adcs x24,x11,x14 - extr x15,x16,x15,#63 - ldp 
x6,x7,[x1,#8*0] - adcs x25,x12,x15 - extr x16,xzr,x16,#63 - ldp x8,x9,[x1,#8*2] - adc x26,x13,x16 - ldp x10,x11,[x1,#8*4] - - // Reduce by 512 bits per iteration - mul x28,x4,x19 // t[0]*n0 - ldp x12,x13,[x1,#8*6] - add x3,x1,x5 - ldp x21,x22,[sp,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[sp,#8*4] - stp x25,x26,[x2,#8*6] - ldp x25,x26,[sp,#8*6] - add x1,x1,#8*8 - mov x30,xzr // initial top-most carry - mov x2,sp - mov x27,#8 - -Lsqr8x_reduction: - // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) - mul x15,x7,x28 - sub x27,x27,#1 - mul x16,x8,x28 - str x28,[x2],#8 // put aside t[0]*n0 for tail processing - mul x17,x9,x28 - // (*) adds xzr,x19,x14 - subs xzr,x19,#1 // (*) - mul x14,x10,x28 - adcs x19,x20,x15 - mul x15,x11,x28 - adcs x20,x21,x16 - mul x16,x12,x28 - adcs x21,x22,x17 - mul x17,x13,x28 - adcs x22,x23,x14 - umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) - adcs x23,x24,x15 - umulh x15,x7,x28 - adcs x24,x25,x16 - umulh x16,x8,x28 - adcs x25,x26,x17 - umulh x17,x9,x28 - adc x26,xzr,xzr - adds x19,x19,x14 - umulh x14,x10,x28 - adcs x20,x20,x15 - umulh x15,x11,x28 - adcs x21,x21,x16 - umulh x16,x12,x28 - adcs x22,x22,x17 - umulh x17,x13,x28 - mul x28,x4,x19 // next t[0]*n0 - adcs x23,x23,x14 - adcs x24,x24,x15 - adcs x25,x25,x16 - adc x26,x26,x17 - cbnz x27,Lsqr8x_reduction - - ldp x14,x15,[x2,#8*0] - ldp x16,x17,[x2,#8*2] - mov x0,x2 - sub x27,x3,x1 // done yet? 
- adds x19,x19,x14 - adcs x20,x20,x15 - ldp x14,x15,[x2,#8*4] - adcs x21,x21,x16 - adcs x22,x22,x17 - ldp x16,x17,[x2,#8*6] - adcs x23,x23,x14 - adcs x24,x24,x15 - adcs x25,x25,x16 - adcs x26,x26,x17 - //adc x28,xzr,xzr // moved below - cbz x27,Lsqr8x8_post_condition - - ldr x4,[x2,#-8*8] - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - ldp x10,x11,[x1,#8*4] - mov x27,#-8*8 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - -Lsqr8x_tail: - mul x14,x6,x4 - adc x28,xzr,xzr // carry bit, modulo-scheduled - mul x15,x7,x4 - add x27,x27,#8 - mul x16,x8,x4 - mul x17,x9,x4 - adds x19,x19,x14 - mul x14,x10,x4 - adcs x20,x20,x15 - mul x15,x11,x4 - adcs x21,x21,x16 - mul x16,x12,x4 - adcs x22,x22,x17 - mul x17,x13,x4 - adcs x23,x23,x14 - umulh x14,x6,x4 - adcs x24,x24,x15 - umulh x15,x7,x4 - adcs x25,x25,x16 - umulh x16,x8,x4 - adcs x26,x26,x17 - umulh x17,x9,x4 - adc x28,x28,xzr - str x19,[x2],#8 - adds x19,x20,x14 - umulh x14,x10,x4 - adcs x20,x21,x15 - umulh x15,x11,x4 - adcs x21,x22,x16 - umulh x16,x12,x4 - adcs x22,x23,x17 - umulh x17,x13,x4 - ldr x4,[x0,x27] - adcs x23,x24,x14 - adcs x24,x25,x15 - adcs x25,x26,x16 - adcs x26,x28,x17 - //adc x28,xzr,xzr // moved above - cbnz x27,Lsqr8x_tail - // note that carry flag is guaranteed - // to be zero at this point - ldp x6,x7,[x2,#8*0] - sub x27,x3,x1 // done yet? 
- sub x16,x3,x5 // rewinded np - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - cbz x27,Lsqr8x_tail_break - - ldr x4,[x0,#-8*8] - adds x19,x19,x6 - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x27,#-8*8 - adcs x26,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved above - b Lsqr8x_tail - -.align 4 -Lsqr8x_tail_break: - ldr x4,[x29,#112] // pull n0 - add x27,x2,#8*8 // end of current t[num] window - - subs xzr,x30,#1 // "move" top-most carry to carry bit - adcs x14,x19,x6 - adcs x15,x20,x7 - ldp x19,x20,[x0,#8*0] - adcs x21,x21,x8 - ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] - adcs x22,x22,x9 - ldp x8,x9,[x16,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x16,#8*4] - adcs x25,x25,x12 - adcs x26,x26,x13 - ldp x12,x13,[x16,#8*6] - add x1,x16,#8*8 - adc x30,xzr,xzr // top-most carry - mul x28,x4,x19 - stp x14,x15,[x2,#8*0] - stp x21,x22,[x2,#8*2] - ldp x21,x22,[x0,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[x0,#8*4] - cmp x27,x29 // did we hit the bottom? - stp x25,x26,[x2,#8*6] - mov x2,x0 // slide the window - ldp x25,x26,[x0,#8*6] - mov x27,#8 - b.ne Lsqr8x_reduction - - // Final step. We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. 
- ldr x0,[x29,#96] // pull rp - add x2,x2,#8*8 - subs x14,x19,x6 - sbcs x15,x20,x7 - sub x27,x5,#8*8 - mov x3,x0 // x0 copy - -Lsqr8x_sub: - sbcs x16,x21,x8 - ldp x6,x7,[x1,#8*0] - sbcs x17,x22,x9 - stp x14,x15,[x0,#8*0] - sbcs x14,x23,x10 - ldp x8,x9,[x1,#8*2] - sbcs x15,x24,x11 - stp x16,x17,[x0,#8*2] - sbcs x16,x25,x12 - ldp x10,x11,[x1,#8*4] - sbcs x17,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - ldp x19,x20,[x2,#8*0] - sub x27,x27,#8*8 - ldp x21,x22,[x2,#8*2] - ldp x23,x24,[x2,#8*4] - ldp x25,x26,[x2,#8*6] - add x2,x2,#8*8 - stp x14,x15,[x0,#8*4] - sbcs x14,x19,x6 - stp x16,x17,[x0,#8*6] - add x0,x0,#8*8 - sbcs x15,x20,x7 - cbnz x27,Lsqr8x_sub - - sbcs x16,x21,x8 - mov x2,sp - add x1,sp,x5 - ldp x6,x7,[x3,#8*0] - sbcs x17,x22,x9 - stp x14,x15,[x0,#8*0] - sbcs x14,x23,x10 - ldp x8,x9,[x3,#8*2] - sbcs x15,x24,x11 - stp x16,x17,[x0,#8*2] - sbcs x16,x25,x12 - ldp x19,x20,[x1,#8*0] - sbcs x17,x26,x13 - ldp x21,x22,[x1,#8*2] - sbcs xzr,x30,xzr // did it borrow? - ldr x30,[x29,#8] // pull return address - stp x14,x15,[x0,#8*4] - stp x16,x17,[x0,#8*6] - - sub x27,x5,#8*4 -Lsqr4x_cond_copy: - sub x27,x27,#8*4 - csel x14,x19,x6,lo - stp xzr,xzr,[x2,#8*0] - csel x15,x20,x7,lo - ldp x6,x7,[x3,#8*4] - ldp x19,x20,[x1,#8*4] - csel x16,x21,x8,lo - stp xzr,xzr,[x2,#8*2] - add x2,x2,#8*4 - csel x17,x22,x9,lo - ldp x8,x9,[x3,#8*6] - ldp x21,x22,[x1,#8*6] - add x1,x1,#8*4 - stp x14,x15,[x3,#8*0] - stp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - stp xzr,xzr,[x1,#8*0] - stp xzr,xzr,[x1,#8*2] - cbnz x27,Lsqr4x_cond_copy - - csel x14,x19,x6,lo - stp xzr,xzr,[x2,#8*0] - csel x15,x20,x7,lo - stp xzr,xzr,[x2,#8*2] - csel x16,x21,x8,lo - csel x17,x22,x9,lo - stp x14,x15,[x3,#8*0] - stp x16,x17,[x3,#8*2] - - b Lsqr8x_done - -.align 4 -Lsqr8x8_post_condition: - adc x28,xzr,xzr - ldr x30,[x29,#8] // pull return address - // x19-7,x28 hold result, x6-7 hold modulus - subs x6,x19,x6 - ldr x1,[x29,#96] // pull rp - sbcs x7,x20,x7 - stp xzr,xzr,[sp,#8*0] - sbcs x8,x21,x8 - stp xzr,xzr,[sp,#8*2] 
- sbcs x9,x22,x9 - stp xzr,xzr,[sp,#8*4] - sbcs x10,x23,x10 - stp xzr,xzr,[sp,#8*6] - sbcs x11,x24,x11 - stp xzr,xzr,[sp,#8*8] - sbcs x12,x25,x12 - stp xzr,xzr,[sp,#8*10] - sbcs x13,x26,x13 - stp xzr,xzr,[sp,#8*12] - sbcs x28,x28,xzr // did it borrow? - stp xzr,xzr,[sp,#8*14] - - // x6-7 hold result-modulus - csel x6,x19,x6,lo - csel x7,x20,x7,lo - csel x8,x21,x8,lo - csel x9,x22,x9,lo - stp x6,x7,[x1,#8*0] - csel x10,x23,x10,lo - csel x11,x24,x11,lo - stp x8,x9,[x1,#8*2] - csel x12,x25,x12,lo - csel x13,x26,x13,lo - stp x10,x11,[x1,#8*4] - stp x12,x13,[x1,#8*6] - -Lsqr8x_done: - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldr x29,[sp],#128 - ret - - -.align 5 -__bn_mul4x_mont: - stp x29,x30,[sp,#-128]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - sub x26,sp,x5,lsl#3 - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - sub sp,x26,#8*4 // alloca - - add x10,x2,x5 - add x27,x1,x5 - stp x0,x10,[x29,#96] // offload rp and &b[num] - - ldr x24,[x2,#8*0] // b[0] - ldp x6,x7,[x1,#8*0] // a[0..3] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - mov x19,xzr - mov x20,xzr - mov x21,xzr - mov x22,xzr - ldp x14,x15,[x3,#8*0] // n[0..3] - ldp x16,x17,[x3,#8*2] - adds x3,x3,#8*4 // clear carry bit - mov x0,xzr - mov x28,#0 - mov x26,sp - -Loop_mul4x_1st_reduction: - mul x10,x6,x24 // lo(a[0..3]*b[0]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[0..3]*b[0]) - adcs x20,x20,x11 - mul x25,x19,x4 // t[0]*n0 - adcs x21,x21,x12 - umulh x11,x7,x24 - adcs x22,x22,x13 - umulh x12,x8,x24 - adc x23,xzr,xzr - umulh x13,x9,x24 - ldr x24,[x2,x28] // next b[i] (or b[0]) - adds x20,x20,x10 - // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) - str x25,[x26],#8 // put aside t[0]*n0 for tail processing - adcs 
x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - // (*) adds xzr,x19,x10 - subs xzr,x19,#1 // (*) - umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) - adcs x19,x20,x11 - umulh x11,x15,x25 - adcs x20,x21,x12 - umulh x12,x16,x25 - adcs x21,x22,x13 - umulh x13,x17,x25 - adcs x22,x23,x0 - adc x0,xzr,xzr - adds x19,x19,x10 - sub x10,x27,x1 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - cbnz x28,Loop_mul4x_1st_reduction - - cbz x10,Lmul4x4_post_condition - - ldp x6,x7,[x1,#8*0] // a[4..7] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - ldr x25,[sp] // a[0]*n0 - ldp x14,x15,[x3,#8*0] // n[4..7] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - -Loop_mul4x_1st_tail: - mul x10,x6,x24 // lo(a[4..7]*b[i]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[4..7]*b[i]) - adcs x20,x20,x11 - umulh x11,x7,x24 - adcs x21,x21,x12 - umulh x12,x8,x24 - adcs x22,x22,x13 - umulh x13,x9,x24 - adc x23,xzr,xzr - ldr x24,[x2,x28] // next b[i] (or b[0]) - adds x20,x20,x10 - mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) - adcs x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - adds x19,x19,x10 - umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) - adcs x20,x20,x11 - umulh x11,x15,x25 - adcs x21,x21,x12 - umulh x12,x16,x25 - adcs x22,x22,x13 - adcs x23,x23,x0 - umulh x13,x17,x25 - adc x0,xzr,xzr - ldr x25,[sp,x28] // next t[0]*n0 - str x19,[x26],#8 // result!!! - adds x19,x20,x10 - sub x10,x27,x1 // done yet? 
- adcs x20,x21,x11 - adcs x21,x22,x12 - adcs x22,x23,x13 - //adc x0,x0,xzr - cbnz x28,Loop_mul4x_1st_tail - - sub x11,x27,x5 // rewinded x1 - cbz x10,Lmul4x_proceed - - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - ldp x14,x15,[x3,#8*0] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - b Loop_mul4x_1st_tail - -.align 5 -Lmul4x_proceed: - ldr x24,[x2,#8*4]! // *++b - adc x30,x0,xzr - ldp x6,x7,[x11,#8*0] // a[0..3] - sub x3,x3,x5 // rewind np - ldp x8,x9,[x11,#8*2] - add x1,x11,#8*4 - - stp x19,x20,[x26,#8*0] // result!!! - ldp x19,x20,[sp,#8*4] // t[0..3] - stp x21,x22,[x26,#8*2] // result!!! - ldp x21,x22,[sp,#8*6] - - ldp x14,x15,[x3,#8*0] // n[0..3] - mov x26,sp - ldp x16,x17,[x3,#8*2] - adds x3,x3,#8*4 // clear carry bit - mov x0,xzr - -.align 4 -Loop_mul4x_reduction: - mul x10,x6,x24 // lo(a[0..3]*b[4]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[0..3]*b[4]) - adcs x20,x20,x11 - mul x25,x19,x4 // t[0]*n0 - adcs x21,x21,x12 - umulh x11,x7,x24 - adcs x22,x22,x13 - umulh x12,x8,x24 - adc x23,xzr,xzr - umulh x13,x9,x24 - ldr x24,[x2,x28] // next b[i] - adds x20,x20,x10 - // (*) mul x10,x14,x25 - str x25,[x26],#8 // put aside t[0]*n0 for tail processing - adcs x21,x21,x11 - mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - // (*) adds xzr,x19,x10 - subs xzr,x19,#1 // (*) - umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 - adcs x19,x20,x11 - umulh x11,x15,x25 - adcs x20,x21,x12 - umulh x12,x16,x25 - adcs x21,x22,x13 - umulh x13,x17,x25 - adcs x22,x23,x0 - adc x0,xzr,xzr - adds x19,x19,x10 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - cbnz x28,Loop_mul4x_reduction - - adc x0,x0,xzr - ldp x10,x11,[x26,#8*4] // t[4..7] - ldp x12,x13,[x26,#8*6] - ldp x6,x7,[x1,#8*0] // a[4..7] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - adds x19,x19,x10 - 
adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - - ldr x25,[sp] // t[0]*n0 - ldp x14,x15,[x3,#8*0] // n[4..7] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - -.align 4 -Loop_mul4x_tail: - mul x10,x6,x24 // lo(a[4..7]*b[4]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[4..7]*b[4]) - adcs x20,x20,x11 - umulh x11,x7,x24 - adcs x21,x21,x12 - umulh x12,x8,x24 - adcs x22,x22,x13 - umulh x13,x9,x24 - adc x23,xzr,xzr - ldr x24,[x2,x28] // next b[i] - adds x20,x20,x10 - mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) - adcs x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - adds x19,x19,x10 - umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) - adcs x20,x20,x11 - umulh x11,x15,x25 - adcs x21,x21,x12 - umulh x12,x16,x25 - adcs x22,x22,x13 - umulh x13,x17,x25 - adcs x23,x23,x0 - ldr x25,[sp,x28] // next a[0]*n0 - adc x0,xzr,xzr - str x19,[x26],#8 // result!!! - adds x19,x20,x10 - sub x10,x27,x1 // done yet? - adcs x20,x21,x11 - adcs x21,x22,x12 - adcs x22,x23,x13 - //adc x0,x0,xzr - cbnz x28,Loop_mul4x_tail - - sub x11,x3,x5 // rewinded np? - adc x0,x0,xzr - cbz x10,Loop_mul4x_break - - ldp x10,x11,[x26,#8*4] - ldp x12,x13,[x26,#8*6] - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - adds x19,x19,x10 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - ldp x14,x15,[x3,#8*0] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - b Loop_mul4x_tail - -.align 4 -Loop_mul4x_break: - ldp x12,x13,[x29,#96] // pull rp and &b[num] - adds x19,x19,x30 - add x2,x2,#8*4 // bp++ - adcs x20,x20,xzr - sub x1,x1,x5 // rewind ap - adcs x21,x21,xzr - stp x19,x20,[x26,#8*0] // result!!! - adcs x22,x22,xzr - ldp x19,x20,[sp,#8*4] // t[0..3] - adc x30,x0,xzr - stp x21,x22,[x26,#8*2] // result!!! - cmp x2,x13 // done yet? 
- ldp x21,x22,[sp,#8*6] - ldp x14,x15,[x11,#8*0] // n[0..3] - ldp x16,x17,[x11,#8*2] - add x3,x11,#8*4 - b.eq Lmul4x_post - - ldr x24,[x2] - ldp x6,x7,[x1,#8*0] // a[0..3] - ldp x8,x9,[x1,#8*2] - adds x1,x1,#8*4 // clear carry bit - mov x0,xzr - mov x26,sp - b Loop_mul4x_reduction - -.align 4 -Lmul4x_post: - // Final step. We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. - mov x0,x12 - mov x27,x12 // x0 copy - subs x10,x19,x14 - add x26,sp,#8*8 - sbcs x11,x20,x15 - sub x28,x5,#8*4 - -Lmul4x_sub: - sbcs x12,x21,x16 - ldp x14,x15,[x3,#8*0] - sub x28,x28,#8*4 - ldp x19,x20,[x26,#8*0] - sbcs x13,x22,x17 - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - ldp x21,x22,[x26,#8*2] - add x26,x26,#8*4 - stp x10,x11,[x0,#8*0] - sbcs x10,x19,x14 - stp x12,x13,[x0,#8*2] - add x0,x0,#8*4 - sbcs x11,x20,x15 - cbnz x28,Lmul4x_sub - - sbcs x12,x21,x16 - mov x26,sp - add x1,sp,#8*4 - ldp x6,x7,[x27,#8*0] - sbcs x13,x22,x17 - stp x10,x11,[x0,#8*0] - ldp x8,x9,[x27,#8*2] - stp x12,x13,[x0,#8*2] - ldp x19,x20,[x1,#8*0] - ldp x21,x22,[x1,#8*2] - sbcs xzr,x30,xzr // did it borrow? 
- ldr x30,[x29,#8] // pull return address - - sub x28,x5,#8*4 -Lmul4x_cond_copy: - sub x28,x28,#8*4 - csel x10,x19,x6,lo - stp xzr,xzr,[x26,#8*0] - csel x11,x20,x7,lo - ldp x6,x7,[x27,#8*4] - ldp x19,x20,[x1,#8*4] - csel x12,x21,x8,lo - stp xzr,xzr,[x26,#8*2] - add x26,x26,#8*4 - csel x13,x22,x9,lo - ldp x8,x9,[x27,#8*6] - ldp x21,x22,[x1,#8*6] - add x1,x1,#8*4 - stp x10,x11,[x27,#8*0] - stp x12,x13,[x27,#8*2] - add x27,x27,#8*4 - cbnz x28,Lmul4x_cond_copy - - csel x10,x19,x6,lo - stp xzr,xzr,[x26,#8*0] - csel x11,x20,x7,lo - stp xzr,xzr,[x26,#8*2] - csel x12,x21,x8,lo - stp xzr,xzr,[x26,#8*3] - csel x13,x22,x9,lo - stp xzr,xzr,[x26,#8*4] - stp x10,x11,[x27,#8*0] - stp x12,x13,[x27,#8*2] - - b Lmul4x_done - -.align 4 -Lmul4x4_post_condition: - adc x0,x0,xzr - ldr x1,[x29,#96] // pull rp - // x19-3,x0 hold result, x14-7 hold modulus - subs x6,x19,x14 - ldr x30,[x29,#8] // pull return address - sbcs x7,x20,x15 - stp xzr,xzr,[sp,#8*0] - sbcs x8,x21,x16 - stp xzr,xzr,[sp,#8*2] - sbcs x9,x22,x17 - stp xzr,xzr,[sp,#8*4] - sbcs xzr,x0,xzr // did it borrow? 
- stp xzr,xzr,[sp,#8*6] - - // x6-3 hold result-modulus - csel x6,x19,x6,lo - csel x7,x20,x7,lo - csel x8,x21,x8,lo - csel x9,x22,x9,lo - stp x6,x7,[x1,#8*0] - stp x8,x9,[x1,#8*2] - -Lmul4x_done: - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldr x29,[sp],#128 - ret - -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 4 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S deleted file mode 100644 index 60bff31018..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S +++ /dev/null @@ -1,338 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl _gcm_init_neon -.private_extern _gcm_init_neon - -.align 4 -_gcm_init_neon: - // This function is adapted from gcm_init_v8. xC2 is t3. 
- ld1 {v17.2d}, [x1] // load H - movi v19.16b, #0xe1 - shl v19.2d, v19.2d, #57 // 0xc2.0 - ext v3.16b, v17.16b, v17.16b, #8 - ushr v18.2d, v19.2d, #63 - dup v17.4s, v17.s[1] - ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 - ushr v18.2d, v3.2d, #63 - sshr v17.4s, v17.4s, #31 // broadcast carry bit - and v18.16b, v18.16b, v16.16b - shl v3.2d, v3.2d, #1 - ext v18.16b, v18.16b, v18.16b, #8 - and v16.16b, v16.16b, v17.16b - orr v3.16b, v3.16b, v18.16b // H<<<=1 - eor v5.16b, v3.16b, v16.16b // twisted H - st1 {v5.2d}, [x0] // store Htable[0] - ret - - -.globl _gcm_gmult_neon -.private_extern _gcm_gmult_neon - -.align 4 -_gcm_gmult_neon: - ld1 {v3.16b}, [x0] // load Xi - ld1 {v5.1d}, [x1], #8 // load twisted H - ld1 {v6.1d}, [x1] - adrp x9, Lmasks@PAGE // load constants - add x9, x9, Lmasks@PAGEOFF - ld1 {v24.2d, v25.2d}, [x9] - rev64 v3.16b, v3.16b // byteswap Xi - ext v3.16b, v3.16b, v3.16b, #8 - eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing - - mov x3, #16 - b Lgmult_neon - - -.globl _gcm_ghash_neon -.private_extern _gcm_ghash_neon - -.align 4 -_gcm_ghash_neon: - ld1 {v0.16b}, [x0] // load Xi - ld1 {v5.1d}, [x1], #8 // load twisted H - ld1 {v6.1d}, [x1] - adrp x9, Lmasks@PAGE // load constants - add x9, x9, Lmasks@PAGEOFF - ld1 {v24.2d, v25.2d}, [x9] - rev64 v0.16b, v0.16b // byteswap Xi - ext v0.16b, v0.16b, v0.16b, #8 - eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing - -Loop_neon: - ld1 {v3.16b}, [x2], #16 // load inp - rev64 v3.16b, v3.16b // byteswap inp - ext v3.16b, v3.16b, v3.16b, #8 - eor v3.16b, v3.16b, v0.16b // inp ^= Xi - -Lgmult_neon: - // Split the input into v3 and v4. (The upper halves are unused, - // so it is okay to leave them alone.) 
- ins v4.d[0], v3.d[1] - ext v16.8b, v5.8b, v5.8b, #1 // A1 - pmull v16.8h, v16.8b, v3.8b // F = A1*B - ext v0.8b, v3.8b, v3.8b, #1 // B1 - pmull v0.8h, v5.8b, v0.8b // E = A*B1 - ext v17.8b, v5.8b, v5.8b, #2 // A2 - pmull v17.8h, v17.8b, v3.8b // H = A2*B - ext v19.8b, v3.8b, v3.8b, #2 // B2 - pmull v19.8h, v5.8b, v19.8b // G = A*B2 - ext v18.8b, v5.8b, v5.8b, #3 // A3 - eor v16.16b, v16.16b, v0.16b // L = E + F - pmull v18.8h, v18.8b, v3.8b // J = A3*B - ext v0.8b, v3.8b, v3.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v0.8h, v5.8b, v0.8b // I = A*B3 - - // Here we diverge from the 32-bit version. It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v3.8b, v3.8b, #4 // B4 - eor v18.16b, v18.16b, v0.16b // N = I + J - pmull v19.8h, v5.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. 
- zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v0.8h, v5.8b, v3.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v0.16b, v0.16b, v16.16b - eor v0.16b, v0.16b, v18.16b - eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing - ext v16.8b, v7.8b, v7.8b, #1 // A1 - pmull v16.8h, v16.8b, v3.8b // F = A1*B - ext v1.8b, v3.8b, v3.8b, #1 // B1 - pmull v1.8h, v7.8b, v1.8b // E = A*B1 - ext v17.8b, v7.8b, v7.8b, #2 // A2 - pmull v17.8h, v17.8b, v3.8b // H = A2*B - ext v19.8b, v3.8b, v3.8b, #2 // B2 - pmull v19.8h, v7.8b, v19.8b // G = A*B2 - ext v18.8b, v7.8b, v7.8b, #3 // A3 - eor v16.16b, v16.16b, v1.16b // L = E + F - pmull v18.8h, v18.8b, v3.8b // J = A3*B - ext v1.8b, v3.8b, v3.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v1.8h, v7.8b, v1.8b // I = A*B3 - - // Here we diverge from the 32-bit version. 
It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v3.8b, v3.8b, #4 // B4 - eor v18.16b, v18.16b, v1.16b // N = I + J - pmull v19.8h, v7.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. - zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v1.8h, v7.8b, v3.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v1.16b, v1.16b, v16.16b - eor v1.16b, v1.16b, v18.16b - ext v16.8b, v6.8b, v6.8b, #1 // A1 - pmull v16.8h, v16.8b, v4.8b // F = A1*B - ext v2.8b, v4.8b, v4.8b, #1 // B1 - pmull v2.8h, v6.8b, v2.8b // E = A*B1 - ext v17.8b, v6.8b, v6.8b, #2 // A2 - pmull 
v17.8h, v17.8b, v4.8b // H = A2*B - ext v19.8b, v4.8b, v4.8b, #2 // B2 - pmull v19.8h, v6.8b, v19.8b // G = A*B2 - ext v18.8b, v6.8b, v6.8b, #3 // A3 - eor v16.16b, v16.16b, v2.16b // L = E + F - pmull v18.8h, v18.8b, v4.8b // J = A3*B - ext v2.8b, v4.8b, v4.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v2.8h, v6.8b, v2.8b // I = A*B3 - - // Here we diverge from the 32-bit version. It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v4.8b, v4.8b, #4 // B4 - eor v18.16b, v18.16b, v2.16b // N = I + J - pmull v19.8h, v6.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. 
- zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v2.8h, v6.8b, v4.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v2.16b, v2.16b, v16.16b - eor v2.16b, v2.16b, v18.16b - ext v16.16b, v0.16b, v2.16b, #8 - eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing - eor v1.16b, v1.16b, v2.16b - eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi - ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result - // This is a no-op due to the ins instruction below. - // ins v2.d[0], v1.d[1] - - // equivalent of reduction_avx from ghash-x86_64.pl - shl v17.2d, v0.2d, #57 // 1st phase - shl v18.2d, v0.2d, #62 - eor v18.16b, v18.16b, v17.16b // - shl v17.2d, v0.2d, #63 - eor v18.16b, v18.16b, v17.16b // - // Note Xm contains {Xl.d[1], Xh.d[0]}. 
- eor v18.16b, v18.16b, v1.16b - ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] - ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] - - ushr v18.2d, v0.2d, #1 // 2nd phase - eor v2.16b, v2.16b,v0.16b - eor v0.16b, v0.16b,v18.16b // - ushr v18.2d, v18.2d, #6 - ushr v0.2d, v0.2d, #1 // - eor v0.16b, v0.16b, v2.16b // - eor v0.16b, v0.16b, v18.16b // - - subs x3, x3, #16 - bne Loop_neon - - rev64 v0.16b, v0.16b // byteswap Xi and write - ext v0.16b, v0.16b, v0.16b, #8 - st1 {v0.16b}, [x0] - - ret - - -.section __TEXT,__const -.align 4 -Lmasks: -.quad 0x0000ffffffffffff // k48 -.quad 0x00000000ffffffff // k32 -.quad 0x000000000000ffff // k16 -.quad 0x0000000000000000 // k0 -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S deleted file mode 100644 index be0e283c36..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S +++ /dev/null @@ -1,246 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text - -.globl _gcm_init_v8 -.private_extern _gcm_init_v8 - -.align 4 -_gcm_init_v8: - ld1 {v17.2d},[x1] //load input H - movi v19.16b,#0xe1 - shl v19.2d,v19.2d,#57 //0xc2.0 - ext v3.16b,v17.16b,v17.16b,#8 - ushr v18.2d,v19.2d,#63 - dup v17.4s,v17.s[1] - ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 - ushr v18.2d,v3.2d,#63 - sshr v17.4s,v17.4s,#31 //broadcast carry bit - and v18.16b,v18.16b,v16.16b - shl v3.2d,v3.2d,#1 - ext v18.16b,v18.16b,v18.16b,#8 - and v16.16b,v16.16b,v17.16b - orr v3.16b,v3.16b,v18.16b //H<<<=1 - eor v20.16b,v3.16b,v16.16b //twisted H - st1 {v20.2d},[x0],#16 //store Htable[0] - - //calculate H^2 - ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing - pmull v0.1q,v20.1d,v20.1d - eor v16.16b,v16.16b,v20.16b - pmull2 v2.1q,v20.2d,v20.2d - pmull v1.1q,v16.1d,v16.1d - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v22.16b,v0.16b,v18.16b - - ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing - eor v17.16b,v17.16b,v22.16b - ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed - st1 {v21.2d,v22.2d},[x0] //store Htable[1..2] - - ret - -.globl _gcm_gmult_v8 -.private_extern _gcm_gmult_v8 - -.align 4 -_gcm_gmult_v8: - ld1 {v17.2d},[x0] //load Xi - movi v19.16b,#0xe1 - ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... 
- shl v19.2d,v19.2d,#57 -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ext v3.16b,v17.16b,v17.16b,#8 - - pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo - eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi - pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v0.16b,v0.16b,v18.16b - -#ifndef __ARMEB__ - rev64 v0.16b,v0.16b -#endif - ext v0.16b,v0.16b,v0.16b,#8 - st1 {v0.2d},[x0] //write out Xi - - ret - -.globl _gcm_ghash_v8 -.private_extern _gcm_ghash_v8 - -.align 4 -_gcm_ghash_v8: - ld1 {v0.2d},[x0] //load [rotated] Xi - //"[rotated]" means that - //loaded value would have - //to be rotated in order to - //make it appear as in - //algorithm specification - subs x3,x3,#32 //see if x3 is 32 or larger - mov x12,#16 //x12 is used as post- - //increment for input pointer; - //as loop is modulo-scheduled - //x12 is zeroed just in time - //to preclude overstepping - //inp[len], which means that - //last block[s] are actually - //loaded twice, but last - //copy is not processed - ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 - movi v19.16b,#0xe1 - ld1 {v22.2d},[x1] - csel x12,xzr,x12,eq //is it time to zero x12? 
- ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi - ld1 {v16.2d},[x2],#16 //load [rotated] I[0] - shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant -#ifndef __ARMEB__ - rev64 v16.16b,v16.16b - rev64 v0.16b,v0.16b -#endif - ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] - b.lo Lodd_tail_v8 //x3 was less than 32 - ld1 {v17.2d},[x2],x12 //load [rotated] I[1] -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ext v7.16b,v17.16b,v17.16b,#8 - eor v3.16b,v3.16b,v0.16b //I[i]^=Xi - pmull v4.1q,v20.1d,v7.1d //H·Ii+1 - eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing - pmull2 v6.1q,v20.2d,v7.2d - b Loop_mod2x_v8 - -.align 4 -Loop_mod2x_v8: - ext v18.16b,v3.16b,v3.16b,#8 - subs x3,x3,#32 //is there more data? - pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo - csel x12,xzr,x12,lo //is it time to zero x12? - - pmull v5.1q,v21.1d,v17.1d - eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi - eor v0.16b,v0.16b,v4.16b //accumulate - pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) - ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] - - eor v2.16b,v2.16b,v6.16b - csel x12,xzr,x12,eq //is it time to zero x12? 
- eor v1.16b,v1.16b,v5.16b - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] -#ifndef __ARMEB__ - rev64 v16.16b,v16.16b -#endif - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - ext v7.16b,v17.16b,v17.16b,#8 - ext v3.16b,v16.16b,v16.16b,#8 - eor v0.16b,v1.16b,v18.16b - pmull v4.1q,v20.1d,v7.1d //H·Ii+1 - eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v3.16b,v3.16b,v18.16b - eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing - eor v3.16b,v3.16b,v0.16b - pmull2 v6.1q,v20.2d,v7.2d - b.hs Loop_mod2x_v8 //there was at least 32 more bytes - - eor v2.16b,v2.16b,v18.16b - ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b - adds x3,x3,#32 //re-construct x3 - eor v0.16b,v0.16b,v2.16b //re-construct v0.16b - b.eq Ldone_v8 //is x3 zero? 
-Lodd_tail_v8: - ext v18.16b,v0.16b,v0.16b,#8 - eor v3.16b,v3.16b,v0.16b //inp^=Xi - eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi - - pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo - eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi - pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v0.16b,v0.16b,v18.16b - -Ldone_v8: -#ifndef __ARMEB__ - rev64 v0.16b,v0.16b -#endif - ext v0.16b,v0.16b,v0.16b,#8 - st1 {v0.2d},[x0] //write out Xi - - ret - -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha1-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha1-armv8.S deleted file mode 100644 index 379107efbf..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha1-armv8.S +++ /dev/null @@ -1,1232 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text - - -.globl _sha1_block_data_order -.private_extern _sha1_block_data_order - -.align 6 -_sha1_block_data_order: -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P -#else - adrp x16,_OPENSSL_armcap_P@PAGE -#endif - ldr w16,[x16,_OPENSSL_armcap_P@PAGEOFF] - tst w16,#ARMV8_SHA1 - b.ne Lv8_entry - - stp x29,x30,[sp,#-96]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - ldp w20,w21,[x0] - ldp w22,w23,[x0,#8] - ldr w24,[x0,#16] - -Loop: - ldr x3,[x1],#64 - movz w28,#0x7999 - sub x2,x2,#1 - movk w28,#0x5a82,lsl#16 -#ifdef __ARMEB__ - ror x3,x3,#32 -#else - rev32 x3,x3 -#endif - add w24,w24,w28 // warm it up - add w24,w24,w3 - lsr x4,x3,#32 - ldr x5,[x1,#-56] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w4 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x5,x5,#32 -#else - rev32 x5,x5 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w5 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x6,x5,#32 - ldr x7,[x1,#-48] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w6 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x7,x7,#32 -#else - rev32 x7,x7 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr 
w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w7 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x8,x7,#32 - ldr x9,[x1,#-40] - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w8 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x9,x9,#32 -#else - rev32 x9,x9 -#endif - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w9 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - lsr x10,x9,#32 - ldr x11,[x1,#-32] - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w10 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x11,x11,#32 -#else - rev32 x11,x11 -#endif - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w11 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - lsr x12,x11,#32 - ldr x13,[x1,#-24] - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w12 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x13,x13,#32 -#else - rev32 x13,x13 -#endif - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w13 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - lsr x14,x13,#32 - ldr x15,[x1,#-16] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w14 // future 
e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x15,x15,#32 -#else - rev32 x15,x15 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w15 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x16,x15,#32 - ldr x17,[x1,#-8] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w16 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x17,x17,#32 -#else - rev32 x17,x17 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w17 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x19,x17,#32 - eor w3,w3,w5 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w3,w3,w11 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w3,w3,w16 - ror w22,w22,#2 - add w24,w24,w19 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - eor w4,w4,w12 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - eor w4,w4,w17 - ror w21,w21,#2 - add w23,w23,w3 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - eor w5,w5,w13 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - eor w5,w5,w19 - ror w20,w20,#2 - add w22,w22,w4 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - eor w6,w6,w14 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - eor w6,w6,w3 - ror w24,w24,#2 - add w21,w21,w5 // future e+=X[i] - 
add w22,w22,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - eor w7,w7,w15 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - eor w7,w7,w4 - ror w23,w23,#2 - add w20,w20,w6 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w7,w7,#31 - movz w28,#0xeba1 - movk w28,#0x6ed9,lsl#16 - eor w8,w8,w10 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w8,w8,w16 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w8,w8,w5 - ror w22,w22,#2 - add w24,w24,w7 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w9,w9,w6 - add w23,w23,w8 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w10,w10,w7 - add w22,w22,w9 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w11,w11,w8 - add w21,w21,w10 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w12,w12,w9 - add w20,w20,w11 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w13,w13,w10 - add w24,w24,w12 // future e+=X[i] - add 
w20,w20,w25 // e+=F(b,c,d) - ror w13,w13,#31 - eor w14,w14,w16 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w14,w14,w11 - add w23,w23,w13 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w15,w15,w12 - add w22,w22,w14 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w16,w16,w13 - add w21,w21,w15 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w17,w17,w14 - add w20,w20,w16 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w19,w19,w15 - add w24,w24,w17 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w3,w3,w16 - add w23,w23,w19 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w4,w4,w17 - add w22,w22,w3 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - eor w25,w21,w24 
- ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w5,w5,w19 - add w21,w21,w4 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w6,w6,w3 - add w20,w20,w5 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w7,w7,w4 - add w24,w24,w6 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w8,w8,w5 - add w23,w23,w7 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w9,w9,w6 - add w22,w22,w8 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w10,w10,w7 - add w21,w21,w9 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w11,w11,w8 - add w20,w20,w10 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w11,w11,#31 - movz w28,#0xbcdc - movk w28,#0x8f1b,lsl#16 - eor w12,w12,w14 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor 
w12,w12,w4 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w12,w12,w9 - add w24,w24,w11 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w13,w13,w15 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w13,w13,w5 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w13,w13,w10 - add w23,w23,w12 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w14,w14,w16 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w14,w14,w6 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w14,w14,w11 - add w22,w22,w13 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w15,w15,w17 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w15,w15,w7 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w15,w15,w12 - add w21,w21,w14 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w15,w15,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w16,w16,w19 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w16,w16,w8 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w16,w16,w13 - add w20,w20,w15 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w16,w16,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w17,w17,w3 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w17,w17,w9 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w17,w17,w14 - add w24,w24,w16 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w17,w17,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w19,w19,w4 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w19,w19,w10 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w19,w19,w15 - add w23,w23,w17 // 
future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w19,w19,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w3,w3,w5 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w3,w3,w11 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w3,w3,w16 - add w22,w22,w19 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w3,w3,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w4,w4,w6 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w4,w4,w12 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w4,w4,w17 - add w21,w21,w3 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w4,w4,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w5,w5,w7 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w5,w5,w13 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w5,w5,w19 - add w20,w20,w4 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w5,w5,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w6,w6,w8 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w6,w6,w14 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w6,w6,w3 - add w24,w24,w5 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w6,w6,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w7,w7,w9 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w7,w7,w15 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w7,w7,w4 - add w23,w23,w6 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w7,w7,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w8,w8,w10 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w8,w8,w16 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w8,w8,w5 - add w22,w22,w7 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w8,w8,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w9,w9,w11 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // 
future e+=K - eor w9,w9,w17 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w9,w9,w6 - add w21,w21,w8 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w9,w9,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w10,w10,w12 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w10,w10,w19 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w10,w10,w7 - add w20,w20,w9 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w10,w10,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w11,w11,w13 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w11,w11,w3 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w11,w11,w8 - add w24,w24,w10 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w11,w11,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w12,w12,w14 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w12,w12,w4 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w12,w12,w9 - add w23,w23,w11 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w13,w13,w15 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w13,w13,w5 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w13,w13,w10 - add w22,w22,w12 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w14,w14,w16 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w14,w14,w6 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w14,w14,w11 - add w21,w21,w13 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w15,w15,w17 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w15,w15,w7 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w15,w15,w12 - add 
w20,w20,w14 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w15,w15,#31 - movz w28,#0xc1d6 - movk w28,#0xca62,lsl#16 - orr w25,w22,w23 - and w26,w22,w23 - eor w16,w16,w19 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w16,w16,w8 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w16,w16,w13 - add w24,w24,w15 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w17,w17,w14 - add w23,w23,w16 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w19,w19,w15 - add w22,w22,w17 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w3,w3,w16 - add w21,w21,w19 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w4,w4,w17 - add w20,w20,w3 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w5,w5,w19 - add w24,w24,w4 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w6,w6,w3 - add w23,w23,w5 // 
future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w7,w7,w4 - add w22,w22,w6 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w8,w8,w5 - add w21,w21,w7 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w9,w9,w6 - add w20,w20,w8 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w10,w10,w7 - add w24,w24,w9 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w11,w11,w8 - add w23,w23,w10 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w12,w12,w9 - add w22,w22,w11 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w13,w13,w10 - add w21,w21,w12 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w13,w13,#31 - eor w14,w14,w16 - eor 
w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w14,w14,w11 - add w20,w20,w13 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w15,w15,w12 - add w24,w24,w14 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w16,w16,w13 - add w23,w23,w15 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w17,w17,w14 - add w22,w22,w16 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w19,w19,w15 - add w21,w21,w17 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w19,w19,#31 - ldp w4,w5,[x0] - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w19 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ldp w6,w7,[x0,#8] - eor w25,w24,w22 - ror w27,w21,#27 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - ldr w8,[x0,#16] - add w20,w20,w25 // e+=F(b,c,d) - add w21,w21,w5 - add w22,w22,w6 - add w20,w20,w4 - add w23,w23,w7 - add w24,w24,w8 - stp w20,w21,[x0] - stp w22,w23,[x0,#8] - str w24,[x0,#16] - cbnz x2,Loop - - ldp x19,x20,[sp,#16] - ldp x21,x22,[sp,#32] - ldp x23,x24,[sp,#48] - ldp 
x25,x26,[sp,#64] - ldp x27,x28,[sp,#80] - ldr x29,[sp],#96 - ret - - -.align 6 -sha1_block_armv8: -Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - adrp x4,Lconst@PAGE - add x4,x4,Lconst@PAGEOFF - eor v1.16b,v1.16b,v1.16b - ld1 {v0.4s},[x0],#16 - ld1 {v1.s}[0],[x0] - sub x0,x0,#16 - ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] - -Loop_hw: - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - sub x2,x2,#1 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - - add v20.4s,v16.4s,v4.4s - rev32 v6.16b,v6.16b - orr v22.16b,v0.16b,v0.16b // offload - - add v21.4s,v16.4s,v5.4s - rev32 v7.16b,v7.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b -.long 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 - add v20.4s,v16.4s,v6.4s -.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 1 -.long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v16.4s,v7.4s -.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 2 -.long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v16.4s,v4.4s -.long 0x5e281885 //sha1su1 v5.16b,v4.16b -.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 3 -.long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s -.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 4 -.long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v6.4s -.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 5 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v7.4s -.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 6 -.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v4.4s -.long 0x5e281885 //sha1su1 v5.16b,v4.16b -.long 0x5e0430e6 //sha1su0 
v6.16b,v7.16b,v4.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 7 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s -.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 8 -.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s -.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 9 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v7.4s -.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 10 -.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v4.4s -.long 0x5e281885 //sha1su1 v5.16b,v4.16b -.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 11 -.long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v5.4s -.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 12 -.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s -.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 13 -.long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s -.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 14 -.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v4.4s -.long 0x5e281885 //sha1su1 v5.16b,v4.16b -.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 15 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v5.4s -.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.long 0x5e280803 //sha1h v3.16b,v0.16b // 16 -.long 0x5e141040 //sha1p 
v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v6.4s -.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.long 0x5e280802 //sha1h v2.16b,v0.16b // 17 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s - -.long 0x5e280803 //sha1h v3.16b,v0.16b // 18 -.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - -.long 0x5e280802 //sha1h v2.16b,v0.16b // 19 -.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - - add v1.4s,v1.4s,v2.4s - add v0.4s,v0.4s,v22.4s - - cbnz x2,Loop_hw - - st1 {v0.4s},[x0],#16 - st1 {v1.s}[0],[x0] - - ldr x29,[sp],#16 - ret - -.section __TEXT,__const -.align 6 -Lconst: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha256-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha256-armv8.S deleted file mode 100644 index d6fa5a930d..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha256-armv8.S +++ /dev/null @@ -1,1210 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. 
-// -// Licensed under the OpenSSL license (the "License"). You may not use -// this file except in compliance with the License. You can obtain a copy -// in the file LICENSE in the source distribution or at -// https://www.openssl.org/source/license.html - -// ==================================================================== -// Written by Andy Polyakov for the OpenSSL -// project. The module is, however, dual licensed under OpenSSL and -// CRYPTOGAMS licenses depending on where you obtain it. For further -// details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. -// ==================================================================== -// -// SHA256/512 for ARMv8. -// -// Performance in cycles per processed byte and improvement coefficient -// over code generated with "default" compiler: -// -// SHA256-hw SHA256(*) SHA512 -// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) -// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) -// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) -// Denver 2.01 10.5 (+26%) 6.70 (+8%) -// X-Gene 20.0 (+100%) 12.8 (+300%(***)) -// Mongoose 2.36 13.0 (+50%) 8.36 (+33%) -// -// (*) Software SHA256 results are of lesser relevance, presented -// mostly for informational purposes. -// (**) The result is a trade-off: it's possible to improve it by -// 10% (or by 1 cycle per round), but at the cost of 20% loss -// on Cortex-A53 (or by 4 cycles per round). -// (***) Super-impressive coefficients over gcc-generated code are -// indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster -// and the gap is only 40-90%. 
- -#ifndef __KERNEL__ -# include -#endif - -.text - - -.globl _sha256_block_data_order -.private_extern _sha256_block_data_order - -.align 6 -_sha256_block_data_order: -#ifndef __KERNEL__ -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P -#else - adrp x16,_OPENSSL_armcap_P@PAGE -#endif - ldr w16,[x16,_OPENSSL_armcap_P@PAGEOFF] - tst w16,#ARMV8_SHA256 - b.ne Lv8_entry -#endif - stp x29,x30,[sp,#-128]! - add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*4 - - ldp w20,w21,[x0] // load context - ldp w22,w23,[x0,#2*4] - ldp w24,w25,[x0,#4*4] - add x2,x1,x2,lsl#6 // end of input - ldp w26,w27,[x0,#6*4] - adrp x30,LK256@PAGE - add x30,x30,LK256@PAGEOFF - stp x0,x2,[x29,#96] - -Loop: - ldp w3,w4,[x1],#2*4 - ldr w19,[x30],#4 // *K++ - eor w28,w21,w22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev w3,w3 // 0 -#endif - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w6,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w3 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w4,w4 // 1 -#endif - ldp w5,w6,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w7,w23,w23,ror#14 - and w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w4 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor 
w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w5,w5 // 2 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor w8,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w5 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w6,w6 // 3 -#endif - ldp w7,w8,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w9,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w6 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w7,w7 // 4 -#endif - add w24,w24,w17 // h+=Sigma0(a) - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - eor w10,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w7 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w10,ror#11 // 
Sigma1(e) - ror w10,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w10,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w8,w8 // 5 -#endif - ldp w9,w10,[x1],#2*4 - add w23,w23,w17 // h+=Sigma0(a) - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w11,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w8 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w11,ror#11 // Sigma1(e) - ror w11,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w11,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w9,w9 // 6 -#endif - add w22,w22,w17 // h+=Sigma0(a) - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w12,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w9 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w12,ror#11 // Sigma1(e) - ror w12,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w12,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w10,w10 // 7 -#endif - ldp w11,w12,[x1],#2*4 - add w21,w21,w17 // h+=Sigma0(a) - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - eor w13,w25,w25,ror#14 - and w17,w26,w25 - bic w28,w27,w25 - add w20,w20,w10 // h+=X[i] - orr 
w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w13,ror#11 // Sigma1(e) - ror w13,w21,#2 - add w20,w20,w17 // h+=Ch(e,f,g) - eor w17,w21,w21,ror#9 - add w20,w20,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w24,w24,w20 // d+=h - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w13,w17,ror#13 // Sigma0(a) - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w20,w20,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w11,w11 // 8 -#endif - add w20,w20,w17 // h+=Sigma0(a) - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w14,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w11 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w14,ror#11 // Sigma1(e) - ror w14,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w14,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w12,w12 // 9 -#endif - ldp w13,w14,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w15,w23,w23,ror#14 - and w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w12 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w15,ror#11 // Sigma1(e) - ror w15,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w15,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w13,w13 // 10 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor 
w0,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w13 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w0,ror#11 // Sigma1(e) - ror w0,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w0,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w14,w14 // 11 -#endif - ldp w15,w0,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w6,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w14 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w15,w15 // 12 -#endif - add w24,w24,w17 // h+=Sigma0(a) - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - eor w7,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w15 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev 
w0,w0 // 13 -#endif - ldp w1,w2,[x1] - add w23,w23,w17 // h+=Sigma0(a) - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w8,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w0 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w1,w1 // 14 -#endif - ldr w6,[sp,#12] - add w22,w22,w17 // h+=Sigma0(a) - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w9,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w1 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w2,w2 // 15 -#endif - ldr w7,[sp,#0] - add w21,w21,w17 // h+=Sigma0(a) - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add 
w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 -Loop_16_xx: - ldr w8,[sp,#4] - str w11,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w10,w5,#7 - and w17,w25,w24 - ror w9,w2,#17 - bic w19,w26,w24 - ror w11,w20,#2 - add w27,w27,w3 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w10,w10,w5,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w11,w11,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w9,w9,w2,ror#19 - eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w11,w20,ror#22 // Sigma0(a) - eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) - add w4,w4,w13 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w4,w4,w10 - add w27,w27,w17 // h+=Sigma0(a) - add w4,w4,w9 - ldr w9,[sp,#8] - str w12,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w11,w6,#7 - and w17,w24,w23 - ror w10,w3,#17 - bic w28,w25,w23 - ror w12,w27,#2 - add w26,w26,w4 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w11,w11,w6,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w12,w12,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w10,w10,w3,ror#19 - eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w12,w27,ror#22 // Sigma0(a) - eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) - add w5,w5,w14 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w5,w5,w11 - add 
w26,w26,w17 // h+=Sigma0(a) - add w5,w5,w10 - ldr w10,[sp,#12] - str w13,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w12,w7,#7 - and w17,w23,w22 - ror w11,w4,#17 - bic w19,w24,w22 - ror w13,w26,#2 - add w25,w25,w5 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w12,w12,w7,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w13,w13,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w11,w11,w4,ror#19 - eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w13,w26,ror#22 // Sigma0(a) - eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) - add w6,w6,w15 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w6,w6,w12 - add w25,w25,w17 // h+=Sigma0(a) - add w6,w6,w11 - ldr w11,[sp,#0] - str w14,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w13,w8,#7 - and w17,w22,w21 - ror w12,w5,#17 - bic w28,w23,w21 - ror w14,w25,#2 - add w24,w24,w6 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w13,w13,w8,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w14,w14,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w12,w12,w5,ror#19 - eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w14,w25,ror#22 // Sigma0(a) - eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) - add w7,w7,w0 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w7,w7,w13 - add w24,w24,w17 // h+=Sigma0(a) - add w7,w7,w12 - ldr w12,[sp,#4] - str w15,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - ror w14,w9,#7 - and w17,w21,w20 - ror w13,w6,#17 - bic w19,w22,w20 - ror w15,w24,#2 - add w23,w23,w7 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w14,w14,w9,ror#18 - orr 
w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w15,w15,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w13,w13,w6,ror#19 - eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w15,w24,ror#22 // Sigma0(a) - eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) - add w8,w8,w1 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w8,w8,w14 - add w23,w23,w17 // h+=Sigma0(a) - add w8,w8,w13 - ldr w13,[sp,#8] - str w0,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w15,w10,#7 - and w17,w20,w27 - ror w14,w7,#17 - bic w28,w21,w27 - ror w0,w23,#2 - add w22,w22,w8 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w15,w15,w10,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w0,w0,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w14,w14,w7,ror#19 - eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w0,w23,ror#22 // Sigma0(a) - eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) - add w9,w9,w2 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w9,w9,w15 - add w22,w22,w17 // h+=Sigma0(a) - add w9,w9,w14 - ldr w14,[sp,#12] - str w1,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w0,w11,#7 - and w17,w27,w26 - ror w15,w8,#17 - bic w19,w20,w26 - ror w1,w22,#2 - add w21,w21,w9 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w0,w0,w11,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // Sigma1(e) - eor w1,w1,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w15,w15,w8,ror#19 - eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor 
w28,w28,w23 // Maj(a,b,c) - eor w17,w1,w22,ror#22 // Sigma0(a) - eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) - add w10,w10,w3 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w10,w10,w0 - add w21,w21,w17 // h+=Sigma0(a) - add w10,w10,w15 - ldr w15,[sp,#0] - str w2,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w1,w12,#7 - and w17,w26,w25 - ror w0,w9,#17 - bic w28,w27,w25 - ror w2,w21,#2 - add w20,w20,w10 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w1,w1,w12,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w2,w2,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w0,w0,w9,ror#19 - eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w2,w21,ror#22 // Sigma0(a) - eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) - add w11,w11,w4 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w11,w11,w1 - add w20,w20,w17 // h+=Sigma0(a) - add w11,w11,w0 - ldr w0,[sp,#4] - str w3,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w2,w13,#7 - and w17,w25,w24 - ror w1,w10,#17 - bic w19,w26,w24 - ror w3,w20,#2 - add w27,w27,w11 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w2,w2,w13,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w3,w3,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w1,w1,w10,ror#19 - eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w3,w20,ror#22 // Sigma0(a) - eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) - add w12,w12,w5 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w12,w12,w2 - add w27,w27,w17 // h+=Sigma0(a) - add w12,w12,w1 - ldr 
w1,[sp,#8] - str w4,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w3,w14,#7 - and w17,w24,w23 - ror w2,w11,#17 - bic w28,w25,w23 - ror w4,w27,#2 - add w26,w26,w12 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w3,w3,w14,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w4,w4,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w2,w2,w11,ror#19 - eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w4,w27,ror#22 // Sigma0(a) - eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) - add w13,w13,w6 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w13,w13,w3 - add w26,w26,w17 // h+=Sigma0(a) - add w13,w13,w2 - ldr w2,[sp,#12] - str w5,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w4,w15,#7 - and w17,w23,w22 - ror w3,w12,#17 - bic w19,w24,w22 - ror w5,w26,#2 - add w25,w25,w13 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w4,w4,w15,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w5,w5,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w3,w3,w12,ror#19 - eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w5,w26,ror#22 // Sigma0(a) - eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) - add w14,w14,w7 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w14,w14,w4 - add w25,w25,w17 // h+=Sigma0(a) - add w14,w14,w3 - ldr w3,[sp,#0] - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w5,w0,#7 - and w17,w22,w21 - ror w4,w13,#17 - bic w28,w23,w21 - ror w6,w25,#2 - add w24,w24,w14 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w5,w5,w0,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next 
round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w6,w6,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w4,w4,w13,ror#19 - eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w25,ror#22 // Sigma0(a) - eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) - add w15,w15,w8 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w15,w15,w5 - add w24,w24,w17 // h+=Sigma0(a) - add w15,w15,w4 - ldr w4,[sp,#4] - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - ror w6,w1,#7 - and w17,w21,w20 - ror w5,w14,#17 - bic w19,w22,w20 - ror w7,w24,#2 - add w23,w23,w15 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w6,w6,w1,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w7,w7,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w5,w5,w14,ror#19 - eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w24,ror#22 // Sigma0(a) - eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) - add w0,w0,w9 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w0,w0,w6 - add w23,w23,w17 // h+=Sigma0(a) - add w0,w0,w5 - ldr w5,[sp,#8] - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w7,w2,#7 - and w17,w20,w27 - ror w6,w15,#17 - bic w28,w21,w27 - ror w8,w23,#2 - add w22,w22,w0 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w7,w7,w2,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w8,w8,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w6,w6,w15,ror#19 - eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w23,ror#22 // Sigma0(a) - eor 
w6,w6,w15,lsr#10 // sigma1(X[i+14]) - add w1,w1,w10 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w1,w1,w7 - add w22,w22,w17 // h+=Sigma0(a) - add w1,w1,w6 - ldr w6,[sp,#12] - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w8,w3,#7 - and w17,w27,w26 - ror w7,w0,#17 - bic w19,w20,w26 - ror w9,w22,#2 - add w21,w21,w1 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w8,w8,w3,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // Sigma1(e) - eor w9,w9,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w7,w7,w0,ror#19 - eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w22,ror#22 // Sigma0(a) - eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) - add w2,w2,w11 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w2,w2,w8 - add w21,w21,w17 // h+=Sigma0(a) - add w2,w2,w7 - ldr w7,[sp,#0] - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 - cbnz w19,Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#260 // rewind - - 
ldp w3,w4,[x0] - ldp w5,w6,[x0,#2*4] - add x1,x1,#14*4 // advance input pointer - ldp w7,w8,[x0,#4*4] - add w20,w20,w3 - ldp w9,w10,[x0,#6*4] - add w21,w21,w4 - add w22,w22,w5 - add w23,w23,w6 - stp w20,w21,[x0] - add w24,w24,w7 - add w25,w25,w8 - stp w22,w23,[x0,#2*4] - add w26,w26,w9 - add w27,w27,w10 - cmp x1,x2 - stp w24,w25,[x0,#4*4] - stp w26,w27,[x0,#6*4] - b.ne Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*4 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret - - -.section __TEXT,__const -.align 6 - -LK256: -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.long 0 //terminator - -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -.text -#ifndef __KERNEL__ - -.align 6 -sha256_block_armv8: -Lv8_entry: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - - ld1 {v0.4s,v1.4s},[x0] - adrp x3,LK256@PAGE - add x3,x3,LK256@PAGEOFF - -Loop_hw: - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - sub x2,x2,#1 - ld1 {v16.4s},[x3],#16 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - rev32 v6.16b,v6.16b - rev32 v7.16b,v7.16b - orr v18.16b,v0.16b,v0.16b // offload - orr v19.16b,v1.16b,v1.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s -.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.long 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add 
v16.4s,v16.4s,v6.4s -.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.long 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s -.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.long 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - ld1 {v17.4s},[x3] - add v16.4s,v16.4s,v6.4s - 
sub x3,x3,#64*4-16 // rewind - orr v2.16b,v0.16b,v0.16b -.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - add v17.4s,v17.4s,v7.4s - orr v2.16b,v0.16b,v0.16b -.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - add v0.4s,v0.4s,v18.4s - add v1.4s,v1.4s,v19.4s - - cbnz x2,Loop_hw - - st1 {v0.4s,v1.4s},[x0] - - ldr x29,[sp],#16 - ret - -#endif -#ifndef __KERNEL__ -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha512-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha512-armv8.S deleted file mode 100644 index 29e122b180..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/sha512-armv8.S +++ /dev/null @@ -1,1082 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. -// -// Licensed under the OpenSSL license (the "License"). You may not use -// this file except in compliance with the License. You can obtain a copy -// in the file LICENSE in the source distribution or at -// https://www.openssl.org/source/license.html - -// ==================================================================== -// Written by Andy Polyakov for the OpenSSL -// project. The module is, however, dual licensed under OpenSSL and -// CRYPTOGAMS licenses depending on where you obtain it. For further -// details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. 
-// ==================================================================== -// -// SHA256/512 for ARMv8. -// -// Performance in cycles per processed byte and improvement coefficient -// over code generated with "default" compiler: -// -// SHA256-hw SHA256(*) SHA512 -// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) -// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) -// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) -// Denver 2.01 10.5 (+26%) 6.70 (+8%) -// X-Gene 20.0 (+100%) 12.8 (+300%(***)) -// Mongoose 2.36 13.0 (+50%) 8.36 (+33%) -// -// (*) Software SHA256 results are of lesser relevance, presented -// mostly for informational purposes. -// (**) The result is a trade-off: it's possible to improve it by -// 10% (or by 1 cycle per round), but at the cost of 20% loss -// on Cortex-A53 (or by 4 cycles per round). -// (***) Super-impressive coefficients over gcc-generated code are -// indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster -// and the gap is only 40-90%. - -#ifndef __KERNEL__ -# include -#endif - -.text - - -.globl _sha512_block_data_order -.private_extern _sha512_block_data_order - -.align 6 -_sha512_block_data_order: - stp x29,x30,[sp,#-128]! 
- add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*8 - - ldp x20,x21,[x0] // load context - ldp x22,x23,[x0,#2*8] - ldp x24,x25,[x0,#4*8] - add x2,x1,x2,lsl#7 // end of input - ldp x26,x27,[x0,#6*8] - adrp x30,LK512@PAGE - add x30,x30,LK512@PAGEOFF - stp x0,x2,[x29,#96] - -Loop: - ldp x3,x4,[x1],#2*8 - ldr x19,[x30],#8 // *K++ - eor x28,x21,x22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev x3,x3 // 0 -#endif - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x6,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x3 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x4,x4 // 1 -#endif - ldp x5,x6,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x7,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x4 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x5,x5 // 2 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x8,x22,x22,ror#23 - and x17,x23,x22 - bic 
x19,x24,x22 - add x25,x25,x5 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x6,x6 // 3 -#endif - ldp x7,x8,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x9,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x6 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x7,x7 // 4 -#endif - add x24,x24,x17 // h+=Sigma0(a) - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x10,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x7 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x10,ror#18 // Sigma1(e) - ror x10,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x10,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x8,x8 // 5 -#endif - ldp x9,x10,[x1],#2*8 - add x23,x23,x17 // h+=Sigma0(a) - ror 
x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x11,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x8 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x11,ror#18 // Sigma1(e) - ror x11,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x11,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x9,x9 // 6 -#endif - add x22,x22,x17 // h+=Sigma0(a) - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x12,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x9 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x12,ror#18 // Sigma1(e) - ror x12,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x12,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x10,x10 // 7 -#endif - ldp x11,x12,[x1],#2*8 - add x21,x21,x17 // h+=Sigma0(a) - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - eor x13,x25,x25,ror#23 - and x17,x26,x25 - bic x28,x27,x25 - add x20,x20,x10 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x13,ror#18 // Sigma1(e) - ror x13,x21,#28 - add x20,x20,x17 // h+=Ch(e,f,g) - eor x17,x21,x21,ror#5 - add x20,x20,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x24,x24,x20 // d+=h - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x13,x17,ror#34 // Sigma0(a) - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x20,x20,x17 // h+=Sigma0(a) 
-#ifndef __ARMEB__ - rev x11,x11 // 8 -#endif - add x20,x20,x17 // h+=Sigma0(a) - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x14,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x11 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x14,ror#18 // Sigma1(e) - ror x14,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x14,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x12,x12 // 9 -#endif - ldp x13,x14,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x15,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x12 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x15,ror#18 // Sigma1(e) - ror x15,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x15,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x13,x13 // 10 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x0,x22,x22,ror#23 - and x17,x23,x22 - bic x19,x24,x22 - add x25,x25,x13 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x0,ror#18 // Sigma1(e) - ror x0,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x0,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) 
- ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x14,x14 // 11 -#endif - ldp x15,x0,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x6,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x14 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x15,x15 // 12 -#endif - add x24,x24,x17 // h+=Sigma0(a) - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x7,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x15 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x0,x0 // 13 -#endif - ldp x1,x2,[x1] - add x23,x23,x17 // h+=Sigma0(a) - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x8,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x0 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // 
(b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x1,x1 // 14 -#endif - ldr x6,[sp,#24] - add x22,x22,x17 // h+=Sigma0(a) - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x9,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x1 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x2,x2 // 15 -#endif - ldr x7,[sp,#0] - add x21,x21,x17 // h+=Sigma0(a) - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 -Loop_16_xx: - ldr x8,[sp,#8] - str x11,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x10,x5,#1 - and x17,x25,x24 - ror 
x9,x2,#19 - bic x19,x26,x24 - ror x11,x20,#28 - add x27,x27,x3 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x10,x10,x5,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x11,x11,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x9,x9,x2,ror#61 - eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x11,x20,ror#39 // Sigma0(a) - eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) - add x4,x4,x13 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x4,x4,x10 - add x27,x27,x17 // h+=Sigma0(a) - add x4,x4,x9 - ldr x9,[sp,#16] - str x12,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x11,x6,#1 - and x17,x24,x23 - ror x10,x3,#19 - bic x28,x25,x23 - ror x12,x27,#28 - add x26,x26,x4 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x11,x11,x6,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x12,x12,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x10,x10,x3,ror#61 - eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x12,x27,ror#39 // Sigma0(a) - eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) - add x5,x5,x14 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x5,x5,x11 - add x26,x26,x17 // h+=Sigma0(a) - add x5,x5,x10 - ldr x10,[sp,#24] - str x13,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x12,x7,#1 - and x17,x23,x22 - ror x11,x4,#19 - bic x19,x24,x22 - ror x13,x26,#28 - add x25,x25,x5 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x12,x12,x7,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x13,x13,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - 
and x28,x28,x19 // (b^c)&=(a^b) - eor x11,x11,x4,ror#61 - eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x13,x26,ror#39 // Sigma0(a) - eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) - add x6,x6,x15 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x6,x6,x12 - add x25,x25,x17 // h+=Sigma0(a) - add x6,x6,x11 - ldr x11,[sp,#0] - str x14,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x13,x8,#1 - and x17,x22,x21 - ror x12,x5,#19 - bic x28,x23,x21 - ror x14,x25,#28 - add x24,x24,x6 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x13,x13,x8,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x14,x14,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x12,x12,x5,ror#61 - eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x14,x25,ror#39 // Sigma0(a) - eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) - add x7,x7,x0 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x7,x7,x13 - add x24,x24,x17 // h+=Sigma0(a) - add x7,x7,x12 - ldr x12,[sp,#8] - str x15,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x14,x9,#1 - and x17,x21,x20 - ror x13,x6,#19 - bic x19,x22,x20 - ror x15,x24,#28 - add x23,x23,x7 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x14,x14,x9,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x15,x15,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x13,x13,x6,ror#61 - eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x15,x24,ror#39 // Sigma0(a) - eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) - add x8,x8,x1 - add x27,x27,x23 // d+=h - add 
x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x8,x8,x14 - add x23,x23,x17 // h+=Sigma0(a) - add x8,x8,x13 - ldr x13,[sp,#16] - str x0,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x15,x10,#1 - and x17,x20,x27 - ror x14,x7,#19 - bic x28,x21,x27 - ror x0,x23,#28 - add x22,x22,x8 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x15,x15,x10,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x0,x0,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x14,x14,x7,ror#61 - eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x0,x23,ror#39 // Sigma0(a) - eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) - add x9,x9,x2 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x9,x9,x15 - add x22,x22,x17 // h+=Sigma0(a) - add x9,x9,x14 - ldr x14,[sp,#24] - str x1,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x0,x11,#1 - and x17,x27,x26 - ror x15,x8,#19 - bic x19,x20,x26 - ror x1,x22,#28 - add x21,x21,x9 // h+=X[i] - eor x16,x16,x26,ror#18 - eor x0,x0,x11,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x1,x1,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x15,x15,x8,ror#61 - eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x1,x22,ror#39 // Sigma0(a) - eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) - add x10,x10,x3 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x10,x10,x0 - add x21,x21,x17 // h+=Sigma0(a) - add x10,x10,x15 - ldr x15,[sp,#0] - str x2,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x1,x12,#1 - and x17,x26,x25 - ror x0,x9,#19 - bic x28,x27,x25 - ror 
x2,x21,#28 - add x20,x20,x10 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x1,x1,x12,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x2,x2,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x0,x0,x9,ror#61 - eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x2,x21,ror#39 // Sigma0(a) - eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) - add x11,x11,x4 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x11,x11,x1 - add x20,x20,x17 // h+=Sigma0(a) - add x11,x11,x0 - ldr x0,[sp,#8] - str x3,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x2,x13,#1 - and x17,x25,x24 - ror x1,x10,#19 - bic x19,x26,x24 - ror x3,x20,#28 - add x27,x27,x11 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x2,x2,x13,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x3,x3,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x1,x1,x10,ror#61 - eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x3,x20,ror#39 // Sigma0(a) - eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) - add x12,x12,x5 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x12,x12,x2 - add x27,x27,x17 // h+=Sigma0(a) - add x12,x12,x1 - ldr x1,[sp,#16] - str x4,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x3,x14,#1 - and x17,x24,x23 - ror x2,x11,#19 - bic x28,x25,x23 - ror x4,x27,#28 - add x26,x26,x12 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x3,x3,x14,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x4,x4,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor 
x2,x2,x11,ror#61 - eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x4,x27,ror#39 // Sigma0(a) - eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) - add x13,x13,x6 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x13,x13,x3 - add x26,x26,x17 // h+=Sigma0(a) - add x13,x13,x2 - ldr x2,[sp,#24] - str x5,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x4,x15,#1 - and x17,x23,x22 - ror x3,x12,#19 - bic x19,x24,x22 - ror x5,x26,#28 - add x25,x25,x13 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x4,x4,x15,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x5,x5,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x3,x3,x12,ror#61 - eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x5,x26,ror#39 // Sigma0(a) - eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) - add x14,x14,x7 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x14,x14,x4 - add x25,x25,x17 // h+=Sigma0(a) - add x14,x14,x3 - ldr x3,[sp,#0] - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x5,x0,#1 - and x17,x22,x21 - ror x4,x13,#19 - bic x28,x23,x21 - ror x6,x25,#28 - add x24,x24,x14 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x5,x5,x0,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x6,x6,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x4,x4,x13,ror#61 - eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x25,ror#39 // Sigma0(a) - eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) - add x15,x15,x8 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, 
x28 in next round - add x15,x15,x5 - add x24,x24,x17 // h+=Sigma0(a) - add x15,x15,x4 - ldr x4,[sp,#8] - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x6,x1,#1 - and x17,x21,x20 - ror x5,x14,#19 - bic x19,x22,x20 - ror x7,x24,#28 - add x23,x23,x15 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x6,x6,x1,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x7,x7,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x5,x5,x14,ror#61 - eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x24,ror#39 // Sigma0(a) - eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) - add x0,x0,x9 - add x27,x27,x23 // d+=h - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x0,x0,x6 - add x23,x23,x17 // h+=Sigma0(a) - add x0,x0,x5 - ldr x5,[sp,#16] - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x7,x2,#1 - and x17,x20,x27 - ror x6,x15,#19 - bic x28,x21,x27 - ror x8,x23,#28 - add x22,x22,x0 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x7,x7,x2,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x8,x8,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x6,x6,x15,ror#61 - eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x23,ror#39 // Sigma0(a) - eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) - add x1,x1,x10 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x1,x1,x7 - add x22,x22,x17 // h+=Sigma0(a) - add x1,x1,x6 - ldr x6,[sp,#24] - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x8,x3,#1 - and x17,x27,x26 - ror x7,x0,#19 - bic x19,x20,x26 - ror x9,x22,#28 - add x21,x21,x1 // h+=X[i] - eor x16,x16,x26,ror#18 - eor 
x8,x8,x3,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x9,x9,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x7,x7,x0,ror#61 - eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x22,ror#39 // Sigma0(a) - eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) - add x2,x2,x11 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x2,x2,x8 - add x21,x21,x17 // h+=Sigma0(a) - add x2,x2,x7 - ldr x7,[sp,#0] - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 - cbnz x19,Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#648 // rewind - - ldp x3,x4,[x0] - ldp x5,x6,[x0,#2*8] - add x1,x1,#14*8 // advance input pointer - ldp x7,x8,[x0,#4*8] - add x20,x20,x3 - ldp x9,x10,[x0,#6*8] - add x21,x21,x4 - add x22,x22,x5 - add x23,x23,x6 - stp x20,x21,[x0] - add x24,x24,x7 - add x25,x25,x8 - stp x22,x23,[x0,#2*8] - add x26,x26,x9 - add x27,x27,x10 - cmp x1,x2 - stp x24,x25,[x0,#4*8] - stp x26,x27,[x0,#6*8] - b.ne Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*8 - ldp x21,x22,[x29,#32] - ldp 
x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret - - -.section __TEXT,__const -.align 6 - -LK512: -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 -.quad 0 // terminator - -.byte 
83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#ifndef __KERNEL__ -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S deleted file mode 100644 index 0f5cbeadaf..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S +++ /dev/null @@ -1,1213 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.section __TEXT,__const - - -.align 7 // totally strategic alignment -_vpaes_consts: -Lk_mc_forward: // mc_forward -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 -Lk_mc_backward: // mc_backward -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F -Lk_sr: // sr -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -// -// "Hot" constants -// -Lk_inv: // inv, inva -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 -Lk_ipt: // input transform (lo, hi) -.quad 0xC2B2E8985A2A7000, 
0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 -Lk_sbo: // sbou, sbot -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA -Lk_sb1: // sb1u, sb1t -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -Lk_sb2: // sb2u, sb2t -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD - -// -// Decryption stuff -// -Lk_dipt: // decryption input transform -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 -Lk_dsbo: // decryption sbox final output -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C -Lk_dsb9: // decryption sbox output *9*u, *9*t -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -Lk_dsbd: // decryption sbox output *D*u, *D*t -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -Lk_dsbb: // decryption sbox output *B*u, *B*t -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -Lk_dsbe: // decryption sbox output *E*u, *E*t -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 - -// -// Key schedule constants -// -Lk_dksd: // decryption key schedule: invskew x*D -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -Lk_dksb: // decryption key schedule: invskew x*B -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -Lk_dkse: // decryption key schedule: invskew x*E + 0x63 -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -Lk_dks9: // decryption key schedule: invskew x*9 -.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - -Lk_rcon: // rcon -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -Lk_opt: // output transform -.quad 0xFF9F4929D6B66000, 
0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 -Lk_deskew: // deskew tables: inverts the sbox's "skew" -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.align 2 - -.align 6 - -.text -## -## _aes_preheat -## -## Fills register %r10 -> .aes_consts (so you can -fPIC) -## and %xmm9-%xmm15 as specified below. -## - -.align 4 -_vpaes_encrypt_preheat: - adrp x10, Lk_inv@PAGE - add x10, x10, Lk_inv@PAGEOFF - movi v17.16b, #0x0f - ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv - ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2 - ret - - -## -## _aes_encrypt_core -## -## AES-encrypt %xmm0. -## -## Inputs: -## %xmm0 = input -## %xmm9-%xmm15 as in _vpaes_preheat -## (%rdx) = scheduled keys -## -## Output in %xmm0 -## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax -## Preserves %xmm6 - %xmm8 so you get some local vectors -## -## - -.align 4 -_vpaes_encrypt_core: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - adrp x11, Lk_mc_forward@PAGE+16 - add x11, x11, Lk_mc_forward@PAGEOFF+16 - // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key - and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 - // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 - eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - b Lenc_entry - -.align 4 -Lenc_loop: - // middle of middle round - add x10, x11, #0x40 - tbl v4.16b, {v25.16b}, v2.16b // 
vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] - tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] - tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B - eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - sub w8, w8, #1 // nr-- - -Lenc_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 - cbnz w8, Lenc_loop - - // middle of last round - add x10, x11, #0x80 - // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo - // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] - tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 - ret - - -.globl _vpaes_encrypt -.private_extern _vpaes_encrypt - -.align 4 -_vpaes_encrypt: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - - ld1 {v7.16b}, [x0] - bl _vpaes_encrypt_preheat - bl _vpaes_encrypt_core - st1 {v0.16b}, [x1] - - ldp x29,x30,[sp],#16 - ret - - - -.align 4 -_vpaes_encrypt_2x: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - adrp x11, Lk_mc_forward@PAGE+16 - add x11, x11, Lk_mc_forward@PAGEOFF+16 - // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key - and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - and v9.16b, v15.16b, v17.16b - ushr v8.16b, v15.16b, #4 - tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 - tbl v9.16b, {v20.16b}, v9.16b - // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 - tbl v10.16b, {v21.16b}, v8.16b - eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 - eor v8.16b, v9.16b, v16.16b - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - eor v8.16b, v8.16b, v10.16b - b Lenc_2x_entry - -.align 4 -Lenc_2x_loop: - // middle of middle round - add x10, x11, #0x40 - tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - tbl v12.16b, {v25.16b}, v10.16b - ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] - tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - tbl v8.16b, {v24.16b}, v11.16b - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - tbl v13.16b, {v27.16b}, v10.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - eor v8.16b, v8.16b, v12.16b - tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - tbl v10.16b, {v26.16b}, v11.16b - ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] - tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B - tbl v11.16b, {v8.16b}, v1.16b - eor v2.16b, v2.16b, 
v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - eor v10.16b, v10.16b, v13.16b - tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - tbl v8.16b, {v8.16b}, v4.16b - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - eor v11.16b, v11.16b, v10.16b - tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - tbl v12.16b, {v11.16b},v1.16b - eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - eor v8.16b, v8.16b, v11.16b - and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - eor v8.16b, v8.16b, v12.16b - sub w8, w8, #1 // nr-- - -Lenc_2x_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - and v9.16b, v8.16b, v17.16b - ushr v8.16b, v8.16b, #4 - tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - tbl v13.16b, {v19.16b},v9.16b - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - eor v9.16b, v9.16b, v8.16b - tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v11.16b, {v18.16b},v8.16b - tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - tbl v12.16b, {v18.16b},v9.16b - eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v11.16b, v11.16b, v13.16b - eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - eor v12.16b, v12.16b, v13.16b - tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v10.16b, {v18.16b},v11.16b - tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - tbl v11.16b, {v18.16b},v12.16b - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v10.16b, v10.16b, v9.16b - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - eor v11.16b, v11.16b, v8.16b - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 - cbnz w8, 
Lenc_2x_loop - - // middle of last round - add x10, x11, #0x80 - // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo - // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - tbl v12.16b, {v22.16b}, v10.16b - ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] - tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - tbl v8.16b, {v23.16b}, v11.16b - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - eor v8.16b, v8.16b, v12.16b - tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 - tbl v1.16b, {v8.16b},v1.16b - ret - - - -.align 4 -_vpaes_decrypt_preheat: - adrp x10, Lk_inv@PAGE - add x10, x10, Lk_inv@PAGEOFF - movi v17.16b, #0x0f - adrp x11, Lk_dipt@PAGE - add x11, x11, Lk_dipt@PAGEOFF - ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv - ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // Lk_dipt, Lk_dsbo - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // Lk_dsb9, Lk_dsbd - ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // Lk_dsbb, Lk_dsbe - ret - - -## -## Decryption core -## -## Same API as encryption core. 
-## - -.align 4 -_vpaes_decrypt_core: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - - // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo - lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 - eor x11, x11, #0x30 // xor $0x30, %r11 - adrp x10, Lk_sr@PAGE - add x10, x10, Lk_sr@PAGEOFF - and x11, x11, #0x30 // and $0x30, %r11 - add x11, x11, x10 - adrp x10, Lk_mc_forward@PAGE+48 - add x10, x10, Lk_mc_forward@PAGEOFF+48 - - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key - and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 - // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - b Ldec_entry - -.align 4 -Ldec_loop: -// -// Inverse mix columns -// - // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 - // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - - tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - tbl v0.16b, {v0.16b}, v5.16b // 
vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet - - tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - sub w8, w8, #1 // sub $1,%rax # nr-- - -Ldec_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 - cbnz w8, Ldec_loop - - // middle of last round - // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # 
Lk_sr-Lk_dsbd=-0x160 - tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k - eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A - tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 - ret - - -.globl _vpaes_decrypt -.private_extern _vpaes_decrypt - -.align 4 -_vpaes_decrypt: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - ld1 {v7.16b}, [x0] - bl _vpaes_decrypt_preheat - bl _vpaes_decrypt_core - st1 {v0.16b}, [x1] - - ldp x29,x30,[sp],#16 - ret - - -// v14-v15 input, v0-v1 output - -.align 4 -_vpaes_decrypt_2x: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - - // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo - lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 - eor x11, x11, #0x30 // xor $0x30, %r11 - adrp x10, Lk_sr@PAGE - add x10, x10, Lk_sr@PAGEOFF - and x11, x11, #0x30 // and $0x30, %r11 - add x11, x11, x10 - adrp x10, Lk_mc_forward@PAGE+48 - add x10, x10, Lk_mc_forward@PAGEOFF+48 - - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key - and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - and v9.16b, v15.16b, v17.16b - ushr v8.16b, v15.16b, #4 - tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - tbl v10.16b, {v20.16b},v9.16b - ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 - // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - tbl v8.16b, {v21.16b},v8.16b - eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 - eor v10.16b, v10.16b, v16.16b - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - eor v8.16b, v8.16b, v10.16b - b Ldec_2x_entry - -.align 4 -Ldec_2x_loop: -// -// Inverse mix columns -// - // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - tbl v12.16b, {v24.16b}, v10.16b - tbl v1.16b, 
{v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - tbl v9.16b, {v25.16b}, v11.16b - eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 - eor v8.16b, v12.16b, v16.16b - // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - - tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - tbl v12.16b, {v26.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - tbl v9.16b, {v27.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - tbl v12.16b, {v28.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - tbl v9.16b, {v29.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet - - tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - tbl v12.16b, {v30.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet - tbl v9.16b, {v31.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - 
ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - sub w8, w8, #1 // sub $1,%rax # nr-- - -Ldec_2x_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - and v9.16b, v8.16b, v17.16b - ushr v8.16b, v8.16b, #4 - tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - tbl v10.16b, {v19.16b},v9.16b - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - eor v9.16b, v9.16b, v8.16b - tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v11.16b, {v18.16b},v8.16b - tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - tbl v12.16b, {v18.16b},v9.16b - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v11.16b, v11.16b, v10.16b - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - eor v12.16b, v12.16b, v10.16b - tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v10.16b, {v18.16b},v11.16b - tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - tbl v11.16b, {v18.16b},v12.16b - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v10.16b, v10.16b, v9.16b - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - eor v11.16b, v11.16b, v8.16b - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 - cbnz w8, Ldec_2x_loop - - // middle of last round - // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - tbl v12.16b, {v22.16b}, v10.16b - // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t - tbl v9.16b, {v23.16b}, v11.16b - ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 - eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, 
%xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A - eor v8.16b, v9.16b, v12.16b - tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 - tbl v1.16b, {v8.16b},v2.16b - ret - -######################################################## -## ## -## AES key schedule ## -## ## -######################################################## - -.align 4 -_vpaes_key_preheat: - adrp x10, Lk_inv@PAGE - add x10, x10, Lk_inv@PAGEOFF - movi v16.16b, #0x5b // Lk_s63 - adrp x11, Lk_sb1@PAGE - add x11, x11, Lk_sb1@PAGEOFF - movi v17.16b, #0x0f // Lk_s0F - ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt - adrp x10, Lk_dksd@PAGE - add x10, x10, Lk_dksd@PAGEOFF - ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 - adrp x11, Lk_mc_forward@PAGE - add x11, x11, Lk_mc_forward@PAGEOFF - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb - ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 - ld1 {v8.2d}, [x10] // Lk_rcon - ld1 {v9.2d}, [x11] // Lk_mc_forward[0] - ret - - - -.align 4 -_vpaes_schedule_core: - stp x29, x30, [sp,#-16]! 
- add x29,sp,#0 - - bl _vpaes_key_preheat // load the tables - - ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) - - // input transform - mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 - bl _vpaes_schedule_transform - mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 - - adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10 - add x10, x10, Lk_sr@PAGEOFF - - add x8, x8, x10 - cbnz w3, Lschedule_am_decrypting - - // encrypting, output zeroth round key after transform - st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) - b Lschedule_go - -Lschedule_am_decrypting: - // decrypting, output zeroth round key after shiftrows - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) - eor x8, x8, #0x30 // xor $0x30, %r8 - -Lschedule_go: - cmp w1, #192 // cmp $192, %esi - b.hi Lschedule_256 - b.eq Lschedule_192 - // 128: fall though - -## -## .schedule_128 -## -## 128-bit specific part of key schedule. -## -## This schedule is really simple, because all its parts -## are accomplished by the subroutines. -## -Lschedule_128: - mov x0, #10 // mov $10, %esi - -Loop_schedule_128: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_round - cbz x0, Lschedule_mangle_last - bl _vpaes_schedule_mangle // write output - b Loop_schedule_128 - -## -## .aes_schedule_192 -## -## 192-bit specific part of key schedule. -## -## The main body of this schedule is the same as the 128-bit -## schedule, but with more smearing. The long, high side is -## stored in %xmm7 as before, and the short, low side is in -## the high bits of %xmm6. -## -## This schedule is somewhat nastier, however, because each -## round produces 192 bits of key material, or 1.5 round keys. -## Therefore, on each cycle we do 2 rounds and produce 3 round -## keys. 
-## -.align 4 -Lschedule_192: - sub x0, x0, #8 - ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) - bl _vpaes_schedule_transform // input transform - mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part - eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 - ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros - mov x0, #4 // mov $4, %esi - -Loop_schedule_192: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_round - ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 - bl _vpaes_schedule_mangle // save key n - bl _vpaes_schedule_192_smear - bl _vpaes_schedule_mangle // save key n+1 - bl _vpaes_schedule_round - cbz x0, Lschedule_mangle_last - bl _vpaes_schedule_mangle // save key n+2 - bl _vpaes_schedule_192_smear - b Loop_schedule_192 - -## -## .aes_schedule_256 -## -## 256-bit specific part of key schedule. -## -## The structure here is very similar to the 128-bit -## schedule, but with an additional "low side" in -## %xmm6. The low side's rounds are the same as the -## high side's, except no rcon and no rotation. -## -.align 4 -Lschedule_256: - ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) - bl _vpaes_schedule_transform // input transform - mov x0, #7 // mov $7, %esi - -Loop_schedule_256: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_mangle // output low result - mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 - - // high round - bl _vpaes_schedule_round - cbz x0, Lschedule_mangle_last - bl _vpaes_schedule_mangle - - // low round. 
swap xmm7 and xmm6 - dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 - movi v4.16b, #0 - mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 - mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 - bl _vpaes_schedule_low_round - mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 - - b Loop_schedule_256 - -## -## .aes_schedule_mangle_last -## -## Mangler for last round of key schedule -## Mangles %xmm0 -## when encrypting, outputs out(%xmm0) ^ 63 -## when decrypting, outputs unskew(%xmm0) -## -## Always called right before return... jumps to cleanup and exits -## -.align 4 -Lschedule_mangle_last: - // schedule last round key from xmm0 - adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew - add x11, x11, Lk_deskew@PAGEOFF - - cbnz w3, Lschedule_mangle_last_dec - - // encrypting - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 - adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform - add x11, x11, Lk_opt@PAGEOFF - add x2, x2, #32 // add $32, %rdx - tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute - -Lschedule_mangle_last_dec: - ld1 {v20.2d,v21.2d}, [x11] // reload constants - sub x2, x2, #16 // add $-16, %rdx - eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 - bl _vpaes_schedule_transform // output transform - st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key - - // cleanup - eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 - eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 - eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 - eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 - eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 - eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 - eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 - eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 - ldp x29, x30, [sp],#16 - ret - - -## -## .aes_schedule_192_smear -## -## Smear the short, low side in the 192-bit key schedule. 
-## -## Inputs: -## %xmm7: high side, b a x y -## %xmm6: low side, d c 0 0 -## %xmm13: 0 -## -## Outputs: -## %xmm6: b+c+d b+c 0 0 -## %xmm0: b+c+d b+c b a -## - -.align 4 -_vpaes_schedule_192_smear: - movi v1.16b, #0 - dup v0.4s, v7.s[3] - ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 - ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a - eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 - eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 - eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a - mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 - ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros - ret - - -## -## .aes_schedule_round -## -## Runs one main round of the key schedule on %xmm0, %xmm7 -## -## Specifically, runs subbytes on the high dword of %xmm0 -## then rotates it by one byte and xors into the low dword of -## %xmm7. -## -## Adds rcon from low byte of %xmm8, then rotates %xmm8 for -## next rcon. -## -## Smears the dwords of %xmm7 by xoring the low into the -## second low, result into third, result into highest. -## -## Returns results in %xmm7 = %xmm0. -## Clobbers %xmm1-%xmm4, %r11. -## - -.align 4 -_vpaes_schedule_round: - // extract rcon from xmm8 - movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 - ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 - ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 - eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 - - // rotate - dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 - ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 - - // fall through... - - // low round: same as high round, but no rotation and no rcon. 
-_vpaes_schedule_low_round: - // smear xmm7 - ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 - eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 - ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 - - // subbytes - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 - tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 - tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak - eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io - eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo - tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou - tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t - eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output - - // add in smeared stuff - eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 - eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 - ret - - -## -## .aes_schedule_transform -## -## Linear-transform %xmm0 according to tables at (%r11) -## -## Requires that %xmm9 = 0x0F0F... 
as in preheat -## Output in %xmm0 -## Clobbers %xmm1, %xmm2 -## - -.align 4 -_vpaes_schedule_transform: - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - // vmovdqa (%r11), %xmm2 # lo - tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - // vmovdqa 16(%r11), %xmm1 # hi - tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - ret - - -## -## .aes_schedule_mangle -## -## Mangle xmm0 from (basis-transformed) standard version -## to our version. -## -## On encrypt, -## xor with 0x63 -## multiply by circulant 0,1,1,1 -## apply shiftrows transform -## -## On decrypt, -## xor with 0x63 -## multiply by "inverse mixcolumns" circulant E,B,D,9 -## deskew -## apply shiftrows transform -## -## -## Writes out to (%rdx), and increments or decrements it -## Keeps track of round number mod 4 in %r8 -## Preserves xmm0 -## Clobbers xmm1-xmm5 -## - -.align 4 -_vpaes_schedule_mangle: - mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later - // vmovdqa .Lk_mc_forward(%rip),%xmm5 - cbnz w3, Lschedule_mangle_dec - - // encrypting - eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 - add x2, x2, #16 // add $16, %rdx - tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 - tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 - tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 - eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 - - b Lschedule_mangle_both -.align 4 -Lschedule_mangle_dec: - // inverse mix columns - // lea .Lk_dksd(%rip),%r11 - ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi - and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo - - // vmovdqa 0x00(%r11), %xmm2 - tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - // vmovdqa 0x10(%r11), %xmm3 - 
tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - - // vmovdqa 0x20(%r11), %xmm2 - tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - // vmovdqa 0x30(%r11), %xmm3 - tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - - // vmovdqa 0x40(%r11), %xmm2 - tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - // vmovdqa 0x50(%r11), %xmm3 - tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - - // vmovdqa 0x60(%r11), %xmm2 - tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - // vmovdqa 0x70(%r11), %xmm4 - tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 - - sub x2, x2, #16 // add $-16, %rdx - -Lschedule_mangle_both: - tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - add x8, x8, #64-16 // add $-16, %r8 - and x8, x8, #~(1<<6) // and $0x30, %r8 - st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) - ret - - -.globl _vpaes_set_encrypt_key -.private_extern _vpaes_set_encrypt_key - -.align 4 -_vpaes_set_encrypt_key: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! 
// ABI spec says so - - lsr w9, w1, #5 // shr $5,%eax - add w9, w9, #5 // $5,%eax - str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - - mov w3, #0 // mov $0,%ecx - mov x8, #0x30 // mov $0x30,%r8d - bl _vpaes_schedule_core - eor x0, x0, x0 - - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret - - -.globl _vpaes_set_decrypt_key -.private_extern _vpaes_set_decrypt_key - -.align 4 -_vpaes_set_decrypt_key: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - - lsr w9, w1, #5 // shr $5,%eax - add w9, w9, #5 // $5,%eax - str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - lsl w9, w9, #4 // shl $4,%eax - add x2, x2, #16 // lea 16(%rdx,%rax),%rdx - add x2, x2, x9 - - mov w3, #1 // mov $1,%ecx - lsr w8, w1, #1 // shr $1,%r8d - and x8, x8, #32 // and $32,%r8d - eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 - bl _vpaes_schedule_core - - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret - -.globl _vpaes_cbc_encrypt -.private_extern _vpaes_cbc_encrypt - -.align 4 -_vpaes_cbc_encrypt: - cbz x2, Lcbc_abort - cmp w5, #0 // check direction - b.eq vpaes_cbc_decrypt - - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - mov x17, x2 // reassign - mov x2, x3 // reassign - - ld1 {v0.16b}, [x4] // load ivec - bl _vpaes_encrypt_preheat - b Lcbc_enc_loop - -.align 4 -Lcbc_enc_loop: - ld1 {v7.16b}, [x0],#16 // load input - eor v7.16b, v7.16b, v0.16b // xor with ivec - bl _vpaes_encrypt_core - st1 {v0.16b}, [x1],#16 // save output - subs x17, x17, #16 - b.hi Lcbc_enc_loop - - st1 {v0.16b}, [x4] // write ivec - - ldp x29,x30,[sp],#16 -Lcbc_abort: - ret - - - -.align 4 -vpaes_cbc_decrypt: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - stp d10,d11,[sp,#-16]! - stp d12,d13,[sp,#-16]! - stp d14,d15,[sp,#-16]! 
- - mov x17, x2 // reassign - mov x2, x3 // reassign - ld1 {v6.16b}, [x4] // load ivec - bl _vpaes_decrypt_preheat - tst x17, #16 - b.eq Lcbc_dec_loop2x - - ld1 {v7.16b}, [x0], #16 // load input - bl _vpaes_decrypt_core - eor v0.16b, v0.16b, v6.16b // xor with ivec - orr v6.16b, v7.16b, v7.16b // next ivec value - st1 {v0.16b}, [x1], #16 - subs x17, x17, #16 - b.ls Lcbc_dec_done - -.align 4 -Lcbc_dec_loop2x: - ld1 {v14.16b,v15.16b}, [x0], #32 - bl _vpaes_decrypt_2x - eor v0.16b, v0.16b, v6.16b // xor with ivec - eor v1.16b, v1.16b, v14.16b - orr v6.16b, v15.16b, v15.16b - st1 {v0.16b,v1.16b}, [x1], #32 - subs x17, x17, #32 - b.hi Lcbc_dec_loop2x - -Lcbc_dec_done: - st1 {v6.16b}, [x4] - - ldp d14,d15,[sp],#16 - ldp d12,d13,[sp],#16 - ldp d10,d11,[sp],#16 - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret - -.globl _vpaes_ctr32_encrypt_blocks -.private_extern _vpaes_ctr32_encrypt_blocks - -.align 4 -_vpaes_ctr32_encrypt_blocks: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - stp d10,d11,[sp,#-16]! - stp d12,d13,[sp,#-16]! - stp d14,d15,[sp,#-16]! - - cbz x2, Lctr32_done - - // Note, unlike the other functions, x2 here is measured in blocks, - // not bytes. - mov x17, x2 - mov x2, x3 - - // Load the IV and counter portion. - ldr w6, [x4, #12] - ld1 {v7.16b}, [x4] - - bl _vpaes_encrypt_preheat - tst x17, #1 - rev w6, w6 // The counter is big-endian. - b.eq Lctr32_prep_loop - - // Handle one block so the remaining block count is even for - // _vpaes_encrypt_2x. - ld1 {v6.16b}, [x0], #16 // Load input ahead of time - bl _vpaes_encrypt_core - eor v0.16b, v0.16b, v6.16b // XOR input and result - st1 {v0.16b}, [x1], #16 - subs x17, x17, #1 - // Update the counter. - add w6, w6, #1 - rev w7, w6 - mov v7.s[3], w7 - b.ls Lctr32_done - -Lctr32_prep_loop: - // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x - // uses v14 and v15. 
- mov v15.16b, v7.16b - mov v14.16b, v7.16b - add w6, w6, #1 - rev w7, w6 - mov v15.s[3], w7 - -Lctr32_loop: - ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time - bl _vpaes_encrypt_2x - eor v0.16b, v0.16b, v6.16b // XOR input and result - eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) - st1 {v0.16b,v1.16b}, [x1], #32 - subs x17, x17, #2 - // Update the counter. - add w7, w6, #1 - add w6, w6, #2 - rev w7, w7 - mov v14.s[3], w7 - rev w7, w6 - mov v15.s[3], w7 - b.hi Lctr32_loop - -Lctr32_done: - ldp d14,d15,[sp],#16 - ldp d12,d13,[sp],#16 - ldp d10,d11,[sp],#16 - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret - -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/test/trampoline-armv8.S b/packager/third_party/boringssl/ios-aarch64/crypto/test/trampoline-armv8.S deleted file mode 100644 index 438e9298c0..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/test/trampoline-armv8.S +++ /dev/null @@ -1,685 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -// abi_test_trampoline loads callee-saved registers from |state|, calls |func| -// with |argv|, then saves the callee-saved registers into |state|. It returns -// the result of |func|. The |unwind| argument is unused. 
-// uint64_t abi_test_trampoline(void (*func)(...), CallerState *state, -// const uint64_t *argv, size_t argc, -// uint64_t unwind); - -.globl _abi_test_trampoline -.private_extern _abi_test_trampoline -.align 4 -_abi_test_trampoline: -Labi_test_trampoline_begin: - // Stack layout (low to high addresses) - // x29,x30 (16 bytes) - // d8-d15 (64 bytes) - // x19-x28 (80 bytes) - // x1 (8 bytes) - // padding (8 bytes) - stp x29, x30, [sp, #-176]! - mov x29, sp - - // Saved callee-saved registers and |state|. - stp d8, d9, [sp, #16] - stp d10, d11, [sp, #32] - stp d12, d13, [sp, #48] - stp d14, d15, [sp, #64] - stp x19, x20, [sp, #80] - stp x21, x22, [sp, #96] - stp x23, x24, [sp, #112] - stp x25, x26, [sp, #128] - stp x27, x28, [sp, #144] - str x1, [sp, #160] - - // Load registers from |state|, with the exception of x29. x29 is the - // frame pointer and also callee-saved, but AAPCS64 allows platforms to - // mandate that x29 always point to a frame. iOS64 does so, which means - // we cannot fill x29 with entropy without violating ABI rules - // ourselves. x29 is tested separately below. - ldp d8, d9, [x1], #16 - ldp d10, d11, [x1], #16 - ldp d12, d13, [x1], #16 - ldp d14, d15, [x1], #16 - ldp x19, x20, [x1], #16 - ldp x21, x22, [x1], #16 - ldp x23, x24, [x1], #16 - ldp x25, x26, [x1], #16 - ldp x27, x28, [x1], #16 - - // Move parameters into temporary registers. - mov x9, x0 - mov x10, x2 - mov x11, x3 - - // Load parameters into registers. - cbz x11, Largs_done - ldr x0, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x1, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x2, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x3, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x4, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x5, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x6, [x10], #8 - subs x11, x11, #1 - b.eq Largs_done - ldr x7, [x10], #8 - -Largs_done: - blr x9 - - // Reload |state| and store registers. 
- ldr x1, [sp, #160] - stp d8, d9, [x1], #16 - stp d10, d11, [x1], #16 - stp d12, d13, [x1], #16 - stp d14, d15, [x1], #16 - stp x19, x20, [x1], #16 - stp x21, x22, [x1], #16 - stp x23, x24, [x1], #16 - stp x25, x26, [x1], #16 - stp x27, x28, [x1], #16 - - // |func| is required to preserve x29, the frame pointer. We cannot load - // random values into x29 (see comment above), so compare it against the - // expected value and zero the field of |state| if corrupted. - mov x9, sp - cmp x29, x9 - b.eq Lx29_ok - str xzr, [x1] - -Lx29_ok: - // Restore callee-saved registers. - ldp d8, d9, [sp, #16] - ldp d10, d11, [sp, #32] - ldp d12, d13, [sp, #48] - ldp d14, d15, [sp, #64] - ldp x19, x20, [sp, #80] - ldp x21, x22, [sp, #96] - ldp x23, x24, [sp, #112] - ldp x25, x26, [sp, #128] - ldp x27, x28, [sp, #144] - - ldp x29, x30, [sp], #176 - ret - - -.globl _abi_test_clobber_x0 -.private_extern _abi_test_clobber_x0 -.align 4 -_abi_test_clobber_x0: - mov x0, xzr - ret - - -.globl _abi_test_clobber_x1 -.private_extern _abi_test_clobber_x1 -.align 4 -_abi_test_clobber_x1: - mov x1, xzr - ret - - -.globl _abi_test_clobber_x2 -.private_extern _abi_test_clobber_x2 -.align 4 -_abi_test_clobber_x2: - mov x2, xzr - ret - - -.globl _abi_test_clobber_x3 -.private_extern _abi_test_clobber_x3 -.align 4 -_abi_test_clobber_x3: - mov x3, xzr - ret - - -.globl _abi_test_clobber_x4 -.private_extern _abi_test_clobber_x4 -.align 4 -_abi_test_clobber_x4: - mov x4, xzr - ret - - -.globl _abi_test_clobber_x5 -.private_extern _abi_test_clobber_x5 -.align 4 -_abi_test_clobber_x5: - mov x5, xzr - ret - - -.globl _abi_test_clobber_x6 -.private_extern _abi_test_clobber_x6 -.align 4 -_abi_test_clobber_x6: - mov x6, xzr - ret - - -.globl _abi_test_clobber_x7 -.private_extern _abi_test_clobber_x7 -.align 4 -_abi_test_clobber_x7: - mov x7, xzr - ret - - -.globl _abi_test_clobber_x8 -.private_extern _abi_test_clobber_x8 -.align 4 -_abi_test_clobber_x8: - mov x8, xzr - ret - - -.globl _abi_test_clobber_x9 
-.private_extern _abi_test_clobber_x9 -.align 4 -_abi_test_clobber_x9: - mov x9, xzr - ret - - -.globl _abi_test_clobber_x10 -.private_extern _abi_test_clobber_x10 -.align 4 -_abi_test_clobber_x10: - mov x10, xzr - ret - - -.globl _abi_test_clobber_x11 -.private_extern _abi_test_clobber_x11 -.align 4 -_abi_test_clobber_x11: - mov x11, xzr - ret - - -.globl _abi_test_clobber_x12 -.private_extern _abi_test_clobber_x12 -.align 4 -_abi_test_clobber_x12: - mov x12, xzr - ret - - -.globl _abi_test_clobber_x13 -.private_extern _abi_test_clobber_x13 -.align 4 -_abi_test_clobber_x13: - mov x13, xzr - ret - - -.globl _abi_test_clobber_x14 -.private_extern _abi_test_clobber_x14 -.align 4 -_abi_test_clobber_x14: - mov x14, xzr - ret - - -.globl _abi_test_clobber_x15 -.private_extern _abi_test_clobber_x15 -.align 4 -_abi_test_clobber_x15: - mov x15, xzr - ret - - -.globl _abi_test_clobber_x16 -.private_extern _abi_test_clobber_x16 -.align 4 -_abi_test_clobber_x16: - mov x16, xzr - ret - - -.globl _abi_test_clobber_x17 -.private_extern _abi_test_clobber_x17 -.align 4 -_abi_test_clobber_x17: - mov x17, xzr - ret - - -.globl _abi_test_clobber_x19 -.private_extern _abi_test_clobber_x19 -.align 4 -_abi_test_clobber_x19: - mov x19, xzr - ret - - -.globl _abi_test_clobber_x20 -.private_extern _abi_test_clobber_x20 -.align 4 -_abi_test_clobber_x20: - mov x20, xzr - ret - - -.globl _abi_test_clobber_x21 -.private_extern _abi_test_clobber_x21 -.align 4 -_abi_test_clobber_x21: - mov x21, xzr - ret - - -.globl _abi_test_clobber_x22 -.private_extern _abi_test_clobber_x22 -.align 4 -_abi_test_clobber_x22: - mov x22, xzr - ret - - -.globl _abi_test_clobber_x23 -.private_extern _abi_test_clobber_x23 -.align 4 -_abi_test_clobber_x23: - mov x23, xzr - ret - - -.globl _abi_test_clobber_x24 -.private_extern _abi_test_clobber_x24 -.align 4 -_abi_test_clobber_x24: - mov x24, xzr - ret - - -.globl _abi_test_clobber_x25 -.private_extern _abi_test_clobber_x25 -.align 4 -_abi_test_clobber_x25: - mov 
x25, xzr - ret - - -.globl _abi_test_clobber_x26 -.private_extern _abi_test_clobber_x26 -.align 4 -_abi_test_clobber_x26: - mov x26, xzr - ret - - -.globl _abi_test_clobber_x27 -.private_extern _abi_test_clobber_x27 -.align 4 -_abi_test_clobber_x27: - mov x27, xzr - ret - - -.globl _abi_test_clobber_x28 -.private_extern _abi_test_clobber_x28 -.align 4 -_abi_test_clobber_x28: - mov x28, xzr - ret - - -.globl _abi_test_clobber_x29 -.private_extern _abi_test_clobber_x29 -.align 4 -_abi_test_clobber_x29: - mov x29, xzr - ret - - -.globl _abi_test_clobber_d0 -.private_extern _abi_test_clobber_d0 -.align 4 -_abi_test_clobber_d0: - fmov d0, xzr - ret - - -.globl _abi_test_clobber_d1 -.private_extern _abi_test_clobber_d1 -.align 4 -_abi_test_clobber_d1: - fmov d1, xzr - ret - - -.globl _abi_test_clobber_d2 -.private_extern _abi_test_clobber_d2 -.align 4 -_abi_test_clobber_d2: - fmov d2, xzr - ret - - -.globl _abi_test_clobber_d3 -.private_extern _abi_test_clobber_d3 -.align 4 -_abi_test_clobber_d3: - fmov d3, xzr - ret - - -.globl _abi_test_clobber_d4 -.private_extern _abi_test_clobber_d4 -.align 4 -_abi_test_clobber_d4: - fmov d4, xzr - ret - - -.globl _abi_test_clobber_d5 -.private_extern _abi_test_clobber_d5 -.align 4 -_abi_test_clobber_d5: - fmov d5, xzr - ret - - -.globl _abi_test_clobber_d6 -.private_extern _abi_test_clobber_d6 -.align 4 -_abi_test_clobber_d6: - fmov d6, xzr - ret - - -.globl _abi_test_clobber_d7 -.private_extern _abi_test_clobber_d7 -.align 4 -_abi_test_clobber_d7: - fmov d7, xzr - ret - - -.globl _abi_test_clobber_d8 -.private_extern _abi_test_clobber_d8 -.align 4 -_abi_test_clobber_d8: - fmov d8, xzr - ret - - -.globl _abi_test_clobber_d9 -.private_extern _abi_test_clobber_d9 -.align 4 -_abi_test_clobber_d9: - fmov d9, xzr - ret - - -.globl _abi_test_clobber_d10 -.private_extern _abi_test_clobber_d10 -.align 4 -_abi_test_clobber_d10: - fmov d10, xzr - ret - - -.globl _abi_test_clobber_d11 -.private_extern _abi_test_clobber_d11 -.align 4 
-_abi_test_clobber_d11: - fmov d11, xzr - ret - - -.globl _abi_test_clobber_d12 -.private_extern _abi_test_clobber_d12 -.align 4 -_abi_test_clobber_d12: - fmov d12, xzr - ret - - -.globl _abi_test_clobber_d13 -.private_extern _abi_test_clobber_d13 -.align 4 -_abi_test_clobber_d13: - fmov d13, xzr - ret - - -.globl _abi_test_clobber_d14 -.private_extern _abi_test_clobber_d14 -.align 4 -_abi_test_clobber_d14: - fmov d14, xzr - ret - - -.globl _abi_test_clobber_d15 -.private_extern _abi_test_clobber_d15 -.align 4 -_abi_test_clobber_d15: - fmov d15, xzr - ret - - -.globl _abi_test_clobber_d16 -.private_extern _abi_test_clobber_d16 -.align 4 -_abi_test_clobber_d16: - fmov d16, xzr - ret - - -.globl _abi_test_clobber_d17 -.private_extern _abi_test_clobber_d17 -.align 4 -_abi_test_clobber_d17: - fmov d17, xzr - ret - - -.globl _abi_test_clobber_d18 -.private_extern _abi_test_clobber_d18 -.align 4 -_abi_test_clobber_d18: - fmov d18, xzr - ret - - -.globl _abi_test_clobber_d19 -.private_extern _abi_test_clobber_d19 -.align 4 -_abi_test_clobber_d19: - fmov d19, xzr - ret - - -.globl _abi_test_clobber_d20 -.private_extern _abi_test_clobber_d20 -.align 4 -_abi_test_clobber_d20: - fmov d20, xzr - ret - - -.globl _abi_test_clobber_d21 -.private_extern _abi_test_clobber_d21 -.align 4 -_abi_test_clobber_d21: - fmov d21, xzr - ret - - -.globl _abi_test_clobber_d22 -.private_extern _abi_test_clobber_d22 -.align 4 -_abi_test_clobber_d22: - fmov d22, xzr - ret - - -.globl _abi_test_clobber_d23 -.private_extern _abi_test_clobber_d23 -.align 4 -_abi_test_clobber_d23: - fmov d23, xzr - ret - - -.globl _abi_test_clobber_d24 -.private_extern _abi_test_clobber_d24 -.align 4 -_abi_test_clobber_d24: - fmov d24, xzr - ret - - -.globl _abi_test_clobber_d25 -.private_extern _abi_test_clobber_d25 -.align 4 -_abi_test_clobber_d25: - fmov d25, xzr - ret - - -.globl _abi_test_clobber_d26 -.private_extern _abi_test_clobber_d26 -.align 4 -_abi_test_clobber_d26: - fmov d26, xzr - ret - - -.globl 
_abi_test_clobber_d27 -.private_extern _abi_test_clobber_d27 -.align 4 -_abi_test_clobber_d27: - fmov d27, xzr - ret - - -.globl _abi_test_clobber_d28 -.private_extern _abi_test_clobber_d28 -.align 4 -_abi_test_clobber_d28: - fmov d28, xzr - ret - - -.globl _abi_test_clobber_d29 -.private_extern _abi_test_clobber_d29 -.align 4 -_abi_test_clobber_d29: - fmov d29, xzr - ret - - -.globl _abi_test_clobber_d30 -.private_extern _abi_test_clobber_d30 -.align 4 -_abi_test_clobber_d30: - fmov d30, xzr - ret - - -.globl _abi_test_clobber_d31 -.private_extern _abi_test_clobber_d31 -.align 4 -_abi_test_clobber_d31: - fmov d31, xzr - ret - - -.globl _abi_test_clobber_v8_upper -.private_extern _abi_test_clobber_v8_upper -.align 4 -_abi_test_clobber_v8_upper: - fmov v8.d[1], xzr - ret - - -.globl _abi_test_clobber_v9_upper -.private_extern _abi_test_clobber_v9_upper -.align 4 -_abi_test_clobber_v9_upper: - fmov v9.d[1], xzr - ret - - -.globl _abi_test_clobber_v10_upper -.private_extern _abi_test_clobber_v10_upper -.align 4 -_abi_test_clobber_v10_upper: - fmov v10.d[1], xzr - ret - - -.globl _abi_test_clobber_v11_upper -.private_extern _abi_test_clobber_v11_upper -.align 4 -_abi_test_clobber_v11_upper: - fmov v11.d[1], xzr - ret - - -.globl _abi_test_clobber_v12_upper -.private_extern _abi_test_clobber_v12_upper -.align 4 -_abi_test_clobber_v12_upper: - fmov v12.d[1], xzr - ret - - -.globl _abi_test_clobber_v13_upper -.private_extern _abi_test_clobber_v13_upper -.align 4 -_abi_test_clobber_v13_upper: - fmov v13.d[1], xzr - ret - - -.globl _abi_test_clobber_v14_upper -.private_extern _abi_test_clobber_v14_upper -.align 4 -_abi_test_clobber_v14_upper: - fmov v14.d[1], xzr - ret - - -.globl _abi_test_clobber_v15_upper -.private_extern _abi_test_clobber_v15_upper -.align 4 -_abi_test_clobber_v15_upper: - fmov v15.d[1], xzr - ret - -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-aarch64/crypto/third_party/sike/asm/fp-armv8.S 
b/packager/third_party/boringssl/ios-aarch64/crypto/third_party/sike/asm/fp-armv8.S deleted file mode 100644 index c48863f65f..0000000000 --- a/packager/third_party/boringssl/ios-aarch64/crypto/third_party/sike/asm/fp-armv8.S +++ /dev/null @@ -1,996 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.section __TEXT,__const - -# p434 x 2 -Lp434x2: -.quad 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF -.quad 0xFB82ECF5C5FFFFFF, 0xF78CB8F062B15D47 -.quad 0xD9F8BFAD038A40AC, 0x0004683E4E2EE688 - -# p434 + 1 -Lp434p1: -.quad 0xFDC1767AE3000000, 0x7BC65C783158AEA3 -.quad 0x6CFC5FD681C52056, 0x0002341F27177344 - -.text -.globl _sike_mpmul -.private_extern _sike_mpmul -.align 4 -_sike_mpmul: - stp x29, x30, [sp,#-96]! 
- add x29, sp, #0 - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - - ldp x3, x4, [x0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x10, x11, [x1,#0] - ldp x12, x13, [x1,#16] - ldp x14, x15, [x1,#32] - ldr x16, [x1,#48] - - // x3-x7 <- AH + AL, x7 <- carry - adds x3, x3, x7 - adcs x4, x4, x8 - adcs x5, x5, x9 - adcs x6, x6, xzr - adc x7, xzr, xzr - - // x10-x13 <- BH + BL, x8 <- carry - adds x10, x10, x14 - adcs x11, x11, x15 - adcs x12, x12, x16 - adcs x13, x13, xzr - adc x8, xzr, xzr - - // x9 <- combined carry - and x9, x7, x8 - // x7-x8 <- mask - sub x7, xzr, x7 - sub x8, xzr, x8 - - // x15-x19 <- masked (BH + BL) - and x14, x10, x7 - and x15, x11, x7 - and x16, x12, x7 - and x17, x13, x7 - - // x20-x23 <- masked (AH + AL) - and x20, x3, x8 - and x21, x4, x8 - and x22, x5, x8 - and x23, x6, x8 - - // x15-x19, x7 <- masked (AH+AL) + masked (BH+BL), step 1 - adds x14, x14, x20 - adcs x15, x15, x21 - adcs x16, x16, x22 - adcs x17, x17, x23 - adc x7, x9, xzr - - // x8-x9,x19,x20-x24 <- (AH+AL) x (BH+BL), low part - stp x3, x4, [x2,#0] - // A0-A1 <- AH + AL, T0 <- mask - adds x3, x3, x5 - adcs x4, x4, x6 - adc x25, xzr, xzr - - // C6, T1 <- BH + BL, C7 <- mask - adds x23, x10, x12 - adcs x26, x11, x13 - adc x24, xzr, xzr - - // C0-C1 <- masked (BH + BL) - sub x19, xzr, x25 - sub x20, xzr, x24 - and x8, x23, x19 - and x9, x26, x19 - - // C4-C5 <- masked (AH + AL), T0 <- combined carry - and x21, x3, x20 - and x22, x4, x20 - mul x19, x3, x23 - mul x20, x3, x26 - and x25, x25, x24 - - // C0-C1, T0 <- (AH+AL) x (BH+BL), part 1 - adds x8, x21, x8 - umulh x21, x3, x26 - adcs x9, x22, x9 - umulh x22, x3, x23 - adc x25, x25, xzr - - // C2-C5 <- (AH+AL) x (BH+BL), low part - mul x3, x4, x23 - umulh x23, x4, x23 - adds x20, x20, x22 - adc x21, x21, xzr - - mul x24, x4, x26 - umulh x26, x4, x26 - adds x20, x20, x3 - adcs x21, x21, x23 - adc x22, xzr, xzr - - adds x21, x21, 
x24 - adc x22, x22, x26 - - ldp x3, x4, [x2,#0] - - // C2-C5, T0 <- (AH+AL) x (BH+BL), final part - adds x21, x8, x21 - umulh x24, x3, x10 - umulh x26, x3, x11 - adcs x22, x9, x22 - mul x8, x3, x10 - mul x9, x3, x11 - adc x25, x25, xzr - - // C0-C1, T1, C7 <- AL x BL - mul x3, x4, x10 - umulh x10, x4, x10 - adds x9, x9, x24 - adc x26, x26, xzr - - mul x23, x4, x11 - umulh x11, x4, x11 - adds x9, x9, x3 - adcs x26, x26, x10 - adc x24, xzr, xzr - - adds x26, x26, x23 - adc x24, x24, x11 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - mul x3, x5, x12 - umulh x10, x5, x12 - subs x19, x19, x8 - sbcs x20, x20, x9 - sbcs x21, x21, x26 - mul x4, x5, x13 - umulh x23, x5, x13 - sbcs x22, x22, x24 - sbc x25, x25, xzr - - // A0, A1, C6, B0 <- AH x BH - mul x5, x6, x12 - umulh x12, x6, x12 - adds x4, x4, x10 - adc x23, x23, xzr - - mul x11, x6, x13 - umulh x13, x6, x13 - adds x4, x4, x5 - adcs x23, x23, x12 - adc x10, xzr, xzr - - adds x23, x23, x11 - adc x10, x10, x13 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x19, x19, x3 - sbcs x20, x20, x4 - sbcs x21, x21, x23 - sbcs x22, x22, x10 - sbc x25, x25, xzr - - adds x19, x19, x26 - adcs x20, x20, x24 - adcs x21, x21, x3 - adcs x22, x22, x4 - adcs x23, x25, x23 - adc x24, x10, xzr - - - // x15-x19, x7 <- (AH+AL) x (BH+BL), final step - adds x14, x14, x21 - adcs x15, x15, x22 - adcs x16, x16, x23 - adcs x17, x17, x24 - adc x7, x7, xzr - - // Load AL - ldp x3, x4, [x0] - ldp x5, x6, [x0,#16] - // Load BL - ldp x10, x11, [x1,#0] - ldp x12, x13, [x1,#16] - - // Temporarily store x8 in x2 - stp x8, x9, [x2,#0] - // x21-x28 <- AL x BL - // A0-A1 <- AH + AL, T0 <- mask - adds x3, x3, x5 - adcs x4, x4, x6 - adc x8, xzr, xzr - - // C6, T1 <- BH + BL, C7 <- mask - adds x27, x10, x12 - adcs x9, x11, x13 - adc x28, xzr, xzr - - // C0-C1 <- masked (BH + BL) - sub x23, xzr, x8 - sub x24, xzr, x28 - and x21, x27, x23 - and x22, x9, x23 - - // C4-C5 <- masked (AH + AL), T0 <- combined carry - and x25, x3, x24 - and x26, x4, 
x24 - mul x23, x3, x27 - mul x24, x3, x9 - and x8, x8, x28 - - // C0-C1, T0 <- (AH+AL) x (BH+BL), part 1 - adds x21, x25, x21 - umulh x25, x3, x9 - adcs x22, x26, x22 - umulh x26, x3, x27 - adc x8, x8, xzr - - // C2-C5 <- (AH+AL) x (BH+BL), low part - mul x3, x4, x27 - umulh x27, x4, x27 - adds x24, x24, x26 - adc x25, x25, xzr - - mul x28, x4, x9 - umulh x9, x4, x9 - adds x24, x24, x3 - adcs x25, x25, x27 - adc x26, xzr, xzr - - adds x25, x25, x28 - adc x26, x26, x9 - - ldp x3, x4, [x0,#0] - - // C2-C5, T0 <- (AH+AL) x (BH+BL), final part - adds x25, x21, x25 - umulh x28, x3, x10 - umulh x9, x3, x11 - adcs x26, x22, x26 - mul x21, x3, x10 - mul x22, x3, x11 - adc x8, x8, xzr - - // C0-C1, T1, C7 <- AL x BL - mul x3, x4, x10 - umulh x10, x4, x10 - adds x22, x22, x28 - adc x9, x9, xzr - - mul x27, x4, x11 - umulh x11, x4, x11 - adds x22, x22, x3 - adcs x9, x9, x10 - adc x28, xzr, xzr - - adds x9, x9, x27 - adc x28, x28, x11 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - mul x3, x5, x12 - umulh x10, x5, x12 - subs x23, x23, x21 - sbcs x24, x24, x22 - sbcs x25, x25, x9 - mul x4, x5, x13 - umulh x27, x5, x13 - sbcs x26, x26, x28 - sbc x8, x8, xzr - - // A0, A1, C6, B0 <- AH x BH - mul x5, x6, x12 - umulh x12, x6, x12 - adds x4, x4, x10 - adc x27, x27, xzr - - mul x11, x6, x13 - umulh x13, x6, x13 - adds x4, x4, x5 - adcs x27, x27, x12 - adc x10, xzr, xzr - - adds x27, x27, x11 - adc x10, x10, x13 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x23, x23, x3 - sbcs x24, x24, x4 - sbcs x25, x25, x27 - sbcs x26, x26, x10 - sbc x8, x8, xzr - - adds x23, x23, x9 - adcs x24, x24, x28 - adcs x25, x25, x3 - adcs x26, x26, x4 - adcs x27, x8, x27 - adc x28, x10, xzr - - // Restore x8 - ldp x8, x9, [x2,#0] - - // x8-x10,x20,x15-x17,x19 <- maskd (AH+AL) x (BH+BL) - ALxBL - subs x8, x8, x21 - sbcs x9, x9, x22 - sbcs x19, x19, x23 - sbcs x20, x20, x24 - sbcs x14, x14, x25 - sbcs x15, x15, x26 - sbcs x16, x16, x27 - sbcs x17, x17, x28 - sbc x7, x7, xzr - - // Store 
ALxBL, low - stp x21, x22, [x2] - stp x23, x24, [x2,#16] - - // Load AH - ldp x3, x4, [x0,#32] - ldr x5, [x0,#48] - // Load BH - ldp x10, x11, [x1,#32] - ldr x12, [x1,#48] - - adds x8, x8, x25 - adcs x9, x9, x26 - adcs x19, x19, x27 - adcs x20, x20, x28 - adc x1, xzr, xzr - - add x0, x0, #32 - // Temporarily store x8,x9 in x2 - stp x8,x9, [x2,#32] - // x21-x28 <- AH x BH - - // A0 * B0 - mul x21, x3, x10 // C0 - umulh x24, x3, x10 - - // A0 * B1 - mul x22, x3, x11 - umulh x23, x3, x11 - - // A1 * B0 - mul x8, x4, x10 - umulh x9, x4, x10 - adds x22, x22, x24 - adc x23, x23, xzr - - // A0 * B2 - mul x27, x3, x12 - umulh x28, x3, x12 - adds x22, x22, x8 // C1 - adcs x23, x23, x9 - adc x24, xzr, xzr - - // A2 * B0 - mul x8, x5, x10 - umulh x25, x5, x10 - adds x23, x23, x27 - adcs x24, x24, x25 - adc x25, xzr, xzr - - // A1 * B1 - mul x27, x4, x11 - umulh x9, x4, x11 - adds x23, x23, x8 - adcs x24, x24, x28 - adc x25, x25, xzr - - // A1 * B2 - mul x8, x4, x12 - umulh x28, x4, x12 - adds x23, x23, x27 // C2 - adcs x24, x24, x9 - adc x25, x25, xzr - - // A2 * B1 - mul x27, x5, x11 - umulh x9, x5, x11 - adds x24, x24, x8 - adcs x25, x25, x28 - adc x26, xzr, xzr - - // A2 * B2 - mul x8, x5, x12 - umulh x28, x5, x12 - adds x24, x24, x27 // C3 - adcs x25, x25, x9 - adc x26, x26, xzr - - adds x25, x25, x8 // C4 - adc x26, x26, x28 // C5 - - // Restore x8,x9 - ldp x8,x9, [x2,#32] - - neg x1, x1 - - // x8-x9,x19,x20,x14-x17 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x8, x8, x21 - sbcs x9, x9, x22 - sbcs x19, x19, x23 - sbcs x20, x20, x24 - sbcs x14, x14, x25 - sbcs x15, x15, x26 - sbcs x16, x16, xzr - sbcs x17, x17, xzr - sbc x7, x7, xzr - - // Store (AH+AL) x (BH+BL) - ALxBL - AHxBH, low - stp x8, x9, [x2,#32] - stp x19, x20, [x2,#48] - - adds x1, x1, #1 - adcs x14, x14, x21 - adcs x15, x15, x22 - adcs x16, x16, x23 - adcs x17, x17, x24 - adcs x25, x7, x25 - adc x26, x26, xzr - - stp x14, x15, [x2,#64] - stp x16, x17, [x2,#80] - stp x25, x26, [x2,#96] - - ldp x19, x20, 
[x29,#16] - ldp x21, x22, [x29,#32] - ldp x23, x24, [x29,#48] - ldp x25, x26, [x29,#64] - ldp x27, x28, [x29,#80] - ldp x29, x30, [sp],#96 - ret -.globl _sike_fprdc -.private_extern _sike_fprdc -.align 4 -_sike_fprdc: - stp x29, x30, [sp, #-96]! - add x29, sp, xzr - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - - ldp x2, x3, [x0,#0] // a[0-1] - - // Load the prime constant - adrp x26, Lp434p1@PAGE - add x26, x26, Lp434p1@PAGEOFF - ldp x23, x24, [x26, #0x0] - ldp x25, x26, [x26,#0x10] - - // a[0-1] * p434+1 - mul x4, x2, x23 // C0 - umulh x7, x2, x23 - - mul x5, x2, x24 - umulh x6, x2, x24 - - mul x10, x3, x23 - umulh x11, x3, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x2, x25 - umulh x28, x2, x25 - adds x5, x5, x10 // C1 - adcs x6, x6, x11 - adc x7, xzr, xzr - - mul x10, x3, x24 - umulh x11, x3, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x2, x26 - umulh x28, x2, x26 - adds x6, x6, x10 // C2 - adcs x7, x7, x11 - adc x8, x8, xzr - - mul x10, x3, x25 - umulh x11, x3, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x3, x26 - umulh x28, x3, x26 - adds x7, x7, x10 // C3 - adcs x8, x8, x11 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - - ldp x10, x11, [x0, #0x18] - ldp x12, x13, [x0, #0x28] - ldp x14, x15, [x0, #0x38] - ldp x16, x17, [x0, #0x48] - ldp x19, x20, [x0, #0x58] - ldr x21, [x0, #0x68] - - adds x10, x10, x4 - adcs x11, x11, x5 - adcs x12, x12, x6 - adcs x13, x13, x7 - adcs x14, x14, x8 - adcs x15, x15, x9 - adcs x22, x16, xzr - adcs x17, x17, xzr - adcs x19, x19, xzr - adcs x20, x20, xzr - adc x21, x21, xzr - - ldr x2, [x0,#0x10] // a[2] - // a[2-3] * p434+1 - mul x4, x2, x23 // C0 - umulh x7, x2, x23 - - mul x5, x2, x24 - umulh x6, x2, x24 - - mul x0, x10, x23 - umulh x3, x10, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x2, x25 - umulh x28, x2, x25 - adds x5, x5, x0 // C1 - adcs x6, 
x6, x3 - adc x7, xzr, xzr - - mul x0, x10, x24 - umulh x3, x10, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x2, x26 - umulh x28, x2, x26 - adds x6, x6, x0 // C2 - adcs x7, x7, x3 - adc x8, x8, xzr - - mul x0, x10, x25 - umulh x3, x10, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x10, x26 - umulh x28, x10, x26 - adds x7, x7, x0 // C3 - adcs x8, x8, x3 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - - adds x12, x12, x4 - adcs x13, x13, x5 - adcs x14, x14, x6 - adcs x15, x15, x7 - adcs x16, x22, x8 - adcs x17, x17, x9 - adcs x22, x19, xzr - adcs x20, x20, xzr - adc x21, x21, xzr - - mul x4, x11, x23 // C0 - umulh x7, x11, x23 - - mul x5, x11, x24 - umulh x6, x11, x24 - - mul x10, x12, x23 - umulh x3, x12, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x11, x25 - umulh x28, x11, x25 - adds x5, x5, x10 // C1 - adcs x6, x6, x3 - adc x7, xzr, xzr - - mul x10, x12, x24 - umulh x3, x12, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x11, x26 - umulh x28, x11, x26 - adds x6, x6, x10 // C2 - adcs x7, x7, x3 - adc x8, x8, xzr - - mul x10, x12, x25 - umulh x3, x12, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x12, x26 - umulh x28, x12, x26 - adds x7, x7, x10 // C3 - adcs x8, x8, x3 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - adds x14, x14, x4 - adcs x15, x15, x5 - adcs x16, x16, x6 - adcs x17, x17, x7 - adcs x19, x22, x8 - adcs x20, x20, x9 - adc x22, x21, xzr - - stp x14, x15, [x1, #0x0] // C0, C1 - - mul x4, x13, x23 // C0 - umulh x10, x13, x23 - - mul x5, x13, x24 - umulh x27, x13, x24 - adds x5, x5, x10 // C1 - adc x10, xzr, xzr - - mul x6, x13, x25 - umulh x28, x13, x25 - adds x27, x10, x27 - adcs x6, x6, x27 // C2 - adc x10, xzr, xzr - - mul x7, x13, x26 - umulh x8, x13, x26 - adds x28, x10, x28 - adcs x7, x7, x28 // C3 - adc x8, x8, xzr // C4 - - adds x16, x16, x4 - adcs x17, x17, x5 - adcs x19, 
x19, x6 - adcs x20, x20, x7 - adc x21, x22, x8 - - str x16, [x1, #0x10] - stp x17, x19, [x1, #0x18] - stp x20, x21, [x1, #0x28] - - ldp x19, x20, [x29,#16] - ldp x21, x22, [x29,#32] - ldp x23, x24, [x29,#48] - ldp x25, x26, [x29,#64] - ldp x27, x28, [x29,#80] - ldp x29, x30, [sp],#96 - ret -.globl _sike_fpadd -.private_extern _sike_fpadd -.align 4 -_sike_fpadd: - stp x29,x30, [sp,#-16]! - add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - // Add a + b - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x13 - adcs x6, x6, x14 - adcs x7, x7, x15 - adcs x8, x8, x16 - adc x9, x9, x17 - - // Subtract 2xp434 - adrp x17, Lp434x2@PAGE - add x17, x17, Lp434x2@PAGEOFF - ldp x11, x12, [x17, #0] - ldp x13, x14, [x17, #16] - ldp x15, x16, [x17, #32] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x12 - sbcs x6, x6, x13 - sbcs x7, x7, x14 - sbcs x8, x8, x15 - sbcs x9, x9, x16 - sbc x0, xzr, xzr // x0 can be reused now - - // Add 2xp434 anded with the mask in x0 - and x11, x11, x0 - and x12, x12, x0 - and x13, x13, x0 - and x14, x14, x0 - and x15, x15, x0 - and x16, x16, x0 - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x12 - adcs x6, x6, x13 - adcs x7, x7, x14 - adcs x8, x8, x15 - adc x9, x9, x16 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl _sike_fpsub -.private_extern _sike_fpsub -.align 4 -_sike_fpsub: - stp x29, x30, [sp,#-16]! 
- add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - // Subtract a - b - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - sbcs x9, x9, x17 - sbc x0, xzr, xzr - - // Add 2xp434 anded with the mask in x0 - adrp x17, Lp434x2@PAGE - add x17, x17, Lp434x2@PAGEOFF - - // First half - ldp x11, x12, [x17, #0] - ldp x13, x14, [x17, #16] - ldp x15, x16, [x17, #32] - - // Add 2xp434 anded with the mask in x0 - and x11, x11, x0 - and x12, x12, x0 - and x13, x13, x0 - and x14, x14, x0 - and x15, x15, x0 - and x16, x16, x0 - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x12 - adcs x6, x6, x13 - adcs x7, x7, x14 - adcs x8, x8, x15 - adc x9, x9, x16 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl _sike_mpadd_asm -.private_extern _sike_mpadd_asm -.align 4 -_sike_mpadd_asm: - stp x29, x30, [sp,#-16]! - add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x13 - adcs x6, x6, x14 - adcs x7, x7, x15 - adcs x8, x8, x16 - adc x9, x9, x17 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl _sike_mpsubx2_asm -.private_extern _sike_mpsubx2_asm -.align 4 -_sike_mpsubx2_asm: - stp x29, x30, [sp,#-16]! 
- add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - ldp x7, x8, [x0,#32] - ldp x9, x10, [x0,#48] - ldp x11, x12, [x1,#32] - ldp x13, x14, [x1,#48] - sbcs x7, x7, x11 - sbcs x8, x8, x12 - sbcs x9, x9, x13 - sbcs x10, x10, x14 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - stp x9, x10, [x2,#48] - - ldp x3, x4, [x0,#64] - ldp x5, x6, [x0,#80] - ldp x11, x12, [x1,#64] - ldp x13, x14, [x1,#80] - sbcs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - ldp x7, x8, [x0,#96] - ldp x11, x12, [x1,#96] - sbcs x7, x7, x11 - sbcs x8, x8, x12 - sbc x0, xzr, xzr - - stp x3, x4, [x2,#64] - stp x5, x6, [x2,#80] - stp x7, x8, [x2,#96] - - ldp x29, x30, [sp],#16 - ret -.globl _sike_mpdblsubx2_asm -.private_extern _sike_mpdblsubx2_asm -.align 4 -_sike_mpdblsubx2_asm: - stp x29, x30, [sp, #-16]! - add x29, sp, #0 - - ldp x3, x4, [x2, #0] - ldp x5, x6, [x2,#16] - ldp x7, x8, [x2,#32] - - ldp x11, x12, [x0, #0] - ldp x13, x14, [x0,#16] - ldp x15, x16, [x0,#32] - - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - - // x9 stores carry - adc x9, xzr, xzr - - ldp x11, x12, [x1, #0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, x9, xzr - - stp x3, x4, [x2, #0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - - ldp x3, x4, [x2,#48] - ldp x5, x6, [x2,#64] - ldp x7, x8, [x2,#80] - - ldp x11, x12, [x0,#48] - ldp x13, x14, [x0,#64] - ldp x15, x16, [x0,#80] - - // x9 = 2 - x9 - neg x9, x9 - add x9, x9, #2 - - subs x3, x3, x9 - sbcs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, xzr, xzr - - ldp x11, x12, [x1,#48] - ldp x13, x14, [x1,#64] - ldp x15, x16, 
[x1,#80] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, x9, xzr - - stp x3, x4, [x2,#48] - stp x5, x6, [x2,#64] - stp x7, x8, [x2,#80] - - ldp x3, x4, [x2,#96] - ldp x11, x12, [x0,#96] - ldp x13, x14, [x1,#96] - - // x9 = 2 - x9 - neg x9, x9 - add x9, x9, #2 - - subs x3, x3, x9 - sbcs x3, x3, x11 - sbcs x4, x4, x12 - subs x3, x3, x13 - sbc x4, x4, x14 - stp x3, x4, [x2,#96] - - ldp x29, x30, [sp],#16 - ret -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/chacha/chacha-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/chacha/chacha-armv4.S deleted file mode 100644 index cadf2b623b..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/chacha/chacha-armv4.S +++ /dev/null @@ -1,1498 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
- - -.text -#if defined(__thumb2__) || defined(__clang__) -.syntax unified -#endif -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - -#if defined(__thumb2__) || defined(__clang__) -#define ldrhsb ldrbhs -#endif - -.align 5 -Lsigma: -.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral -Lone: -.long 1,0,0,0 -#if __ARM_MAX_ARCH__>=7 -LOPENSSL_armcap: -.word OPENSSL_armcap_P-LChaCha20_ctr32 -#else -.word -1 -#endif - -.globl _ChaCha20_ctr32 -.private_extern _ChaCha20_ctr32 -#ifdef __thumb2__ -.thumb_func _ChaCha20_ctr32 -#endif -.align 5 -_ChaCha20_ctr32: -LChaCha20_ctr32: - ldr r12,[sp,#0] @ pull pointer to counter and nonce - stmdb sp!,{r0,r1,r2,r4-r11,lr} -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r14,pc,#16 @ _ChaCha20_ctr32 -#else - adr r14,LChaCha20_ctr32 -#endif - cmp r2,#0 @ len==0? -#ifdef __thumb2__ - itt eq -#endif - addeq sp,sp,#4*3 - beq Lno_data -#if __ARM_MAX_ARCH__>=7 - cmp r2,#192 @ test len - bls Lshort - ldr r4,[r14,#-32] - ldr r4,[r14,r4] -# ifdef __APPLE__ - ldr r4,[r4] -# endif - tst r4,#ARMV7_NEON - bne LChaCha20_neon -Lshort: -#endif - ldmia r12,{r4,r5,r6,r7} @ load counter and nonce - sub sp,sp,#4*(16) @ off-load area - sub r14,r14,#64 @ Lsigma - stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce - ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key - ldmia r14,{r0,r1,r2,r3} @ load sigma - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key - stmdb sp!,{r0,r1,r2,r3} @ copy sigma - str r10,[sp,#4*(16+10)] @ off-load "rx" - str r11,[sp,#4*(16+11)] @ off-load "rx" - b Loop_outer_enter - -.align 4 -Loop_outer: - ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material - str r11,[sp,#4*(32+2)] @ save len - str r12, [sp,#4*(32+1)] @ save inp - str r14, [sp,#4*(32+0)] @ save out -Loop_outer_enter: - ldr r11, [sp,#4*(15)] - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - ldr r10, [sp,#4*(13)] - ldr r14,[sp,#4*(14)] - str r11, [sp,#4*(16+15)] - mov r11,#10 - b Loop - -.align 4 -Loop: - subs r11,r11,#1 - add r0,r0,r4 - mov 
r12,r12,ror#16 - add r1,r1,r5 - mov r10,r10,ror#16 - eor r12,r12,r0,ror#16 - eor r10,r10,r1,ror#16 - add r8,r8,r12 - mov r4,r4,ror#20 - add r9,r9,r10 - mov r5,r5,ror#20 - eor r4,r4,r8,ror#20 - eor r5,r5,r9,ror#20 - add r0,r0,r4 - mov r12,r12,ror#24 - add r1,r1,r5 - mov r10,r10,ror#24 - eor r12,r12,r0,ror#24 - eor r10,r10,r1,ror#24 - add r8,r8,r12 - mov r4,r4,ror#25 - add r9,r9,r10 - mov r5,r5,ror#25 - str r10,[sp,#4*(16+13)] - ldr r10,[sp,#4*(16+15)] - eor r4,r4,r8,ror#25 - eor r5,r5,r9,ror#25 - str r8,[sp,#4*(16+8)] - ldr r8,[sp,#4*(16+10)] - add r2,r2,r6 - mov r14,r14,ror#16 - str r9,[sp,#4*(16+9)] - ldr r9,[sp,#4*(16+11)] - add r3,r3,r7 - mov r10,r10,ror#16 - eor r14,r14,r2,ror#16 - eor r10,r10,r3,ror#16 - add r8,r8,r14 - mov r6,r6,ror#20 - add r9,r9,r10 - mov r7,r7,ror#20 - eor r6,r6,r8,ror#20 - eor r7,r7,r9,ror#20 - add r2,r2,r6 - mov r14,r14,ror#24 - add r3,r3,r7 - mov r10,r10,ror#24 - eor r14,r14,r2,ror#24 - eor r10,r10,r3,ror#24 - add r8,r8,r14 - mov r6,r6,ror#25 - add r9,r9,r10 - mov r7,r7,ror#25 - eor r6,r6,r8,ror#25 - eor r7,r7,r9,ror#25 - add r0,r0,r5 - mov r10,r10,ror#16 - add r1,r1,r6 - mov r12,r12,ror#16 - eor r10,r10,r0,ror#16 - eor r12,r12,r1,ror#16 - add r8,r8,r10 - mov r5,r5,ror#20 - add r9,r9,r12 - mov r6,r6,ror#20 - eor r5,r5,r8,ror#20 - eor r6,r6,r9,ror#20 - add r0,r0,r5 - mov r10,r10,ror#24 - add r1,r1,r6 - mov r12,r12,ror#24 - eor r10,r10,r0,ror#24 - eor r12,r12,r1,ror#24 - add r8,r8,r10 - mov r5,r5,ror#25 - str r10,[sp,#4*(16+15)] - ldr r10,[sp,#4*(16+13)] - add r9,r9,r12 - mov r6,r6,ror#25 - eor r5,r5,r8,ror#25 - eor r6,r6,r9,ror#25 - str r8,[sp,#4*(16+10)] - ldr r8,[sp,#4*(16+8)] - add r2,r2,r7 - mov r10,r10,ror#16 - str r9,[sp,#4*(16+11)] - ldr r9,[sp,#4*(16+9)] - add r3,r3,r4 - mov r14,r14,ror#16 - eor r10,r10,r2,ror#16 - eor r14,r14,r3,ror#16 - add r8,r8,r10 - mov r7,r7,ror#20 - add r9,r9,r14 - mov r4,r4,ror#20 - eor r7,r7,r8,ror#20 - eor r4,r4,r9,ror#20 - add r2,r2,r7 - mov r10,r10,ror#24 - add r3,r3,r4 - mov r14,r14,ror#24 - eor 
r10,r10,r2,ror#24 - eor r14,r14,r3,ror#24 - add r8,r8,r10 - mov r7,r7,ror#25 - add r9,r9,r14 - mov r4,r4,ror#25 - eor r7,r7,r8,ror#25 - eor r4,r4,r9,ror#25 - bne Loop - - ldr r11,[sp,#4*(32+2)] @ load len - - str r8, [sp,#4*(16+8)] @ modulo-scheduled store - str r9, [sp,#4*(16+9)] - str r12,[sp,#4*(16+12)] - str r10, [sp,#4*(16+13)] - str r14,[sp,#4*(16+14)] - - @ at this point we have first half of 512-bit result in - @ rx and second half at sp+4*(16+8) - - cmp r11,#64 @ done yet? -#ifdef __thumb2__ - itete lo -#endif - addlo r12,sp,#4*(0) @ shortcut or ... - ldrhs r12,[sp,#4*(32+1)] @ ... load inp - addlo r14,sp,#4*(0) @ shortcut or ... - ldrhs r14,[sp,#4*(32+0)] @ ... load out - - ldr r8,[sp,#4*(0)] @ load key material - ldr r9,[sp,#4*(1)] - -#if __ARM_ARCH__>=6 || !defined(__ARMEB__) -# if __ARM_ARCH__<7 - orr r10,r12,r14 - tst r10,#3 @ are input and output aligned? - ldr r10,[sp,#4*(2)] - bne Lunaligned - cmp r11,#64 @ restore flags -# else - ldr r10,[sp,#4*(2)] -# endif - ldr r11,[sp,#4*(3)] - - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - - add r2,r2,r10 - add r3,r3,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r0,r0,r8 @ xor with input - eorhs r1,r1,r9 - add r8,sp,#4*(4) - str r0,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r2,r2,r10 - eorhs r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r1,[r14,#-12] - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - add r6,r6,r10 - add r7,r7,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && 
defined(__ARMEB__) - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r4,r4,r8 - eorhs r5,r5,r9 - add r8,sp,#4*(8) - str r4,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r6,r6,r10 - eorhs r7,r7,r11 - str r5,[r14,#-12] - ldmia r8,{r8,r9,r10,r11} @ load key material - str r6,[r14,#-8] - add r0,sp,#4*(16+8) - str r7,[r14,#-4] - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] -# ifdef __thumb2__ - itt hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it - strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it - add r2,r2,r10 - add r3,r3,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r0,r0,r8 - eorhs r1,r1,r9 - add r8,sp,#4*(12) - str r0,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r2,r2,r10 - eorhs r3,r3,r11 - str r1,[r14,#-12] - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 -# ifdef __thumb2__ - itt hi -# endif - addhi r8,r8,#1 @ next counter value - strhi r8,[sp,#4*(12)] @ save next counter value -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - add r6,r6,r10 - add r7,r7,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r4,r4,r8 - eorhs r5,r5,r9 -# ifdef __thumb2__ - it ne -# endif - ldrne r8,[sp,#4*(32+2)] @ re-load len -# ifdef __thumb2__ - itt hs -# endif - eorhs r6,r6,r10 - eorhs r7,r7,r11 - str 
r4,[r14],#16 @ store output - str r5,[r14,#-12] -# ifdef __thumb2__ - it hs -# endif - subhs r11,r8,#64 @ len-=64 - str r6,[r14,#-8] - str r7,[r14,#-4] - bhi Loop_outer - - beq Ldone -# if __ARM_ARCH__<7 - b Ltail - -.align 4 -Lunaligned:@ unaligned endian-neutral path - cmp r11,#64 @ restore flags -# endif -#endif -#if __ARM_ARCH__<7 - ldr r11,[sp,#4*(3)] - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 - add r2,r2,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r3,r3,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r0,r8,r0 @ xor with input (or zero) - eor r1,r9,r1 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r2,r10,r2 - strb r0,[r14],#16 @ store output - eor r3,r11,r3 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r1,[r14,#-12] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-8] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r3,[r14,#-4] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-15] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r1,[r14,#-11] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-7] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r3,[r14,#-3] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-14] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r1,[r14,#-10] - strb r2,[r14,#-6] - eor r0,r8,r0,lsr#8 - strb r3,[r14,#-2] - eor r1,r9,r1,lsr#8 - strb r0,[r14,#-13] - eor r2,r10,r2,lsr#8 - strb r1,[r14,#-9] - eor r3,r11,r3,lsr#8 - strb r2,[r14,#-5] - strb r3,[r14,#-1] - add r8,sp,#4*(4+0) - 
ldmia r8,{r8,r9,r10,r11} @ load key material - add r0,sp,#4*(16+8) - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 - add r6,r6,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r7,r7,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r4,r8,r4 @ xor with input (or zero) - eor r5,r9,r5 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r6,r10,r6 - strb r4,[r14],#16 @ store output - eor r7,r11,r7 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r5,[r14,#-12] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-8] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r7,[r14,#-4] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-15] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r5,[r14,#-11] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-7] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r7,[r14,#-3] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-14] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r5,[r14,#-10] - strb r6,[r14,#-6] - eor r4,r8,r4,lsr#8 - strb r7,[r14,#-2] - eor r5,r9,r5,lsr#8 - strb r4,[r14,#-13] - eor r6,r10,r6,lsr#8 - strb r5,[r14,#-9] - eor r7,r11,r7,lsr#8 - strb r6,[r14,#-5] - strb r7,[r14,#-1] - add r8,sp,#4*(4+4) - ldmia r8,{r8,r9,r10,r11} @ load key material - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half -# ifdef __thumb2__ - itt hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" - strhi r11,[sp,#4*(16+11)] @ copy "rx" - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 - add r2,r2,r10 -# 
ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r3,r3,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r0,r8,r0 @ xor with input (or zero) - eor r1,r9,r1 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r2,r10,r2 - strb r0,[r14],#16 @ store output - eor r3,r11,r3 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r1,[r14,#-12] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-8] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r3,[r14,#-4] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-15] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r1,[r14,#-11] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-7] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r3,[r14,#-3] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-14] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r1,[r14,#-10] - strb r2,[r14,#-6] - eor r0,r8,r0,lsr#8 - strb r3,[r14,#-2] - eor r1,r9,r1,lsr#8 - strb r0,[r14,#-13] - eor r2,r10,r2,lsr#8 - strb r1,[r14,#-9] - eor r3,r11,r3,lsr#8 - strb r2,[r14,#-5] - strb r3,[r14,#-1] - add r8,sp,#4*(4+8) - ldmia r8,{r8,r9,r10,r11} @ load key material - add r4,r4,r8 @ accumulate key material -# ifdef __thumb2__ - itt hi -# endif - addhi r8,r8,#1 @ next counter value - strhi r8,[sp,#4*(12)] @ save next counter value - add r5,r5,r9 - add r6,r6,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... 
load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r7,r7,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r4,r8,r4 @ xor with input (or zero) - eor r5,r9,r5 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r6,r10,r6 - strb r4,[r14],#16 @ store output - eor r7,r11,r7 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r5,[r14,#-12] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-8] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r7,[r14,#-4] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-15] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r5,[r14,#-11] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-7] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r7,[r14,#-3] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-14] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r5,[r14,#-10] - strb r6,[r14,#-6] - eor r4,r8,r4,lsr#8 - strb r7,[r14,#-2] - eor r5,r9,r5,lsr#8 - strb r4,[r14,#-13] - eor r6,r10,r6,lsr#8 - strb r5,[r14,#-9] - eor r7,r11,r7,lsr#8 - strb r6,[r14,#-5] - strb r7,[r14,#-1] -# ifdef __thumb2__ - it ne -# endif - ldrne r8,[sp,#4*(32+2)] @ re-load len -# ifdef __thumb2__ - it hs -# endif - subhs r11,r8,#64 @ len-=64 - bhi Loop_outer - - beq Ldone -#endif - -Ltail: - ldr r12,[sp,#4*(32+1)] @ load inp - add r9,sp,#4*(0) - ldr r14,[sp,#4*(32+0)] @ load out - -Loop_tail: - ldrb r10,[r9],#1 @ read buffer on stack - ldrb r11,[r12],#1 @ read input - subs r8,r8,#1 - eor r11,r11,r10 - strb r11,[r14],#1 @ store output - bne Loop_tail - -Ldone: - add sp,sp,#4*(32+3) -Lno_data: - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} - -#if 
__ARM_MAX_ARCH__>=7 - - - -#ifdef __thumb2__ -.thumb_func ChaCha20_neon -#endif -.align 5 -ChaCha20_neon: - ldr r12,[sp,#0] @ pull pointer to counter and nonce - stmdb sp!,{r0,r1,r2,r4-r11,lr} -LChaCha20_neon: - adr r14,Lsigma - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so - stmdb sp!,{r0,r1,r2,r3} - - vld1.32 {q1,q2},[r3] @ load key - ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key - - sub sp,sp,#4*(16+16) - vld1.32 {q3},[r12] @ load counter and nonce - add r12,sp,#4*8 - ldmia r14,{r0,r1,r2,r3} @ load sigma - vld1.32 {q0},[r14]! @ load sigma - vld1.32 {q12},[r14] @ one - vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce - vst1.32 {q0,q1},[sp] @ copy sigma|1/2key - - str r10,[sp,#4*(16+10)] @ off-load "rx" - str r11,[sp,#4*(16+11)] @ off-load "rx" - vshl.i32 d26,d24,#1 @ two - vstr d24,[sp,#4*(16+0)] - vshl.i32 d28,d24,#2 @ four - vstr d26,[sp,#4*(16+2)] - vmov q4,q0 - vstr d28,[sp,#4*(16+4)] - vmov q8,q0 - vmov q5,q1 - vmov q9,q1 - b Loop_neon_enter - -.align 4 -Loop_neon_outer: - ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material - cmp r11,#64*2 @ if len<=64*2 - bls Lbreak_neon @ switch to integer-only - vmov q4,q0 - str r11,[sp,#4*(32+2)] @ save len - vmov q8,q0 - str r12, [sp,#4*(32+1)] @ save inp - vmov q5,q1 - str r14, [sp,#4*(32+0)] @ save out - vmov q9,q1 -Loop_neon_enter: - ldr r11, [sp,#4*(15)] - vadd.i32 q7,q3,q12 @ counter+1 - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - vmov q6,q2 - ldr r10, [sp,#4*(13)] - vmov q10,q2 - ldr r14,[sp,#4*(14)] - vadd.i32 q11,q7,q12 @ counter+2 - str r11, [sp,#4*(16+15)] - mov r11,#10 - add r12,r12,#3 @ counter+3 - b Loop_neon - -.align 4 -Loop_neon: - subs r11,r11,#1 - vadd.i32 q0,q0,q1 - add r0,r0,r4 - vadd.i32 q4,q4,q5 - mov r12,r12,ror#16 - vadd.i32 q8,q8,q9 - add r1,r1,r5 - veor q3,q3,q0 - mov r10,r10,ror#16 - veor q7,q7,q4 - eor r12,r12,r0,ror#16 - veor q11,q11,q8 - eor r10,r10,r1,ror#16 - vrev32.16 q3,q3 - add r8,r8,r12 - vrev32.16 q7,q7 - mov r4,r4,ror#20 - vrev32.16 q11,q11 - add 
r9,r9,r10 - vadd.i32 q2,q2,q3 - mov r5,r5,ror#20 - vadd.i32 q6,q6,q7 - eor r4,r4,r8,ror#20 - vadd.i32 q10,q10,q11 - eor r5,r5,r9,ror#20 - veor q12,q1,q2 - add r0,r0,r4 - veor q13,q5,q6 - mov r12,r12,ror#24 - veor q14,q9,q10 - add r1,r1,r5 - vshr.u32 q1,q12,#20 - mov r10,r10,ror#24 - vshr.u32 q5,q13,#20 - eor r12,r12,r0,ror#24 - vshr.u32 q9,q14,#20 - eor r10,r10,r1,ror#24 - vsli.32 q1,q12,#12 - add r8,r8,r12 - vsli.32 q5,q13,#12 - mov r4,r4,ror#25 - vsli.32 q9,q14,#12 - add r9,r9,r10 - vadd.i32 q0,q0,q1 - mov r5,r5,ror#25 - vadd.i32 q4,q4,q5 - str r10,[sp,#4*(16+13)] - vadd.i32 q8,q8,q9 - ldr r10,[sp,#4*(16+15)] - veor q12,q3,q0 - eor r4,r4,r8,ror#25 - veor q13,q7,q4 - eor r5,r5,r9,ror#25 - veor q14,q11,q8 - str r8,[sp,#4*(16+8)] - vshr.u32 q3,q12,#24 - ldr r8,[sp,#4*(16+10)] - vshr.u32 q7,q13,#24 - add r2,r2,r6 - vshr.u32 q11,q14,#24 - mov r14,r14,ror#16 - vsli.32 q3,q12,#8 - str r9,[sp,#4*(16+9)] - vsli.32 q7,q13,#8 - ldr r9,[sp,#4*(16+11)] - vsli.32 q11,q14,#8 - add r3,r3,r7 - vadd.i32 q2,q2,q3 - mov r10,r10,ror#16 - vadd.i32 q6,q6,q7 - eor r14,r14,r2,ror#16 - vadd.i32 q10,q10,q11 - eor r10,r10,r3,ror#16 - veor q12,q1,q2 - add r8,r8,r14 - veor q13,q5,q6 - mov r6,r6,ror#20 - veor q14,q9,q10 - add r9,r9,r10 - vshr.u32 q1,q12,#25 - mov r7,r7,ror#20 - vshr.u32 q5,q13,#25 - eor r6,r6,r8,ror#20 - vshr.u32 q9,q14,#25 - eor r7,r7,r9,ror#20 - vsli.32 q1,q12,#7 - add r2,r2,r6 - vsli.32 q5,q13,#7 - mov r14,r14,ror#24 - vsli.32 q9,q14,#7 - add r3,r3,r7 - vext.8 q2,q2,q2,#8 - mov r10,r10,ror#24 - vext.8 q6,q6,q6,#8 - eor r14,r14,r2,ror#24 - vext.8 q10,q10,q10,#8 - eor r10,r10,r3,ror#24 - vext.8 q1,q1,q1,#4 - add r8,r8,r14 - vext.8 q5,q5,q5,#4 - mov r6,r6,ror#25 - vext.8 q9,q9,q9,#4 - add r9,r9,r10 - vext.8 q3,q3,q3,#12 - mov r7,r7,ror#25 - vext.8 q7,q7,q7,#12 - eor r6,r6,r8,ror#25 - vext.8 q11,q11,q11,#12 - eor r7,r7,r9,ror#25 - vadd.i32 q0,q0,q1 - add r0,r0,r5 - vadd.i32 q4,q4,q5 - mov r10,r10,ror#16 - vadd.i32 q8,q8,q9 - add r1,r1,r6 - veor q3,q3,q0 - mov r12,r12,ror#16 - 
veor q7,q7,q4 - eor r10,r10,r0,ror#16 - veor q11,q11,q8 - eor r12,r12,r1,ror#16 - vrev32.16 q3,q3 - add r8,r8,r10 - vrev32.16 q7,q7 - mov r5,r5,ror#20 - vrev32.16 q11,q11 - add r9,r9,r12 - vadd.i32 q2,q2,q3 - mov r6,r6,ror#20 - vadd.i32 q6,q6,q7 - eor r5,r5,r8,ror#20 - vadd.i32 q10,q10,q11 - eor r6,r6,r9,ror#20 - veor q12,q1,q2 - add r0,r0,r5 - veor q13,q5,q6 - mov r10,r10,ror#24 - veor q14,q9,q10 - add r1,r1,r6 - vshr.u32 q1,q12,#20 - mov r12,r12,ror#24 - vshr.u32 q5,q13,#20 - eor r10,r10,r0,ror#24 - vshr.u32 q9,q14,#20 - eor r12,r12,r1,ror#24 - vsli.32 q1,q12,#12 - add r8,r8,r10 - vsli.32 q5,q13,#12 - mov r5,r5,ror#25 - vsli.32 q9,q14,#12 - str r10,[sp,#4*(16+15)] - vadd.i32 q0,q0,q1 - ldr r10,[sp,#4*(16+13)] - vadd.i32 q4,q4,q5 - add r9,r9,r12 - vadd.i32 q8,q8,q9 - mov r6,r6,ror#25 - veor q12,q3,q0 - eor r5,r5,r8,ror#25 - veor q13,q7,q4 - eor r6,r6,r9,ror#25 - veor q14,q11,q8 - str r8,[sp,#4*(16+10)] - vshr.u32 q3,q12,#24 - ldr r8,[sp,#4*(16+8)] - vshr.u32 q7,q13,#24 - add r2,r2,r7 - vshr.u32 q11,q14,#24 - mov r10,r10,ror#16 - vsli.32 q3,q12,#8 - str r9,[sp,#4*(16+11)] - vsli.32 q7,q13,#8 - ldr r9,[sp,#4*(16+9)] - vsli.32 q11,q14,#8 - add r3,r3,r4 - vadd.i32 q2,q2,q3 - mov r14,r14,ror#16 - vadd.i32 q6,q6,q7 - eor r10,r10,r2,ror#16 - vadd.i32 q10,q10,q11 - eor r14,r14,r3,ror#16 - veor q12,q1,q2 - add r8,r8,r10 - veor q13,q5,q6 - mov r7,r7,ror#20 - veor q14,q9,q10 - add r9,r9,r14 - vshr.u32 q1,q12,#25 - mov r4,r4,ror#20 - vshr.u32 q5,q13,#25 - eor r7,r7,r8,ror#20 - vshr.u32 q9,q14,#25 - eor r4,r4,r9,ror#20 - vsli.32 q1,q12,#7 - add r2,r2,r7 - vsli.32 q5,q13,#7 - mov r10,r10,ror#24 - vsli.32 q9,q14,#7 - add r3,r3,r4 - vext.8 q2,q2,q2,#8 - mov r14,r14,ror#24 - vext.8 q6,q6,q6,#8 - eor r10,r10,r2,ror#24 - vext.8 q10,q10,q10,#8 - eor r14,r14,r3,ror#24 - vext.8 q1,q1,q1,#12 - add r8,r8,r10 - vext.8 q5,q5,q5,#12 - mov r7,r7,ror#25 - vext.8 q9,q9,q9,#12 - add r9,r9,r14 - vext.8 q3,q3,q3,#4 - mov r4,r4,ror#25 - vext.8 q7,q7,q7,#4 - eor r7,r7,r8,ror#25 - vext.8 
q11,q11,q11,#4 - eor r4,r4,r9,ror#25 - bne Loop_neon - - add r11,sp,#32 - vld1.32 {q12,q13},[sp] @ load key material - vld1.32 {q14,q15},[r11] - - ldr r11,[sp,#4*(32+2)] @ load len - - str r8, [sp,#4*(16+8)] @ modulo-scheduled store - str r9, [sp,#4*(16+9)] - str r12,[sp,#4*(16+12)] - str r10, [sp,#4*(16+13)] - str r14,[sp,#4*(16+14)] - - @ at this point we have first half of 512-bit result in - @ rx and second half at sp+4*(16+8) - - ldr r12,[sp,#4*(32+1)] @ load inp - ldr r14,[sp,#4*(32+0)] @ load out - - vadd.i32 q0,q0,q12 @ accumulate key material - vadd.i32 q4,q4,q12 - vadd.i32 q8,q8,q12 - vldr d24,[sp,#4*(16+0)] @ one - - vadd.i32 q1,q1,q13 - vadd.i32 q5,q5,q13 - vadd.i32 q9,q9,q13 - vldr d26,[sp,#4*(16+2)] @ two - - vadd.i32 q2,q2,q14 - vadd.i32 q6,q6,q14 - vadd.i32 q10,q10,q14 - vadd.i32 d14,d14,d24 @ counter+1 - vadd.i32 d22,d22,d26 @ counter+2 - - vadd.i32 q3,q3,q15 - vadd.i32 q7,q7,q15 - vadd.i32 q11,q11,q15 - - cmp r11,#64*4 - blo Ltail_neon - - vld1.8 {q12,q13},[r12]! @ load input - mov r11,sp - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 @ xor with input - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! - - veor q4,q4,q12 - vst1.8 {q0,q1},[r14]! @ store output - veor q5,q5,q13 - vld1.8 {q12,q13},[r12]! - veor q6,q6,q14 - vst1.8 {q2,q3},[r14]! - veor q7,q7,q15 - vld1.8 {q14,q15},[r12]! - - veor q8,q8,q12 - vld1.32 {q0,q1},[r11]! @ load for next iteration - veor d25,d25,d25 - vldr d24,[sp,#4*(16+4)] @ four - veor q9,q9,q13 - vld1.32 {q2,q3},[r11] - veor q10,q10,q14 - vst1.8 {q4,q5},[r14]! - veor q11,q11,q15 - vst1.8 {q6,q7},[r14]! - - vadd.i32 d6,d6,d24 @ next counter value - vldr d24,[sp,#4*(16+0)] @ one - - ldmia sp,{r8,r9,r10,r11} @ load key material - add r0,r0,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - vst1.8 {q8,q9},[r14]! - add r1,r1,r9 - ldr r9,[r12,#-12] - vst1.8 {q10,q11},[r14]! 
- add r2,r2,r10 - ldr r10,[r12,#-8] - add r3,r3,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif - eor r0,r0,r8 @ xor with input - add r8,sp,#4*(4) - eor r1,r1,r9 - str r0,[r14],#16 @ store output - eor r2,r2,r10 - str r1,[r14,#-12] - eor r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - add r5,r5,r9 - ldr r9,[r12,#-12] - add r6,r6,r10 - ldr r10,[r12,#-8] - add r7,r7,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - eor r4,r4,r8 - add r8,sp,#4*(8) - eor r5,r5,r9 - str r4,[r14],#16 @ store output - eor r6,r6,r10 - str r5,[r14,#-12] - eor r7,r7,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r6,[r14,#-8] - add r0,sp,#4*(16+8) - str r7,[r14,#-4] - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - add r1,r1,r9 - ldr r9,[r12,#-12] -# ifdef __thumb2__ - it hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it - add r2,r2,r10 - ldr r10,[r12,#-8] -# ifdef __thumb2__ - it hi -# endif - strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it - add r3,r3,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif - eor r0,r0,r8 - add r8,sp,#4*(12) - eor r1,r1,r9 - str r0,[r14],#16 @ store output - eor r2,r2,r10 - str r1,[r14,#-12] - eor r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r8,r8,#4 @ next counter value - add r5,r5,r9 - str r8,[sp,#4*(12)] @ save next counter value - ldr r8,[r12],#16 @ load input - add r6,r6,r10 - add r4,r4,#3 @ counter+3 - ldr r9,[r12,#-12] - add r7,r7,r11 - ldr r10,[r12,#-8] - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - eor r4,r4,r8 -# ifdef 
__thumb2__ - it hi -# endif - ldrhi r8,[sp,#4*(32+2)] @ re-load len - eor r5,r5,r9 - eor r6,r6,r10 - str r4,[r14],#16 @ store output - eor r7,r7,r11 - str r5,[r14,#-12] - sub r11,r8,#64*4 @ len-=64*4 - str r6,[r14,#-8] - str r7,[r14,#-4] - bhi Loop_neon_outer - - b Ldone_neon - -.align 4 -Lbreak_neon: - @ harmonize NEON and integer-only stack frames: load data - @ from NEON frame, but save to integer-only one; distance - @ between the two is 4*(32+4+16-32)=4*(20). - - str r11, [sp,#4*(20+32+2)] @ save len - add r11,sp,#4*(32+4) - str r12, [sp,#4*(20+32+1)] @ save inp - str r14, [sp,#4*(20+32+0)] @ save out - - ldr r12,[sp,#4*(16+10)] - ldr r14,[sp,#4*(16+11)] - vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement - str r12,[sp,#4*(20+16+10)] @ copy "rx" - str r14,[sp,#4*(20+16+11)] @ copy "rx" - - ldr r11, [sp,#4*(15)] - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - ldr r10, [sp,#4*(13)] - ldr r14,[sp,#4*(14)] - str r11, [sp,#4*(20+16+15)] - add r11,sp,#4*(20) - vst1.32 {q0,q1},[r11]! @ copy key - add sp,sp,#4*(20) @ switch frame - vst1.32 {q2,q3},[r11] - mov r11,#10 - b Loop @ go integer-only - -.align 4 -Ltail_neon: - cmp r11,#64*3 - bhs L192_or_more_neon - cmp r11,#64*2 - bhs L128_or_more_neon - cmp r11,#64*1 - bhs L64_or_more_neon - - add r8,sp,#4*(8) - vst1.8 {q0,q1},[sp] - add r10,sp,#4*(0) - vst1.8 {q2,q3},[r8] - b Loop_tail_neon - -.align 4 -L64_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - veor q2,q2,q14 - veor q3,q3,q15 - vst1.8 {q0,q1},[r14]! - vst1.8 {q2,q3},[r14]! - - beq Ldone_neon - - add r8,sp,#4*(8) - vst1.8 {q4,q5},[sp] - add r10,sp,#4*(0) - vst1.8 {q6,q7},[r8] - sub r11,r11,#64*1 @ len-=64*1 - b Loop_tail_neon - -.align 4 -L128_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! - - veor q4,q4,q12 - veor q5,q5,q13 - vst1.8 {q0,q1},[r14]! 
- veor q6,q6,q14 - vst1.8 {q2,q3},[r14]! - veor q7,q7,q15 - vst1.8 {q4,q5},[r14]! - vst1.8 {q6,q7},[r14]! - - beq Ldone_neon - - add r8,sp,#4*(8) - vst1.8 {q8,q9},[sp] - add r10,sp,#4*(0) - vst1.8 {q10,q11},[r8] - sub r11,r11,#64*2 @ len-=64*2 - b Loop_tail_neon - -.align 4 -L192_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! - - veor q4,q4,q12 - veor q5,q5,q13 - vld1.8 {q12,q13},[r12]! - veor q6,q6,q14 - vst1.8 {q0,q1},[r14]! - veor q7,q7,q15 - vld1.8 {q14,q15},[r12]! - - veor q8,q8,q12 - vst1.8 {q2,q3},[r14]! - veor q9,q9,q13 - vst1.8 {q4,q5},[r14]! - veor q10,q10,q14 - vst1.8 {q6,q7},[r14]! - veor q11,q11,q15 - vst1.8 {q8,q9},[r14]! - vst1.8 {q10,q11},[r14]! - - beq Ldone_neon - - ldmia sp,{r8,r9,r10,r11} @ load key material - add r0,r0,r8 @ accumulate key material - add r8,sp,#4*(4) - add r1,r1,r9 - add r2,r2,r10 - add r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - - add r4,r4,r8 @ accumulate key material - add r8,sp,#4*(8) - add r5,r5,r9 - add r6,r6,r10 - add r7,r7,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7} - add r0,sp,#4*(16+8) - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - add r8,sp,#4*(12) - add r1,r1,r9 - add r2,r2,r10 - add r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - - add r4,r4,r8 @ accumulate key material - add r8,sp,#4*(8) - add r5,r5,r9 - add r4,r4,#3 @ counter+3 - add r6,r6,r10 - add r7,r7,r11 - ldr r11,[sp,#4*(32+2)] @ re-load len -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7} - add r10,sp,#4*(0) - sub r11,r11,#64*3 @ len-=64*3 - -Loop_tail_neon: - ldrb 
r8,[r10],#1 @ read buffer on stack - ldrb r9,[r12],#1 @ read input - subs r11,r11,#1 - eor r8,r8,r9 - strb r8,[r14],#1 @ store output - bne Loop_tail_neon - -Ldone_neon: - add sp,sp,#4*(32+4) - vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15} - add sp,sp,#4*(16+3) - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} - -.comm _OPENSSL_armcap_P,4 -.non_lazy_symbol_pointer -OPENSSL_armcap_P: -.indirect_symbol _OPENSSL_armcap_P -.long 0 -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aes-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aes-armv4.S deleted file mode 100644 index 63e2ec7163..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aes-armv4.S +++ /dev/null @@ -1,1233 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ ==================================================================== - -@ AES for ARMv4 - -@ January 2007. -@ -@ Code uses single 1K S-box and is >2 times faster than code generated -@ by gcc-3.4.1. 
This is thanks to unique feature of ARMv4 ISA, which -@ allows to merge logical or arithmetic operation with shift or rotate -@ in one instruction and emit combined result every cycle. The module -@ is endian-neutral. The performance is ~42 cycles/byte for 128-bit -@ key [on single-issue Xscale PXA250 core]. - -@ May 2007. -@ -@ AES_set_[en|de]crypt_key is added. - -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 12% improvement on -@ Cortex A8 core and ~25 cycles per byte processed with 128-bit key. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 16% -@ improvement on Cortex A8 core and ~21.5 cycles per byte. - -#ifndef __KERNEL__ -# include -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 AES -@ instructions are in aesv8-armx.pl.) - - -.text -#if defined(__thumb2__) && !defined(__APPLE__) -.syntax unified -.thumb -#else -.code 32 -#undef __thumb2__ -#endif - - -.align 5 -AES_Te: -.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d -.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554 -.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d -.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a -.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87 -.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b -.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea -.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b -.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a -.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f -.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108 -.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f -.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e -.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5 -.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d -.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f -.word 
0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e -.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb -.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce -.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497 -.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c -.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed -.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b -.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a -.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16 -.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594 -.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81 -.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3 -.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a -.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504 -.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163 -.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d -.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f -.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739 -.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47 -.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395 -.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f -.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883 -.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c -.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76 -.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e -.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4 -.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6 -.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b -.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7 -.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0 -.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25 -.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818 -.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72 -.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651 -.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21 -.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85 -.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa -.word 
0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12 -.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0 -.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9 -.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133 -.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7 -.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920 -.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a -.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17 -.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8 -.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11 -.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a -@ Te4[256] -.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 -.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 -.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0 -.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 -.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc -.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 -.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a -.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 -.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0 -.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 -.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b -.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf -.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85 -.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 -.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5 -.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 -.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17 -.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 -.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88 -.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb -.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c -.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 -.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9 -.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 -.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6 -.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 
0x8a -.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e -.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e -.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94 -.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf -.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68 -.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 -@ rcon[] -.word 0x01000000, 0x02000000, 0x04000000, 0x08000000 -.word 0x10000000, 0x20000000, 0x40000000, 0x80000000 -.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0 - - -@ void aes_nohw_encrypt(const unsigned char *in, unsigned char *out, -@ const AES_KEY *key) { -.globl _aes_nohw_encrypt -.private_extern _aes_nohw_encrypt -#ifdef __thumb2__ -.thumb_func _aes_nohw_encrypt -#endif -.align 5 -_aes_nohw_encrypt: -#ifndef __thumb2__ - sub r3,pc,#8 @ _aes_nohw_encrypt -#else - adr r3,. -#endif - stmdb sp!,{r1,r4-r12,lr} -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Te -#else - sub r10,r3,#_aes_nohw_encrypt-AES_Te @ Te -#endif - mov r12,r0 @ inp - mov r11,r2 -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... 
- ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - orr r3,r3,r5,lsl#16 - orr r3,r3,r6,lsl#24 -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif -#endif - bl _armv4_AES_encrypt - - ldr r12,[sp],#4 @ pop out -#if __ARM_ARCH__>=7 -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r12,#0] - str r1,[r12,#4] - str r2,[r12,#8] - str r3,[r12,#12] -#else - mov r4,r0,lsr#24 @ write output in endian-neutral - mov r5,r0,lsr#16 @ manner... - mov r6,r0,lsr#8 - strb r4,[r12,#0] - strb r5,[r12,#1] - mov r4,r1,lsr#24 - strb r6,[r12,#2] - mov r5,r1,lsr#16 - strb r0,[r12,#3] - mov r6,r1,lsr#8 - strb r4,[r12,#4] - strb r5,[r12,#5] - mov r4,r2,lsr#24 - strb r6,[r12,#6] - mov r5,r2,lsr#16 - strb r1,[r12,#7] - mov r6,r2,lsr#8 - strb r4,[r12,#8] - strb r5,[r12,#9] - mov r4,r3,lsr#24 - strb r6,[r12,#10] - mov r5,r3,lsr#16 - strb r2,[r12,#11] - mov r6,r3,lsr#8 - strb r4,[r12,#12] - strb r5,[r12,#13] - strb r6,[r12,#14] - strb r3,[r12,#15] -#endif -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - - -#ifdef __thumb2__ -.thumb_func _armv4_AES_encrypt -#endif -.align 2 -_armv4_AES_encrypt: - str lr,[sp,#-4]! 
@ push lr - ldmia r11!,{r4,r5,r6,r7} - eor r0,r0,r4 - ldr r12,[r11,#240-16] - eor r1,r1,r5 - eor r2,r2,r6 - eor r3,r3,r7 - sub r12,r12,#1 - mov lr,#255 - - and r7,lr,r0 - and r8,lr,r0,lsr#8 - and r9,lr,r0,lsr#16 - mov r0,r0,lsr#24 -Lenc_loop: - ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0] - and r7,lr,r1,lsr#16 @ i0 - ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8] - and r8,lr,r1 - ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16] - and r9,lr,r1,lsr#8 - ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24] - mov r1,r1,lsr#24 - - ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16] - ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0] - ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8] - eor r0,r0,r7,ror#8 - ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r5,r8,ror#8 - and r8,lr,r2,lsr#16 @ i1 - eor r6,r6,r9,ror#8 - and r9,lr,r2 - ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8] - eor r1,r1,r4,ror#24 - ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16] - mov r2,r2,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0] - eor r0,r0,r7,ror#16 - ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24] - and r7,lr,r3 @ i0 - eor r1,r1,r8,ror#8 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r6,r9,ror#16 - and r9,lr,r3,lsr#16 @ i2 - ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0] - eor r2,r2,r5,ror#16 - ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8] - mov r3,r3,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16] - eor r0,r0,r7,ror#24 - ldr r7,[r11],#16 - eor r1,r1,r8,ror#16 - ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24] - eor r2,r2,r9,ror#8 - ldr r4,[r11,#-12] - eor r3,r3,r6,ror#8 - - ldr r5,[r11,#-8] - eor r0,r0,r7 - ldr r6,[r11,#-4] - and r7,lr,r0 - eor r1,r1,r4 - and r8,lr,r0,lsr#8 - eor r2,r2,r5 - and r9,lr,r0,lsr#16 - eor r3,r3,r6 - mov r0,r0,lsr#24 - - subs r12,r12,#1 - bne Lenc_loop - - add r10,r10,#2 - - ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0] - and r7,lr,r1,lsr#16 @ i0 - ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8] - and r8,lr,r1 - ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16] - and r9,lr,r1,lsr#8 - ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24] - mov r1,r1,lsr#24 - - ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16] - ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0] - ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8] - eor 
r0,r7,r0,lsl#8 - ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r8,r5,lsl#8 - and r8,lr,r2,lsr#16 @ i1 - eor r6,r9,r6,lsl#8 - and r9,lr,r2 - ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8] - eor r1,r4,r1,lsl#24 - ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16] - mov r2,r2,lsr#24 - - ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0] - eor r0,r7,r0,lsl#8 - ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24] - and r7,lr,r3 @ i0 - eor r1,r1,r8,lsl#16 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r9,r6,lsl#8 - and r9,lr,r3,lsr#16 @ i2 - ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0] - eor r2,r5,r2,lsl#24 - ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8] - mov r3,r3,lsr#24 - - ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16] - eor r0,r7,r0,lsl#8 - ldr r7,[r11,#0] - ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24] - eor r1,r1,r8,lsl#8 - ldr r4,[r11,#4] - eor r2,r2,r9,lsl#16 - ldr r5,[r11,#8] - eor r3,r6,r3,lsl#24 - ldr r6,[r11,#12] - - eor r0,r0,r7 - eor r1,r1,r4 - eor r2,r2,r5 - eor r3,r3,r6 - - sub r10,r10,#2 - ldr pc,[sp],#4 @ pop and return - - -.globl _aes_nohw_set_encrypt_key -.private_extern _aes_nohw_set_encrypt_key -#ifdef __thumb2__ -.thumb_func _aes_nohw_set_encrypt_key -#endif -.align 5 -_aes_nohw_set_encrypt_key: -_armv4_AES_set_encrypt_key: -#ifndef __thumb2__ - sub r3,pc,#8 @ _aes_nohw_set_encrypt_key -#else - adr r3,. 
-#endif - teq r0,#0 -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - moveq r0,#-1 - beq Labrt - teq r2,#0 -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - moveq r0,#-1 - beq Labrt - - teq r1,#128 - beq Lok - teq r1,#192 - beq Lok - teq r1,#256 -#ifdef __thumb2__ - itt ne @ Thumb2 thing, sanity check in ARM -#endif - movne r0,#-1 - bne Labrt - -Lok: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - mov r12,r0 @ inp - mov lr,r1 @ bits - mov r11,r2 @ key - -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Te+1024 @ Te4 -#else - sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4 -#endif - -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... - ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - str r0,[r11],#16 - orr r3,r3,r5,lsl#16 - str r1,[r11,#-12] - orr r3,r3,r6,lsl#24 - str r2,[r11,#-8] - str r3,[r11,#-4] -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r11],#16 - str r1,[r11,#-12] - str r2,[r11,#-8] - str r3,[r11,#-4] -#endif - - teq lr,#128 - bne Lnot128 - mov r12,#10 - str r12,[r11,#240-16] - add r6,r10,#256 @ rcon - mov lr,#255 - -L128_loop: - and r5,lr,r3,lsr#24 - and r7,lr,r3,lsr#16 - ldrb r5,[r10,r5] - and r8,lr,r3,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r3 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r5,r5,r4 - 
eor r0,r0,r5 @ rk[4]=rk[0]^... - eor r1,r1,r0 @ rk[5]=rk[1]^rk[4] - str r0,[r11],#16 - eor r2,r2,r1 @ rk[6]=rk[2]^rk[5] - str r1,[r11,#-12] - eor r3,r3,r2 @ rk[7]=rk[3]^rk[6] - str r2,[r11,#-8] - subs r12,r12,#1 - str r3,[r11,#-4] - bne L128_loop - sub r2,r11,#176 - b Ldone - -Lnot128: -#if __ARM_ARCH__<7 - ldrb r8,[r12,#19] - ldrb r4,[r12,#18] - ldrb r5,[r12,#17] - ldrb r6,[r12,#16] - orr r8,r8,r4,lsl#8 - ldrb r9,[r12,#23] - orr r8,r8,r5,lsl#16 - ldrb r4,[r12,#22] - orr r8,r8,r6,lsl#24 - ldrb r5,[r12,#21] - ldrb r6,[r12,#20] - orr r9,r9,r4,lsl#8 - orr r9,r9,r5,lsl#16 - str r8,[r11],#8 - orr r9,r9,r6,lsl#24 - str r9,[r11,#-4] -#else - ldr r8,[r12,#16] - ldr r9,[r12,#20] -#ifdef __ARMEL__ - rev r8,r8 - rev r9,r9 -#endif - str r8,[r11],#8 - str r9,[r11,#-4] -#endif - - teq lr,#192 - bne Lnot192 - mov r12,#12 - str r12,[r11,#240-24] - add r6,r10,#256 @ rcon - mov lr,#255 - mov r12,#8 - -L192_loop: - and r5,lr,r9,lsr#24 - and r7,lr,r9,lsr#16 - ldrb r5,[r10,r5] - and r8,lr,r9,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r9 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r9,r5,r4 - eor r0,r0,r9 @ rk[6]=rk[0]^... 
- eor r1,r1,r0 @ rk[7]=rk[1]^rk[6] - str r0,[r11],#24 - eor r2,r2,r1 @ rk[8]=rk[2]^rk[7] - str r1,[r11,#-20] - eor r3,r3,r2 @ rk[9]=rk[3]^rk[8] - str r2,[r11,#-16] - subs r12,r12,#1 - str r3,[r11,#-12] -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - subeq r2,r11,#216 - beq Ldone - - ldr r7,[r11,#-32] - ldr r8,[r11,#-28] - eor r7,r7,r3 @ rk[10]=rk[4]^rk[9] - eor r9,r8,r7 @ rk[11]=rk[5]^rk[10] - str r7,[r11,#-8] - str r9,[r11,#-4] - b L192_loop - -Lnot192: -#if __ARM_ARCH__<7 - ldrb r8,[r12,#27] - ldrb r4,[r12,#26] - ldrb r5,[r12,#25] - ldrb r6,[r12,#24] - orr r8,r8,r4,lsl#8 - ldrb r9,[r12,#31] - orr r8,r8,r5,lsl#16 - ldrb r4,[r12,#30] - orr r8,r8,r6,lsl#24 - ldrb r5,[r12,#29] - ldrb r6,[r12,#28] - orr r9,r9,r4,lsl#8 - orr r9,r9,r5,lsl#16 - str r8,[r11],#8 - orr r9,r9,r6,lsl#24 - str r9,[r11,#-4] -#else - ldr r8,[r12,#24] - ldr r9,[r12,#28] -#ifdef __ARMEL__ - rev r8,r8 - rev r9,r9 -#endif - str r8,[r11],#8 - str r9,[r11,#-4] -#endif - - mov r12,#14 - str r12,[r11,#240-32] - add r6,r10,#256 @ rcon - mov lr,#255 - mov r12,#7 - -L256_loop: - and r5,lr,r9,lsr#24 - and r7,lr,r9,lsr#16 - ldrb r5,[r10,r5] - and r8,lr,r9,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r9 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r9,r5,r4 - eor r0,r0,r9 @ rk[8]=rk[0]^... 
- eor r1,r1,r0 @ rk[9]=rk[1]^rk[8] - str r0,[r11],#32 - eor r2,r2,r1 @ rk[10]=rk[2]^rk[9] - str r1,[r11,#-28] - eor r3,r3,r2 @ rk[11]=rk[3]^rk[10] - str r2,[r11,#-24] - subs r12,r12,#1 - str r3,[r11,#-20] -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - subeq r2,r11,#256 - beq Ldone - - and r5,lr,r3 - and r7,lr,r3,lsr#8 - ldrb r5,[r10,r5] - and r8,lr,r3,lsr#16 - ldrb r7,[r10,r7] - and r9,lr,r3,lsr#24 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#8 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r11,#-48] - orr r5,r5,r9,lsl#24 - - ldr r7,[r11,#-44] - ldr r8,[r11,#-40] - eor r4,r4,r5 @ rk[12]=rk[4]^... - ldr r9,[r11,#-36] - eor r7,r7,r4 @ rk[13]=rk[5]^rk[12] - str r4,[r11,#-16] - eor r8,r8,r7 @ rk[14]=rk[6]^rk[13] - str r7,[r11,#-12] - eor r9,r9,r8 @ rk[15]=rk[7]^rk[14] - str r8,[r11,#-8] - str r9,[r11,#-4] - b L256_loop - -.align 2 -Ldone: mov r0,#0 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} -Labrt: -#if __ARM_ARCH__>=5 - bx lr @ .word 0xe12fff1e -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - - -.globl _aes_nohw_set_decrypt_key -.private_extern _aes_nohw_set_decrypt_key -#ifdef __thumb2__ -.thumb_func _aes_nohw_set_decrypt_key -#endif -.align 5 -_aes_nohw_set_decrypt_key: - str lr,[sp,#-4]! 
@ push lr - bl _armv4_AES_set_encrypt_key - teq r0,#0 - ldr lr,[sp],#4 @ pop lr - bne Labrt - - mov r0,r2 @ _aes_nohw_set_encrypt_key preserves r2, - mov r1,r2 @ which is AES_KEY *key - b _armv4_AES_set_enc2dec_key - - -@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out) -.globl _AES_set_enc2dec_key -.private_extern _AES_set_enc2dec_key -#ifdef __thumb2__ -.thumb_func _AES_set_enc2dec_key -#endif -.align 5 -_AES_set_enc2dec_key: -_armv4_AES_set_enc2dec_key: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - - ldr r12,[r0,#240] - mov r7,r0 @ input - add r8,r0,r12,lsl#4 - mov r11,r1 @ output - add r10,r1,r12,lsl#4 - str r12,[r1,#240] - -Linv: ldr r0,[r7],#16 - ldr r1,[r7,#-12] - ldr r2,[r7,#-8] - ldr r3,[r7,#-4] - ldr r4,[r8],#-16 - ldr r5,[r8,#16+4] - ldr r6,[r8,#16+8] - ldr r9,[r8,#16+12] - str r0,[r10],#-16 - str r1,[r10,#16+4] - str r2,[r10,#16+8] - str r3,[r10,#16+12] - str r4,[r11],#16 - str r5,[r11,#-12] - str r6,[r11,#-8] - str r9,[r11,#-4] - teq r7,r8 - bne Linv - - ldr r0,[r7] - ldr r1,[r7,#4] - ldr r2,[r7,#8] - ldr r3,[r7,#12] - str r0,[r11] - str r1,[r11,#4] - str r2,[r11,#8] - str r3,[r11,#12] - sub r11,r11,r12,lsl#3 - ldr r0,[r11,#16]! 
@ prefetch tp1 - mov r7,#0x80 - mov r8,#0x1b - orr r7,r7,#0x8000 - orr r8,r8,#0x1b00 - orr r7,r7,r7,lsl#16 - orr r8,r8,r8,lsl#16 - sub r12,r12,#1 - mvn r9,r7 - mov r12,r12,lsl#2 @ (rounds-1)*4 - -Lmix: and r4,r0,r7 - and r1,r0,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r1,r4,r1,lsl#1 @ tp2 - - and r4,r1,r7 - and r2,r1,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r2,r4,r2,lsl#1 @ tp4 - - and r4,r2,r7 - and r3,r2,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r3,r4,r3,lsl#1 @ tp8 - - eor r4,r1,r2 - eor r5,r0,r3 @ tp9 - eor r4,r4,r3 @ tpe - eor r4,r4,r1,ror#24 - eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8) - eor r4,r4,r2,ror#16 - eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16) - eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24) - - ldr r0,[r11,#4] @ prefetch tp1 - str r4,[r11],#4 - subs r12,r12,#1 - bne Lmix - - mov r0,#0 -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - - - -.align 5 -AES_Td: -.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96 -.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393 -.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25 -.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f -.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1 -.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6 -.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da -.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844 -.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd -.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4 -.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45 -.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94 -.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7 -.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a -.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5 -.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c -.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 
0x4e69e2a1 -.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a -.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75 -.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051 -.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46 -.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff -.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77 -.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb -.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000 -.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e -.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927 -.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a -.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e -.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16 -.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d -.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8 -.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd -.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34 -.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163 -.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120 -.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d -.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0 -.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422 -.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef -.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36 -.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4 -.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662 -.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5 -.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3 -.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b -.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8 -.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6 -.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6 -.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0 -.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815 -.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f -.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df -.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 
0x4665517f -.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e -.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713 -.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89 -.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c -.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf -.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86 -.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f -.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541 -.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190 -.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 -@ Td4[256] -.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 -.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb -.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 -.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb -.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d -.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e -.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 -.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 -.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 -.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 -.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda -.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 -.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a -.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 -.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 -.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b -.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea -.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 -.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 -.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e -.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 -.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b -.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 -.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 -.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 -.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f -.byte 0x60, 0x51, 0x7f, 0xa9, 
0x19, 0xb5, 0x4a, 0x0d -.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef -.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 -.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 -.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 -.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d - - -@ void aes_nohw_decrypt(const unsigned char *in, unsigned char *out, -@ const AES_KEY *key) { -.globl _aes_nohw_decrypt -.private_extern _aes_nohw_decrypt -#ifdef __thumb2__ -.thumb_func _aes_nohw_decrypt -#endif -.align 5 -_aes_nohw_decrypt: -#ifndef __thumb2__ - sub r3,pc,#8 @ _aes_nohw_decrypt -#else - adr r3,. -#endif - stmdb sp!,{r1,r4-r12,lr} -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Td -#else - sub r10,r3,#_aes_nohw_decrypt-AES_Td @ Td -#endif - mov r12,r0 @ inp - mov r11,r2 -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... - ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - orr r3,r3,r5,lsl#16 - orr r3,r3,r6,lsl#24 -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif -#endif - bl _armv4_AES_decrypt - - ldr r12,[sp],#4 @ pop out -#if __ARM_ARCH__>=7 -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r12,#0] - str r1,[r12,#4] - str r2,[r12,#8] - str r3,[r12,#12] -#else - mov r4,r0,lsr#24 @ write output in endian-neutral - mov r5,r0,lsr#16 @ manner... 
- mov r6,r0,lsr#8 - strb r4,[r12,#0] - strb r5,[r12,#1] - mov r4,r1,lsr#24 - strb r6,[r12,#2] - mov r5,r1,lsr#16 - strb r0,[r12,#3] - mov r6,r1,lsr#8 - strb r4,[r12,#4] - strb r5,[r12,#5] - mov r4,r2,lsr#24 - strb r6,[r12,#6] - mov r5,r2,lsr#16 - strb r1,[r12,#7] - mov r6,r2,lsr#8 - strb r4,[r12,#8] - strb r5,[r12,#9] - mov r4,r3,lsr#24 - strb r6,[r12,#10] - mov r5,r3,lsr#16 - strb r2,[r12,#11] - mov r6,r3,lsr#8 - strb r4,[r12,#12] - strb r5,[r12,#13] - strb r6,[r12,#14] - strb r3,[r12,#15] -#endif -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - - -#ifdef __thumb2__ -.thumb_func _armv4_AES_decrypt -#endif -.align 2 -_armv4_AES_decrypt: - str lr,[sp,#-4]! @ push lr - ldmia r11!,{r4,r5,r6,r7} - eor r0,r0,r4 - ldr r12,[r11,#240-16] - eor r1,r1,r5 - eor r2,r2,r6 - eor r3,r3,r7 - sub r12,r12,#1 - mov lr,#255 - - and r7,lr,r0,lsr#16 - and r8,lr,r0,lsr#8 - and r9,lr,r0 - mov r0,r0,lsr#24 -Ldec_loop: - ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16] - and r7,lr,r1 @ i0 - ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8] - and r8,lr,r1,lsr#16 - ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0] - and r9,lr,r1,lsr#8 - ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24] - mov r1,r1,lsr#24 - - ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0] - ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16] - ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8] - eor r0,r0,r7,ror#24 - ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r8,r5,ror#8 - and r8,lr,r2 @ i1 - eor r6,r9,r6,ror#8 - and r9,lr,r2,lsr#16 - ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8] - eor r1,r1,r4,ror#8 - ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0] - mov r2,r2,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16] - eor r0,r0,r7,ror#16 - ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24] - and r7,lr,r3,lsr#16 @ i0 - eor r1,r1,r8,ror#24 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r9,r6,ror#8 - and r9,lr,r3 @ i2 - ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16] - eor 
r2,r2,r5,ror#8 - ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8] - mov r3,r3,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0] - eor r0,r0,r7,ror#8 - ldr r7,[r11],#16 - eor r1,r1,r8,ror#16 - ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24] - eor r2,r2,r9,ror#24 - - ldr r4,[r11,#-12] - eor r0,r0,r7 - ldr r5,[r11,#-8] - eor r3,r3,r6,ror#8 - ldr r6,[r11,#-4] - and r7,lr,r0,lsr#16 - eor r1,r1,r4 - and r8,lr,r0,lsr#8 - eor r2,r2,r5 - and r9,lr,r0 - eor r3,r3,r6 - mov r0,r0,lsr#24 - - subs r12,r12,#1 - bne Ldec_loop - - add r10,r10,#1024 - - ldr r5,[r10,#0] @ prefetch Td4 - ldr r6,[r10,#32] - ldr r4,[r10,#64] - ldr r5,[r10,#96] - ldr r6,[r10,#128] - ldr r4,[r10,#160] - ldr r5,[r10,#192] - ldr r6,[r10,#224] - - ldrb r0,[r10,r0] @ Td4[s0>>24] - ldrb r4,[r10,r7] @ Td4[s0>>16] - and r7,lr,r1 @ i0 - ldrb r5,[r10,r8] @ Td4[s0>>8] - and r8,lr,r1,lsr#16 - ldrb r6,[r10,r9] @ Td4[s0>>0] - and r9,lr,r1,lsr#8 - - add r1,r10,r1,lsr#24 - ldrb r7,[r10,r7] @ Td4[s1>>0] - ldrb r1,[r1] @ Td4[s1>>24] - ldrb r8,[r10,r8] @ Td4[s1>>16] - eor r0,r7,r0,lsl#24 - ldrb r9,[r10,r9] @ Td4[s1>>8] - eor r1,r4,r1,lsl#8 - and r7,lr,r2,lsr#8 @ i0 - eor r5,r5,r8,lsl#8 - and r8,lr,r2 @ i1 - ldrb r7,[r10,r7] @ Td4[s2>>8] - eor r6,r6,r9,lsl#8 - ldrb r8,[r10,r8] @ Td4[s2>>0] - and r9,lr,r2,lsr#16 - - add r2,r10,r2,lsr#24 - ldrb r2,[r2] @ Td4[s2>>24] - eor r0,r0,r7,lsl#8 - ldrb r9,[r10,r9] @ Td4[s2>>16] - eor r1,r8,r1,lsl#16 - and r7,lr,r3,lsr#16 @ i0 - eor r2,r5,r2,lsl#16 - and r8,lr,r3,lsr#8 @ i1 - ldrb r7,[r10,r7] @ Td4[s3>>16] - eor r6,r6,r9,lsl#16 - ldrb r8,[r10,r8] @ Td4[s3>>8] - and r9,lr,r3 @ i2 - - add r3,r10,r3,lsr#24 - ldrb r9,[r10,r9] @ Td4[s3>>0] - ldrb r3,[r3] @ Td4[s3>>24] - eor r0,r0,r7,lsl#16 - ldr r7,[r11,#0] - eor r1,r1,r8,lsl#8 - ldr r4,[r11,#4] - eor r2,r9,r2,lsl#8 - ldr r5,[r11,#8] - eor r3,r6,r3,lsl#24 - ldr r6,[r11,#12] - - eor r0,r0,r7 - eor r1,r1,r4 - eor r2,r2,r5 - eor r3,r3,r6 - - sub r10,r10,#1024 - ldr pc,[sp],#4 @ pop and return - -.byte 
65,69,83,32,102,111,114,32,65,82,77,118,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aesv8-armx32.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aesv8-armx32.S deleted file mode 100644 index 7392231df2..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/aesv8-armx32.S +++ /dev/null @@ -1,790 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -#if __ARM_MAX_ARCH__>=7 -.text - - -.code 32 -#undef __thumb2__ -.align 5 -Lrcon: -.long 0x01,0x01,0x01,0x01 -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat -.long 0x1b,0x1b,0x1b,0x1b - -.text - -.globl _aes_hw_set_encrypt_key -.private_extern _aes_hw_set_encrypt_key -#ifdef __thumb2__ -.thumb_func _aes_hw_set_encrypt_key -#endif -.align 5 -_aes_hw_set_encrypt_key: -Lenc_key: - mov r3,#-1 - cmp r0,#0 - beq Lenc_key_abort - cmp r2,#0 - beq Lenc_key_abort - mov r3,#-2 - cmp r1,#128 - blt Lenc_key_abort - cmp r1,#256 - bgt Lenc_key_abort - tst r1,#0x3f - bne Lenc_key_abort - - adr r3,Lrcon - cmp r1,#192 - - veor q0,q0,q0 - vld1.8 {q3},[r0]! - mov r1,#8 @ reuse r1 - vld1.32 {q1,q2},[r3]! - - blt Loop128 - beq L192 - b L256 - -.align 4 -Loop128: - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! 
-.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - bne Loop128 - - vld1.32 {q1},[r3] - - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - veor q3,q3,q10 - vst1.32 {q3},[r2] - add r2,r2,#0x50 - - mov r12,#10 - b Ldone - -.align 4 -L192: - vld1.8 {d16},[r0]! - vmov.i8 q10,#8 @ borrow q10 - vst1.32 {q3},[r2]! - vsub.i8 q2,q2,q10 @ adjust the mask - -Loop192: - vtbl.8 d20,{q8},d4 - vtbl.8 d21,{q8},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {d16},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - - vdup.32 q9,d7[1] - veor q9,q9,q8 - veor q10,q10,q1 - vext.8 q8,q0,q8,#12 - vshl.u8 q1,q1,#1 - veor q8,q8,q9 - veor q3,q3,q10 - veor q8,q8,q10 - vst1.32 {q3},[r2]! - bne Loop192 - - mov r12,#12 - add r2,r2,#0x20 - b Ldone - -.align 4 -L256: - vld1.8 {q8},[r0] - mov r1,#7 - mov r12,#14 - vst1.32 {q3},[r2]! - -Loop256: - vtbl.8 d20,{q8},d4 - vtbl.8 d21,{q8},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q8},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - vst1.32 {q3},[r2]! 
- beq Ldone - - vdup.32 q10,d7[1] - vext.8 q9,q0,q8,#12 -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q8,q8,q9 - vext.8 q9,q0,q9,#12 - veor q8,q8,q9 - vext.8 q9,q0,q9,#12 - veor q8,q8,q9 - - veor q8,q8,q10 - b Loop256 - -Ldone: - str r12,[r2] - mov r3,#0 - -Lenc_key_abort: - mov r0,r3 @ return value - - bx lr - - -.globl _aes_hw_set_decrypt_key -.private_extern _aes_hw_set_decrypt_key -#ifdef __thumb2__ -.thumb_func _aes_hw_set_decrypt_key -#endif -.align 5 -_aes_hw_set_decrypt_key: - stmdb sp!,{r4,lr} - bl Lenc_key - - cmp r0,#0 - bne Ldec_key_abort - - sub r2,r2,#240 @ restore original r2 - mov r4,#-16 - add r0,r2,r12,lsl#4 @ end of key schedule - - vld1.32 {q0},[r2] - vld1.32 {q1},[r0] - vst1.32 {q0},[r0],r4 - vst1.32 {q1},[r2]! - -Loop_imc: - vld1.32 {q0},[r2] - vld1.32 {q1},[r0] -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 - vst1.32 {q0},[r0],r4 - vst1.32 {q1},[r2]! - cmp r0,r2 - bhi Loop_imc - - vld1.32 {q0},[r2] -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 - vst1.32 {q0},[r0] - - eor r0,r0,r0 @ return value -Ldec_key_abort: - ldmia sp!,{r4,pc} - -.globl _aes_hw_encrypt -.private_extern _aes_hw_encrypt -#ifdef __thumb2__ -.thumb_func _aes_hw_encrypt -#endif -.align 5 -_aes_hw_encrypt: - ldr r3,[r2,#240] - vld1.32 {q0},[r2]! - vld1.8 {q2},[r0] - sub r3,r3,#2 - vld1.32 {q1},[r2]! - -Loop_enc: -.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q0},[r2]! - subs r3,r3,#2 -.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q1},[r2]! - bgt Loop_enc - -.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q0},[r2] -.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 - veor q2,q2,q0 - - vst1.8 {q2},[r1] - bx lr - -.globl _aes_hw_decrypt -.private_extern _aes_hw_decrypt -#ifdef __thumb2__ -.thumb_func _aes_hw_decrypt -#endif -.align 5 -_aes_hw_decrypt: - ldr r3,[r2,#240] - vld1.32 {q0},[r2]! 
- vld1.8 {q2},[r0] - sub r3,r3,#2 - vld1.32 {q1},[r2]! - -Loop_dec: -.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q0},[r2]! - subs r3,r3,#2 -.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q1},[r2]! - bgt Loop_dec - -.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q0},[r2] -.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 - veor q2,q2,q0 - - vst1.8 {q2},[r1] - bx lr - -.globl _aes_hw_cbc_encrypt -.private_extern _aes_hw_cbc_encrypt -#ifdef __thumb2__ -.thumb_func _aes_hw_cbc_encrypt -#endif -.align 5 -_aes_hw_cbc_encrypt: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,lr} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldmia ip,{r4,r5} @ load remaining args - subs r2,r2,#16 - mov r8,#16 - blo Lcbc_abort - moveq r8,#0 - - cmp r5,#0 @ en- or decrypting? - ldr r5,[r3,#240] - and r2,r2,#-16 - vld1.8 {q6},[r4] - vld1.8 {q0},[r0],r8 - - vld1.32 {q8,q9},[r3] @ load key schedule... - sub r5,r5,#6 - add r7,r3,r5,lsl#4 @ pointer to last 7 round keys - sub r5,r5,#2 - vld1.32 {q10,q11},[r7]! - vld1.32 {q12,q13},[r7]! - vld1.32 {q14,q15},[r7]! - vld1.32 {q7},[r7] - - add r7,r3,#32 - mov r6,r5 - beq Lcbc_dec - - cmp r5,#2 - veor q0,q0,q6 - veor q5,q8,q7 - beq Lcbc_enc128 - - vld1.32 {q2,q3},[r7] - add r7,r3,#16 - add r6,r3,#16*4 - add r12,r3,#16*5 -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - add r14,r3,#16*6 - add r3,r3,#16*7 - b Lenter_cbc_enc - -.align 4 -Loop_cbc_enc: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vst1.8 {q6},[r1]! 
-Lenter_cbc_enc: -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q8},[r6] - cmp r5,#4 -.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r12] - beq Lcbc_enc192 - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q8},[r14] -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r3] - nop - -Lcbc_enc192: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - subs r2,r2,#16 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - moveq r8,#0 -.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.8 {q8},[r0],r8 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - veor q8,q8,q5 -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r7] @ re-pre-load rndkey[1] -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 - veor q6,q0,q7 - bhs Loop_cbc_enc - - vst1.8 {q6},[r1]! - b Lcbc_done - -.align 5 -Lcbc_enc128: - vld1.32 {q2,q3},[r7] -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - b Lenter_cbc_enc128 -Loop_cbc_enc128: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vst1.8 {q6},[r1]! 
-Lenter_cbc_enc128: -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - subs r2,r2,#16 -.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - moveq r8,#0 -.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.8 {q8},[r0],r8 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - veor q8,q8,q5 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 - veor q6,q0,q7 - bhs Loop_cbc_enc128 - - vst1.8 {q6},[r1]! - b Lcbc_done -.align 5 -Lcbc_dec: - vld1.8 {q10},[r0]! - subs r2,r2,#32 @ bias - add r6,r5,#2 - vorr q3,q0,q0 - vorr q1,q0,q0 - vorr q11,q10,q10 - blo Lcbc_dec_tail - - vorr q1,q10,q10 - vld1.8 {q10},[r0]! - vorr q2,q0,q0 - vorr q3,q1,q1 - vorr q11,q10,q10 - -Loop3x_cbc_dec: -.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q9},[r7]! 
- bgt Loop3x_cbc_dec - -.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q4,q6,q7 - subs r2,r2,#0x30 - veor q5,q2,q7 - movlo r6,r2 @ r6, r6, is zero at this point -.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q9,q3,q7 - add r0,r0,r6 @ r0 is adjusted in such way that - @ at exit from the loop q1-q10 - @ are loaded with last "words" - vorr q6,q11,q11 - mov r7,r3 -.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q2},[r0]! -.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q3},[r0]! -.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q11},[r0]! -.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15 -.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 -.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 - vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] - add r6,r5,#2 - veor q4,q4,q0 - veor q5,q5,q1 - veor q10,q10,q9 - vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] - vst1.8 {q4},[r1]! - vorr q0,q2,q2 - vst1.8 {q5},[r1]! - vorr q1,q3,q3 - vst1.8 {q10},[r1]! 
- vorr q10,q11,q11 - bhs Loop3x_cbc_dec - - cmn r2,#0x30 - beq Lcbc_done - nop - -Lcbc_dec_tail: -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q9},[r7]! - bgt Lcbc_dec_tail - -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 -.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - cmn r2,#0x20 -.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q5,q6,q7 -.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q9,q3,q7 -.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 -.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 - beq Lcbc_dec_one - veor q5,q5,q1 - veor q9,q9,q10 - vorr q6,q11,q11 - vst1.8 {q5},[r1]! - vst1.8 {q9},[r1]! - b Lcbc_done - -Lcbc_dec_one: - veor q5,q5,q10 - vorr q6,q11,q11 - vst1.8 {q5},[r1]! 
- -Lcbc_done: - vst1.8 {q6},[r4] -Lcbc_abort: - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,pc} - -.globl _aes_hw_ctr32_encrypt_blocks -.private_extern _aes_hw_ctr32_encrypt_blocks -#ifdef __thumb2__ -.thumb_func _aes_hw_ctr32_encrypt_blocks -#endif -.align 5 -_aes_hw_ctr32_encrypt_blocks: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldr r4, [ip] @ load remaining arg - ldr r5,[r3,#240] - - ldr r8, [r4, #12] - vld1.32 {q0},[r4] - - vld1.32 {q8,q9},[r3] @ load key schedule... - sub r5,r5,#4 - mov r12,#16 - cmp r2,#2 - add r7,r3,r5,lsl#4 @ pointer to last 5 round keys - sub r5,r5,#2 - vld1.32 {q12,q13},[r7]! - vld1.32 {q14,q15},[r7]! - vld1.32 {q7},[r7] - add r7,r3,#32 - mov r6,r5 - movlo r12,#0 -#ifndef __ARMEB__ - rev r8, r8 -#endif - vorr q1,q0,q0 - add r10, r8, #1 - vorr q10,q0,q0 - add r8, r8, #2 - vorr q6,q0,q0 - rev r10, r10 - vmov.32 d3[1],r10 - bls Lctr32_tail - rev r12, r8 - sub r2,r2,#3 @ bias - vmov.32 d21[1],r12 - b Loop3x_ctr32 - -.align 4 -Loop3x_ctr32: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.32 {q9},[r7]! - bgt Loop3x_ctr32 - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 - vld1.8 {q2},[r0]! - vorr q0,q6,q6 -.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.8 {q3},[r0]! 
- vorr q1,q6,q6 -.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vld1.8 {q11},[r0]! - mov r7,r3 -.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 -.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 - vorr q10,q6,q6 - add r9,r8,#1 -.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - veor q2,q2,q7 - add r10,r8,#2 -.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - veor q3,q3,q7 - add r8,r8,#3 -.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - veor q11,q11,q7 - rev r9,r9 -.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d1[1], r9 - rev r10,r10 -.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vmov.32 d3[1], r10 - rev r12,r8 -.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d21[1], r12 - subs r2,r2,#3 -.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 -.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 -.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15 - - veor q2,q2,q4 - vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] - vst1.8 {q2},[r1]! - veor q3,q3,q5 - mov r6,r5 - vst1.8 {q3},[r1]! - veor q11,q11,q9 - vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] - vst1.8 {q11},[r1]! - bhs Loop3x_ctr32 - - adds r2,r2,#3 - beq Lctr32_done - cmp r2,#1 - mov r12,#16 - moveq r12,#0 - -Lctr32_tail: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.32 {q8},[r7]! 
- subs r6,r6,#2 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.32 {q9},[r7]! - bgt Lctr32_tail - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.8 {q2},[r0],r12 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.8 {q3},[r0] -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - veor q2,q2,q7 -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - veor q3,q3,q7 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 -.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15 - - cmp r2,#1 - veor q2,q2,q0 - veor q3,q3,q1 - vst1.8 {q2},[r1]! - beq Lctr32_done - vst1.8 {q3},[r1] - -Lctr32_done: - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc} - -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/armv4-mont.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/armv4-mont.S deleted file mode 100644 index e549d1f163..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/armv4-mont.S +++ /dev/null @@ -1,982 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. - - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -#if __ARM_MAX_ARCH__>=7 -.align 5 -LOPENSSL_armcap: -.word OPENSSL_armcap_P-Lbn_mul_mont -#endif - -.globl _bn_mul_mont -.private_extern _bn_mul_mont -#ifdef __thumb2__ -.thumb_func _bn_mul_mont -#endif - -.align 5 -_bn_mul_mont: -Lbn_mul_mont: - ldr ip,[sp,#4] @ load num - stmdb sp!,{r0,r2} @ sp points at argument block -#if __ARM_MAX_ARCH__>=7 - tst ip,#7 - bne Lialu - adr r0,Lbn_mul_mont - ldr r2,LOPENSSL_armcap - ldr r0,[r0,r2] -#ifdef __APPLE__ - ldr r0,[r0] -#endif - tst r0,#ARMV7_NEON @ NEON available? 
- ldmia sp, {r0,r2} - beq Lialu - add sp,sp,#8 - b bn_mul8x_mont_neon -.align 4 -Lialu: -#endif - cmp ip,#2 - mov r0,ip @ load num -#ifdef __thumb2__ - ittt lt -#endif - movlt r0,#0 - addlt sp,sp,#2*4 - blt Labrt - - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers - - mov r0,r0,lsl#2 @ rescale r0 for byte count - sub sp,sp,r0 @ alloca(4*num) - sub sp,sp,#4 @ +extra dword - sub r0,r0,#4 @ "num=num-1" - add r4,r2,r0 @ &bp[num-1] - - add r0,sp,r0 @ r0 to point at &tp[num-1] - ldr r8,[r0,#14*4] @ &n0 - ldr r2,[r2] @ bp[0] - ldr r5,[r1],#4 @ ap[0],ap++ - ldr r6,[r3],#4 @ np[0],np++ - ldr r8,[r8] @ *n0 - str r4,[r0,#15*4] @ save &bp[num] - - umull r10,r11,r5,r2 @ ap[0]*bp[0] - str r8,[r0,#14*4] @ save n0 value - mul r8,r10,r8 @ "tp[0]"*n0 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" - mov r4,sp - -L1st: - ldr r5,[r1],#4 @ ap[j],ap++ - mov r10,r11 - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[0] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne L1st - - adds r12,r12,r11 - ldr r4,[r0,#13*4] @ restore bp - mov r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - mov r7,sp - str r14,[r0,#4] @ tp[num]= - -Louter: - sub r7,r0,r7 @ "original" r0-1 value - sub r1,r1,r7 @ "rewind" ap to &ap[1] - ldr r2,[r4,#4]! 
@ *(++bp) - sub r3,r3,r7 @ "rewind" np to &np[1] - ldr r5,[r1,#-4] @ ap[0] - ldr r10,[sp] @ tp[0] - ldr r6,[r3,#-4] @ np[0] - ldr r7,[sp,#4] @ tp[1] - - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] - str r4,[r0,#13*4] @ save bp - mul r8,r10,r8 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" - mov r4,sp - -Linner: - ldr r5,[r1],#4 @ ap[j],ap++ - adds r10,r11,r7 @ +=tp[j] - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[i] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adc r11,r11,#0 - ldr r7,[r4,#8] @ tp[j+1] - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne Linner - - adds r12,r12,r11 - mov r14,#0 - ldr r4,[r0,#13*4] @ restore bp - adc r14,r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adds r12,r12,r7 - ldr r7,[r0,#15*4] @ restore &bp[num] - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - str r14,[r0,#4] @ tp[num]= - - cmp r4,r7 -#ifdef __thumb2__ - itt ne -#endif - movne r7,sp - bne Louter - - ldr r2,[r0,#12*4] @ pull rp - mov r5,sp - add r0,r0,#4 @ r0 to point at &tp[num] - sub r5,r0,r5 @ "original" num value - mov r4,sp @ "rewind" r4 - mov r1,r4 @ "borrow" r1 - sub r3,r3,r5 @ "rewind" r3 to &np[0] - - subs r7,r7,r7 @ "clear" carry flag -Lsub: ldr r7,[r4],#4 - ldr r6,[r3],#4 - sbcs r7,r7,r6 @ tp[j]-np[j] - str r7,[r2],#4 @ rp[j]= - teq r4,r0 @ preserve carry - bne Lsub - sbcs r14,r14,#0 @ upmost carry - mov r4,sp @ "rewind" r4 - sub r2,r2,r5 @ "rewind" r2 - -Lcopy: ldr r7,[r4] @ conditional copy - ldr r5,[r2] - str sp,[r4],#4 @ zap tp -#ifdef __thumb2__ - it cc -#endif - movcc r5,r7 - str r5,[r2],#4 - teq r4,r0 @ preserve carry - bne Lcopy - - mov sp,r0 - add sp,sp,#4 @ skip over tp[num+1] - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers - add sp,sp,#2*4 @ skip over {r0,r2} - mov r0,#1 -Labrt: -#if __ARM_ARCH__>=5 - bx lr @ bx lr -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - -#if 
__ARM_MAX_ARCH__>=7 - - - -#ifdef __thumb2__ -.thumb_func bn_mul8x_mont_neon -#endif -.align 5 -bn_mul8x_mont_neon: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldmia ip,{r4,r5} @ load rest of parameter block - mov ip,sp - - cmp r5,#8 - bhi LNEON_8n - - @ special case for r5==8, everything is in register bank... - - vld1.32 {d28[0]}, [r2,:32]! - veor d8,d8,d8 - sub r7,sp,r5,lsl#4 - vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-( - and r7,r7,#-64 - vld1.32 {d30[0]}, [r4,:32] - mov sp,r7 @ alloca - vzip.16 d28,d8 - - vmull.u32 q6,d28,d0[0] - vmull.u32 q7,d28,d0[1] - vmull.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmull.u32 q9,d28,d1[1] - - vadd.u64 d29,d29,d12 - veor d8,d8,d8 - vmul.u32 d29,d29,d30 - - vmull.u32 q10,d28,d2[0] - vld1.32 {d4,d5,d6,d7}, [r3]! - vmull.u32 q11,d28,d2[1] - vmull.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmull.u32 q13,d28,d3[1] - - vmlal.u32 q6,d29,d4[0] - sub r9,r5,#1 - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - - vmlal.u32 q10,d29,d6[0] - vmov q5,q6 - vmlal.u32 q11,d29,d6[1] - vmov q6,q7 - vmlal.u32 q12,d29,d7[0] - vmov q7,q8 - vmlal.u32 q13,d29,d7[1] - vmov q8,q9 - vmov q9,q10 - vshr.u64 d10,d10,#16 - vmov q10,q11 - vmov q11,q12 - vadd.u64 d10,d10,d11 - vmov q12,q13 - veor q13,q13 - vshr.u64 d10,d10,#16 - - b LNEON_outer8 - -.align 4 -LNEON_outer8: - vld1.32 {d28[0]}, [r2,:32]! 
- veor d8,d8,d8 - vzip.16 d28,d8 - vadd.u64 d12,d12,d10 - - vmlal.u32 q6,d28,d0[0] - vmlal.u32 q7,d28,d0[1] - vmlal.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmlal.u32 q9,d28,d1[1] - - vadd.u64 d29,d29,d12 - veor d8,d8,d8 - subs r9,r9,#1 - vmul.u32 d29,d29,d30 - - vmlal.u32 q10,d28,d2[0] - vmlal.u32 q11,d28,d2[1] - vmlal.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q13,d28,d3[1] - - vmlal.u32 q6,d29,d4[0] - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - - vmlal.u32 q10,d29,d6[0] - vmov q5,q6 - vmlal.u32 q11,d29,d6[1] - vmov q6,q7 - vmlal.u32 q12,d29,d7[0] - vmov q7,q8 - vmlal.u32 q13,d29,d7[1] - vmov q8,q9 - vmov q9,q10 - vshr.u64 d10,d10,#16 - vmov q10,q11 - vmov q11,q12 - vadd.u64 d10,d10,d11 - vmov q12,q13 - veor q13,q13 - vshr.u64 d10,d10,#16 - - bne LNEON_outer8 - - vadd.u64 d12,d12,d10 - mov r7,sp - vshr.u64 d10,d12,#16 - mov r8,r5 - vadd.u64 d13,d13,d10 - add r6,sp,#96 - vshr.u64 d10,d13,#16 - vzip.16 d12,d13 - - b LNEON_tail_entry - -.align 4 -LNEON_8n: - veor q6,q6,q6 - sub r7,sp,#128 - veor q7,q7,q7 - sub r7,r7,r5,lsl#4 - veor q8,q8,q8 - and r7,r7,#-64 - veor q9,q9,q9 - mov sp,r7 @ alloca - veor q10,q10,q10 - add r7,r7,#256 - veor q11,q11,q11 - sub r8,r5,#8 - veor q12,q12,q12 - veor q13,q13,q13 - -LNEON_8n_init: - vst1.64 {q6,q7},[r7,:256]! - subs r8,r8,#8 - vst1.64 {q8,q9},[r7,:256]! - vst1.64 {q10,q11},[r7,:256]! - vst1.64 {q12,q13},[r7,:256]! - bne LNEON_8n_init - - add r6,sp,#256 - vld1.32 {d0,d1,d2,d3},[r1]! - add r10,sp,#8 - vld1.32 {d30[0]},[r4,:32] - mov r9,r5 - b LNEON_8n_outer - -.align 4 -LNEON_8n_outer: - vld1.32 {d28[0]},[r2,:32]! @ *b++ - veor d8,d8,d8 - vzip.16 d28,d8 - add r7,sp,#128 - vld1.32 {d4,d5,d6,d7},[r3]! 
- - vmlal.u32 q6,d28,d0[0] - vmlal.u32 q7,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmlal.u32 q9,d28,d1[1] - vadd.u64 d29,d29,d12 - vmlal.u32 q10,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q11,d28,d2[1] - vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0] - vmlal.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q13,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q6,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q7,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q8,d29,d5[0] - vshr.u64 d12,d12,#16 - vmlal.u32 q9,d29,d5[1] - vmlal.u32 q10,d29,d6[0] - vadd.u64 d12,d12,d13 - vmlal.u32 q11,d29,d6[1] - vshr.u64 d12,d12,#16 - vmlal.u32 q12,d29,d7[0] - vmlal.u32 q13,d29,d7[1] - vadd.u64 d14,d14,d12 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0] - vmlal.u32 q7,d28,d0[0] - vld1.64 {q6},[r6,:128]! - vmlal.u32 q8,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q9,d28,d1[0] - vshl.i64 d29,d15,#16 - vmlal.u32 q10,d28,d1[1] - vadd.u64 d29,d29,d14 - vmlal.u32 q11,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q12,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1] - vmlal.u32 q13,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q6,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q7,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q8,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q9,d29,d5[0] - vshr.u64 d14,d14,#16 - vmlal.u32 q10,d29,d5[1] - vmlal.u32 q11,d29,d6[0] - vadd.u64 d14,d14,d15 - vmlal.u32 q12,d29,d6[1] - vshr.u64 d14,d14,#16 - vmlal.u32 q13,d29,d7[0] - vmlal.u32 q6,d29,d7[1] - vadd.u64 d16,d16,d14 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1] - vmlal.u32 q8,d28,d0[0] - vld1.64 {q7},[r6,:128]! - vmlal.u32 q9,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q10,d28,d1[0] - vshl.i64 d29,d17,#16 - vmlal.u32 q11,d28,d1[1] - vadd.u64 d29,d29,d16 - vmlal.u32 q12,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q13,d28,d2[1] - vst1.32 {d28},[r10,:64]! 
@ put aside smashed b[8*i+2] - vmlal.u32 q6,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q7,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q8,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q9,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q10,d29,d5[0] - vshr.u64 d16,d16,#16 - vmlal.u32 q11,d29,d5[1] - vmlal.u32 q12,d29,d6[0] - vadd.u64 d16,d16,d17 - vmlal.u32 q13,d29,d6[1] - vshr.u64 d16,d16,#16 - vmlal.u32 q6,d29,d7[0] - vmlal.u32 q7,d29,d7[1] - vadd.u64 d18,d18,d16 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2] - vmlal.u32 q9,d28,d0[0] - vld1.64 {q8},[r6,:128]! - vmlal.u32 q10,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q11,d28,d1[0] - vshl.i64 d29,d19,#16 - vmlal.u32 q12,d28,d1[1] - vadd.u64 d29,d29,d18 - vmlal.u32 q13,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q6,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3] - vmlal.u32 q7,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q8,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q9,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q10,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q11,d29,d5[0] - vshr.u64 d18,d18,#16 - vmlal.u32 q12,d29,d5[1] - vmlal.u32 q13,d29,d6[0] - vadd.u64 d18,d18,d19 - vmlal.u32 q6,d29,d6[1] - vshr.u64 d18,d18,#16 - vmlal.u32 q7,d29,d7[0] - vmlal.u32 q8,d29,d7[1] - vadd.u64 d20,d20,d18 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3] - vmlal.u32 q10,d28,d0[0] - vld1.64 {q9},[r6,:128]! - vmlal.u32 q11,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q12,d28,d1[0] - vshl.i64 d29,d21,#16 - vmlal.u32 q13,d28,d1[1] - vadd.u64 d29,d29,d20 - vmlal.u32 q6,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q7,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4] - vmlal.u32 q8,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q9,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! 
@ *b++ - vmlal.u32 q10,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q11,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q12,d29,d5[0] - vshr.u64 d20,d20,#16 - vmlal.u32 q13,d29,d5[1] - vmlal.u32 q6,d29,d6[0] - vadd.u64 d20,d20,d21 - vmlal.u32 q7,d29,d6[1] - vshr.u64 d20,d20,#16 - vmlal.u32 q8,d29,d7[0] - vmlal.u32 q9,d29,d7[1] - vadd.u64 d22,d22,d20 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4] - vmlal.u32 q11,d28,d0[0] - vld1.64 {q10},[r6,:128]! - vmlal.u32 q12,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q13,d28,d1[0] - vshl.i64 d29,d23,#16 - vmlal.u32 q6,d28,d1[1] - vadd.u64 d29,d29,d22 - vmlal.u32 q7,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q8,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5] - vmlal.u32 q9,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q10,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q11,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q12,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q13,d29,d5[0] - vshr.u64 d22,d22,#16 - vmlal.u32 q6,d29,d5[1] - vmlal.u32 q7,d29,d6[0] - vadd.u64 d22,d22,d23 - vmlal.u32 q8,d29,d6[1] - vshr.u64 d22,d22,#16 - vmlal.u32 q9,d29,d7[0] - vmlal.u32 q10,d29,d7[1] - vadd.u64 d24,d24,d22 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5] - vmlal.u32 q12,d28,d0[0] - vld1.64 {q11},[r6,:128]! - vmlal.u32 q13,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q6,d28,d1[0] - vshl.i64 d29,d25,#16 - vmlal.u32 q7,d28,d1[1] - vadd.u64 d29,d29,d24 - vmlal.u32 q8,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q9,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6] - vmlal.u32 q10,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q11,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q12,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q13,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q6,d29,d5[0] - vshr.u64 d24,d24,#16 - vmlal.u32 q7,d29,d5[1] - vmlal.u32 q8,d29,d6[0] - vadd.u64 d24,d24,d25 - vmlal.u32 q9,d29,d6[1] - vshr.u64 d24,d24,#16 - vmlal.u32 q10,d29,d7[0] - vmlal.u32 q11,d29,d7[1] - vadd.u64 d26,d26,d24 - vst1.32 {d29},[r10,:64]! 
@ put aside smashed m[8*i+6] - vmlal.u32 q13,d28,d0[0] - vld1.64 {q12},[r6,:128]! - vmlal.u32 q6,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q7,d28,d1[0] - vshl.i64 d29,d27,#16 - vmlal.u32 q8,d28,d1[1] - vadd.u64 d29,d29,d26 - vmlal.u32 q9,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q10,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7] - vmlal.u32 q11,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q12,d28,d3[1] - vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] - vmlal.u32 q13,d29,d4[0] - vld1.32 {d0,d1,d2,d3},[r1]! - vmlal.u32 q6,d29,d4[1] - vmlal.u32 q7,d29,d5[0] - vshr.u64 d26,d26,#16 - vmlal.u32 q8,d29,d5[1] - vmlal.u32 q9,d29,d6[0] - vadd.u64 d26,d26,d27 - vmlal.u32 q10,d29,d6[1] - vshr.u64 d26,d26,#16 - vmlal.u32 q11,d29,d7[0] - vmlal.u32 q12,d29,d7[1] - vadd.u64 d12,d12,d26 - vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7] - add r10,sp,#8 @ rewind - sub r8,r5,#8 - b LNEON_8n_inner - -.align 4 -LNEON_8n_inner: - subs r8,r8,#8 - vmlal.u32 q6,d28,d0[0] - vld1.64 {q13},[r6,:128] - vmlal.u32 q7,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0] - vmlal.u32 q8,d28,d1[0] - vld1.32 {d4,d5,d6,d7},[r3]! - vmlal.u32 q9,d28,d1[1] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q10,d28,d2[0] - vmlal.u32 q11,d28,d2[1] - vmlal.u32 q12,d28,d3[0] - vmlal.u32 q13,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1] - vmlal.u32 q6,d29,d4[0] - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - vmlal.u32 q10,d29,d6[0] - vmlal.u32 q11,d29,d6[1] - vmlal.u32 q12,d29,d7[0] - vmlal.u32 q13,d29,d7[1] - vst1.64 {q6},[r7,:128]! - vmlal.u32 q7,d28,d0[0] - vld1.64 {q6},[r6,:128] - vmlal.u32 q8,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1] - vmlal.u32 q9,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q10,d28,d1[1] - vmlal.u32 q11,d28,d2[0] - vmlal.u32 q12,d28,d2[1] - vmlal.u32 q13,d28,d3[0] - vmlal.u32 q6,d28,d3[1] - vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+2] - vmlal.u32 q7,d29,d4[0] - vmlal.u32 q8,d29,d4[1] - vmlal.u32 q9,d29,d5[0] - vmlal.u32 q10,d29,d5[1] - vmlal.u32 q11,d29,d6[0] - vmlal.u32 q12,d29,d6[1] - vmlal.u32 q13,d29,d7[0] - vmlal.u32 q6,d29,d7[1] - vst1.64 {q7},[r7,:128]! - vmlal.u32 q8,d28,d0[0] - vld1.64 {q7},[r6,:128] - vmlal.u32 q9,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2] - vmlal.u32 q10,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q11,d28,d1[1] - vmlal.u32 q12,d28,d2[0] - vmlal.u32 q13,d28,d2[1] - vmlal.u32 q6,d28,d3[0] - vmlal.u32 q7,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3] - vmlal.u32 q8,d29,d4[0] - vmlal.u32 q9,d29,d4[1] - vmlal.u32 q10,d29,d5[0] - vmlal.u32 q11,d29,d5[1] - vmlal.u32 q12,d29,d6[0] - vmlal.u32 q13,d29,d6[1] - vmlal.u32 q6,d29,d7[0] - vmlal.u32 q7,d29,d7[1] - vst1.64 {q8},[r7,:128]! - vmlal.u32 q9,d28,d0[0] - vld1.64 {q8},[r6,:128] - vmlal.u32 q10,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3] - vmlal.u32 q11,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q12,d28,d1[1] - vmlal.u32 q13,d28,d2[0] - vmlal.u32 q6,d28,d2[1] - vmlal.u32 q7,d28,d3[0] - vmlal.u32 q8,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4] - vmlal.u32 q9,d29,d4[0] - vmlal.u32 q10,d29,d4[1] - vmlal.u32 q11,d29,d5[0] - vmlal.u32 q12,d29,d5[1] - vmlal.u32 q13,d29,d6[0] - vmlal.u32 q6,d29,d6[1] - vmlal.u32 q7,d29,d7[0] - vmlal.u32 q8,d29,d7[1] - vst1.64 {q9},[r7,:128]! - vmlal.u32 q10,d28,d0[0] - vld1.64 {q9},[r6,:128] - vmlal.u32 q11,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4] - vmlal.u32 q12,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q13,d28,d1[1] - vmlal.u32 q6,d28,d2[0] - vmlal.u32 q7,d28,d2[1] - vmlal.u32 q8,d28,d3[0] - vmlal.u32 q9,d28,d3[1] - vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+5] - vmlal.u32 q10,d29,d4[0] - vmlal.u32 q11,d29,d4[1] - vmlal.u32 q12,d29,d5[0] - vmlal.u32 q13,d29,d5[1] - vmlal.u32 q6,d29,d6[0] - vmlal.u32 q7,d29,d6[1] - vmlal.u32 q8,d29,d7[0] - vmlal.u32 q9,d29,d7[1] - vst1.64 {q10},[r7,:128]! - vmlal.u32 q11,d28,d0[0] - vld1.64 {q10},[r6,:128] - vmlal.u32 q12,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5] - vmlal.u32 q13,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q6,d28,d1[1] - vmlal.u32 q7,d28,d2[0] - vmlal.u32 q8,d28,d2[1] - vmlal.u32 q9,d28,d3[0] - vmlal.u32 q10,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6] - vmlal.u32 q11,d29,d4[0] - vmlal.u32 q12,d29,d4[1] - vmlal.u32 q13,d29,d5[0] - vmlal.u32 q6,d29,d5[1] - vmlal.u32 q7,d29,d6[0] - vmlal.u32 q8,d29,d6[1] - vmlal.u32 q9,d29,d7[0] - vmlal.u32 q10,d29,d7[1] - vst1.64 {q11},[r7,:128]! - vmlal.u32 q12,d28,d0[0] - vld1.64 {q11},[r6,:128] - vmlal.u32 q13,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6] - vmlal.u32 q6,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q7,d28,d1[1] - vmlal.u32 q8,d28,d2[0] - vmlal.u32 q9,d28,d2[1] - vmlal.u32 q10,d28,d3[0] - vmlal.u32 q11,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7] - vmlal.u32 q12,d29,d4[0] - vmlal.u32 q13,d29,d4[1] - vmlal.u32 q6,d29,d5[0] - vmlal.u32 q7,d29,d5[1] - vmlal.u32 q8,d29,d6[0] - vmlal.u32 q9,d29,d6[1] - vmlal.u32 q10,d29,d7[0] - vmlal.u32 q11,d29,d7[1] - vst1.64 {q12},[r7,:128]! - vmlal.u32 q13,d28,d0[0] - vld1.64 {q12},[r6,:128] - vmlal.u32 q6,d28,d0[1] - vld1.32 {d29},[r10,:64]! 
@ pull smashed m[8*i+7] - vmlal.u32 q7,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q8,d28,d1[1] - vmlal.u32 q9,d28,d2[0] - vmlal.u32 q10,d28,d2[1] - vmlal.u32 q11,d28,d3[0] - vmlal.u32 q12,d28,d3[1] - it eq - subeq r1,r1,r5,lsl#2 @ rewind - vmlal.u32 q13,d29,d4[0] - vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] - vmlal.u32 q6,d29,d4[1] - vld1.32 {d0,d1,d2,d3},[r1]! - vmlal.u32 q7,d29,d5[0] - add r10,sp,#8 @ rewind - vmlal.u32 q8,d29,d5[1] - vmlal.u32 q9,d29,d6[0] - vmlal.u32 q10,d29,d6[1] - vmlal.u32 q11,d29,d7[0] - vst1.64 {q13},[r7,:128]! - vmlal.u32 q12,d29,d7[1] - - bne LNEON_8n_inner - add r6,sp,#128 - vst1.64 {q6,q7},[r7,:256]! - veor q2,q2,q2 @ d4-d5 - vst1.64 {q8,q9},[r7,:256]! - veor q3,q3,q3 @ d6-d7 - vst1.64 {q10,q11},[r7,:256]! - vst1.64 {q12},[r7,:128] - - subs r9,r9,#8 - vld1.64 {q6,q7},[r6,:256]! - vld1.64 {q8,q9},[r6,:256]! - vld1.64 {q10,q11},[r6,:256]! - vld1.64 {q12,q13},[r6,:256]! - - itt ne - subne r3,r3,r5,lsl#2 @ rewind - bne LNEON_8n_outer - - add r7,sp,#128 - vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame - vshr.u64 d10,d12,#16 - vst1.64 {q2,q3},[sp,:256]! - vadd.u64 d13,d13,d10 - vst1.64 {q2,q3}, [sp,:256]! - vshr.u64 d10,d13,#16 - vst1.64 {q2,q3}, [sp,:256]! - vzip.16 d12,d13 - - mov r8,r5 - b LNEON_tail_entry - -.align 4 -LNEON_tail: - vadd.u64 d12,d12,d10 - vshr.u64 d10,d12,#16 - vld1.64 {q8,q9}, [r6, :256]! - vadd.u64 d13,d13,d10 - vld1.64 {q10,q11}, [r6, :256]! - vshr.u64 d10,d13,#16 - vld1.64 {q12,q13}, [r6, :256]! - vzip.16 d12,d13 - -LNEON_tail_entry: - vadd.u64 d14,d14,d10 - vst1.32 {d12[0]}, [r7, :32]! - vshr.u64 d10,d14,#16 - vadd.u64 d15,d15,d10 - vshr.u64 d10,d15,#16 - vzip.16 d14,d15 - vadd.u64 d16,d16,d10 - vst1.32 {d14[0]}, [r7, :32]! - vshr.u64 d10,d16,#16 - vadd.u64 d17,d17,d10 - vshr.u64 d10,d17,#16 - vzip.16 d16,d17 - vadd.u64 d18,d18,d10 - vst1.32 {d16[0]}, [r7, :32]! 
- vshr.u64 d10,d18,#16 - vadd.u64 d19,d19,d10 - vshr.u64 d10,d19,#16 - vzip.16 d18,d19 - vadd.u64 d20,d20,d10 - vst1.32 {d18[0]}, [r7, :32]! - vshr.u64 d10,d20,#16 - vadd.u64 d21,d21,d10 - vshr.u64 d10,d21,#16 - vzip.16 d20,d21 - vadd.u64 d22,d22,d10 - vst1.32 {d20[0]}, [r7, :32]! - vshr.u64 d10,d22,#16 - vadd.u64 d23,d23,d10 - vshr.u64 d10,d23,#16 - vzip.16 d22,d23 - vadd.u64 d24,d24,d10 - vst1.32 {d22[0]}, [r7, :32]! - vshr.u64 d10,d24,#16 - vadd.u64 d25,d25,d10 - vshr.u64 d10,d25,#16 - vzip.16 d24,d25 - vadd.u64 d26,d26,d10 - vst1.32 {d24[0]}, [r7, :32]! - vshr.u64 d10,d26,#16 - vadd.u64 d27,d27,d10 - vshr.u64 d10,d27,#16 - vzip.16 d26,d27 - vld1.64 {q6,q7}, [r6, :256]! - subs r8,r8,#8 - vst1.32 {d26[0]}, [r7, :32]! - bne LNEON_tail - - vst1.32 {d10[0]}, [r7, :32] @ top-most bit - sub r3,r3,r5,lsl#2 @ rewind r3 - subs r1,sp,#0 @ clear carry flag - add r2,sp,r5,lsl#2 - -LNEON_sub: - ldmia r1!, {r4,r5,r6,r7} - ldmia r3!, {r8,r9,r10,r11} - sbcs r8, r4,r8 - sbcs r9, r5,r9 - sbcs r10,r6,r10 - sbcs r11,r7,r11 - teq r1,r2 @ preserves carry - stmia r0!, {r8,r9,r10,r11} - bne LNEON_sub - - ldr r10, [r1] @ load top-most bit - mov r11,sp - veor q0,q0,q0 - sub r11,r2,r11 @ this is num*4 - veor q1,q1,q1 - mov r1,sp - sub r0,r0,r11 @ rewind r0 - mov r3,r2 @ second 3/4th of frame - sbcs r10,r10,#0 @ result is carry flag - -LNEON_copy_n_zap: - ldmia r1!, {r4,r5,r6,r7} - ldmia r0, {r8,r9,r10,r11} - it cc - movcc r8, r4 - vst1.64 {q0,q1}, [r3,:256]! @ wipe - itt cc - movcc r9, r5 - movcc r10,r6 - vst1.64 {q0,q1}, [r3,:256]! @ wipe - it cc - movcc r11,r7 - ldmia r1, {r4,r5,r6,r7} - stmia r0!, {r8,r9,r10,r11} - sub r1,r1,#16 - ldmia r0, {r8,r9,r10,r11} - it cc - movcc r8, r4 - vst1.64 {q0,q1}, [r1,:256]! @ wipe - itt cc - movcc r9, r5 - movcc r10,r6 - vst1.64 {q0,q1}, [r3,:256]! 
@ wipe - it cc - movcc r11,r7 - teq r1,r2 @ preserves carry - stmia r0!, {r8,r9,r10,r11} - bne LNEON_copy_n_zap - - mov sp,ip - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11} - bx lr @ bx lr - -#endif -.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 -.comm _OPENSSL_armcap_P,4 -.non_lazy_symbol_pointer -OPENSSL_armcap_P: -.indirect_symbol _OPENSSL_armcap_P -.long 0 -.private_extern _OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/bsaes-armv7.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/bsaes-armv7.S deleted file mode 100644 index 8329a8c202..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/bsaes-armv7.S +++ /dev/null @@ -1,1536 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. 
For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel -@ of Linaro. Permission to use under GPL terms is granted. -@ ==================================================================== - -@ Bit-sliced AES for ARM NEON -@ -@ February 2012. -@ -@ This implementation is direct adaptation of bsaes-x86_64 module for -@ ARM NEON. Except that this module is endian-neutral [in sense that -@ it can be compiled for either endianness] by courtesy of vld1.8's -@ neutrality. Initial version doesn't implement interface to OpenSSL, -@ only low-level primitives and unsupported entry points, just enough -@ to collect performance results, which for Cortex-A8 core are: -@ -@ encrypt 19.5 cycles per byte processed with 128-bit key -@ decrypt 22.1 cycles per byte processed with 128-bit key -@ key conv. 440 cycles per 128-bit key/0.18 of 8x block -@ -@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, -@ which is [much] worse than anticipated (for further details see -@ http://www.openssl.org/~appro/Snapdragon-S4.html). -@ -@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code -@ manages in 20.0 cycles]. -@ -@ When comparing to x86_64 results keep in mind that NEON unit is -@ [mostly] single-issue and thus can't [fully] benefit from -@ instruction-level parallelism. And when comparing to aes-armv4 -@ results keep in mind key schedule conversion overhead (see -@ bsaes-x86_64.pl for further details)... -@ -@ - -@ April-August 2013 -@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard. 
- -#ifndef __KERNEL__ -# include - -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -# define VFP_ABI_FRAME 0x40 -#else -# define VFP_ABI_PUSH -# define VFP_ABI_POP -# define VFP_ABI_FRAME 0 -# define BSAES_ASM_EXTENDED_KEY -# define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -#endif - -#ifdef __thumb__ -# define adrl adr -#endif - -#if __ARM_MAX_ARCH__>=7 - - - -.text -.syntax unified @ ARMv7-capable assembler is expected to handle this -#if defined(__thumb2__) && !defined(__APPLE__) -.thumb -#else -.code 32 -# undef __thumb2__ -#endif - -#ifdef __thumb2__ -.thumb_func _bsaes_decrypt8 -#endif -.align 4 -_bsaes_decrypt8: - adr r6,. - vldmia r4!, {q9} @ round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,LM0ISR -#else - add r6,r6,#LM0ISR-_bsaes_decrypt8 -#endif - - vldmia r6!, {q8} @ LM0ISR - veor q10, q0, q9 @ xor with round0 key - veor q11, q1, q9 - vtbl.8 d0, {q10}, d16 - vtbl.8 d1, {q10}, d17 - veor q12, q2, q9 - vtbl.8 d2, {q11}, d16 - vtbl.8 d3, {q11}, d17 - veor q13, q3, q9 - vtbl.8 d4, {q12}, d16 - vtbl.8 d5, {q12}, d17 - veor q14, q4, q9 - vtbl.8 d6, {q13}, d16 - vtbl.8 d7, {q13}, d17 - veor q15, q5, q9 - vtbl.8 d8, {q14}, d16 - vtbl.8 d9, {q14}, d17 - veor q10, q6, q9 - vtbl.8 d10, {q15}, d16 - vtbl.8 d11, {q15}, d17 - veor q11, q7, q9 - vtbl.8 d12, {q10}, d16 - vtbl.8 d13, {q10}, d17 - vtbl.8 d14, {q11}, d16 - vtbl.8 d15, {q11}, d17 - vmov.i8 q8,#0x55 @ compose LBS0 - vmov.i8 q9,#0x33 @ compose LBS1 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q4, #1 - veor q10, q10, q7 - veor q11, q11, q5 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #1 - veor q5, q5, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q4, q4, q11 - vshr.u64 q10, q2, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q3 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q3, q3, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor 
q2, q2, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose LBS2 - vshr.u64 q10, q5, #2 - vshr.u64 q11, q4, #2 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q7, q7, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q5, q5, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q3 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q3, q3, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q3, #4 - vshr.u64 q11, q2, #4 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q6, q6, q11 - vshl.u64 q11, q11, #4 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q5 - veor q11, q11, q4 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q4, q4, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - sub r5,r5,#1 - b Ldec_sbox -.align 4 -Ldec_loop: - vldmia r4!, {q8,q9,q10,q11} - veor q8, q8, q0 - veor q9, q9, q1 - vtbl.8 d0, {q8}, d24 - vtbl.8 d1, {q8}, d25 - vldmia r4!, {q8} - veor q10, q10, q2 - vtbl.8 d2, {q9}, d24 - vtbl.8 d3, {q9}, d25 - vldmia r4!, {q9} - veor q11, q11, q3 - vtbl.8 d4, {q10}, d24 - vtbl.8 d5, {q10}, d25 - vldmia r4!, {q10} - vtbl.8 d6, {q11}, d24 - vtbl.8 d7, {q11}, d25 - vldmia r4!, {q11} - veor q8, q8, q4 - veor q9, q9, q5 - vtbl.8 d8, {q8}, d24 - vtbl.8 d9, {q8}, d25 - veor q10, q10, q6 - vtbl.8 d10, {q9}, d24 - vtbl.8 d11, {q9}, d25 - veor q11, q11, q7 - vtbl.8 d12, {q10}, d24 - vtbl.8 d13, {q10}, d25 - vtbl.8 d14, {q11}, d24 - vtbl.8 d15, {q11}, d25 -Ldec_sbox: - veor q1, q1, q4 - veor q3, q3, q4 - - veor q4, q4, q7 - veor q1, q1, q6 - veor q2, q2, q7 - veor q6, q6, q4 - - veor q0, q0, q1 - veor q2, q2, q5 - veor q7, q7, q6 - veor q3, q3, q0 - veor q5, q5, q0 - veor q1, q1, q3 - veor 
q11, q3, q0 - veor q10, q7, q4 - veor q9, q1, q6 - veor q13, q4, q0 - vmov q8, q10 - veor q12, q5, q2 - - vorr q10, q10, q9 - veor q15, q11, q8 - vand q14, q11, q12 - vorr q11, q11, q12 - veor q12, q12, q9 - vand q8, q8, q9 - veor q9, q6, q2 - vand q15, q15, q12 - vand q13, q13, q9 - veor q9, q3, q7 - veor q12, q1, q5 - veor q11, q11, q13 - veor q10, q10, q13 - vand q13, q9, q12 - vorr q9, q9, q12 - veor q11, q11, q15 - veor q8, q8, q13 - veor q10, q10, q14 - veor q9, q9, q15 - veor q8, q8, q14 - vand q12, q4, q6 - veor q9, q9, q14 - vand q13, q0, q2 - vand q14, q7, q1 - vorr q15, q3, q5 - veor q11, q11, q12 - veor q9, q9, q14 - veor q8, q8, q15 - veor q10, q10, q13 - - @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 - - @ new smaller inversion - - vand q14, q11, q9 - vmov q12, q8 - - veor q13, q10, q14 - veor q15, q8, q14 - veor q14, q8, q14 @ q14=q15 - - vbsl q13, q9, q8 - vbsl q15, q11, q10 - veor q11, q11, q10 - - vbsl q12, q13, q14 - vbsl q8, q14, q13 - - vand q14, q12, q15 - veor q9, q9, q8 - - veor q14, q14, q11 - veor q12, q5, q2 - veor q8, q1, q6 - veor q10, q15, q14 - vand q10, q10, q5 - veor q5, q5, q1 - vand q11, q1, q15 - vand q5, q5, q14 - veor q1, q11, q10 - veor q5, q5, q11 - veor q15, q15, q13 - veor q14, q14, q9 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q2 - veor q12, q12, q8 - veor q2, q2, q6 - vand q8, q8, q15 - vand q6, q6, q13 - vand q12, q12, q14 - vand q2, q2, q9 - veor q8, q8, q12 - veor q2, q2, q6 - veor q12, q12, q11 - veor q6, q6, q10 - veor q5, q5, q12 - veor q2, q2, q12 - veor q1, q1, q8 - veor q6, q6, q8 - - veor q12, q3, q0 - veor q8, q7, q4 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q0 - veor q12, q12, q8 - veor q0, q0, q4 - vand q8, q8, q15 - vand q4, q4, q13 - vand q12, q12, q14 - vand q0, q0, q9 - veor q8, q8, q12 - veor q0, q0, q4 - veor q12, q12, q11 - veor q4, q4, q10 - veor q15, q15, q13 - veor q14, q14, q9 - veor q10, q15, q14 - vand q10, q10, q3 - veor q3, q3, 
q7 - vand q11, q7, q15 - vand q3, q3, q14 - veor q7, q11, q10 - veor q3, q3, q11 - veor q3, q3, q12 - veor q0, q0, q12 - veor q7, q7, q8 - veor q4, q4, q8 - veor q1, q1, q7 - veor q6, q6, q5 - - veor q4, q4, q1 - veor q2, q2, q7 - veor q5, q5, q7 - veor q4, q4, q2 - veor q7, q7, q0 - veor q4, q4, q5 - veor q3, q3, q6 - veor q6, q6, q1 - veor q3, q3, q4 - - veor q4, q4, q0 - veor q7, q7, q3 - subs r5,r5,#1 - bcc Ldec_done - @ multiplication by 0x05-0x00-0x04-0x00 - vext.8 q8, q0, q0, #8 - vext.8 q14, q3, q3, #8 - vext.8 q15, q5, q5, #8 - veor q8, q8, q0 - vext.8 q9, q1, q1, #8 - veor q14, q14, q3 - vext.8 q10, q6, q6, #8 - veor q15, q15, q5 - vext.8 q11, q4, q4, #8 - veor q9, q9, q1 - vext.8 q12, q2, q2, #8 - veor q10, q10, q6 - vext.8 q13, q7, q7, #8 - veor q11, q11, q4 - veor q12, q12, q2 - veor q13, q13, q7 - - veor q0, q0, q14 - veor q1, q1, q14 - veor q6, q6, q8 - veor q2, q2, q10 - veor q4, q4, q9 - veor q1, q1, q15 - veor q6, q6, q15 - veor q2, q2, q14 - veor q7, q7, q11 - veor q4, q4, q14 - veor q3, q3, q12 - veor q2, q2, q15 - veor q7, q7, q15 - veor q5, q5, q13 - vext.8 q8, q0, q0, #12 @ x0 <<< 32 - vext.8 q9, q1, q1, #12 - veor q0, q0, q8 @ x0 ^ (x0 <<< 32) - vext.8 q10, q6, q6, #12 - veor q1, q1, q9 - vext.8 q11, q4, q4, #12 - veor q6, q6, q10 - vext.8 q12, q2, q2, #12 - veor q4, q4, q11 - vext.8 q13, q7, q7, #12 - veor q2, q2, q12 - vext.8 q14, q3, q3, #12 - veor q7, q7, q13 - vext.8 q15, q5, q5, #12 - veor q3, q3, q14 - - veor q9, q9, q0 - veor q5, q5, q15 - vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) - veor q10, q10, q1 - veor q8, q8, q5 - veor q9, q9, q5 - vext.8 q1, q1, q1, #8 - veor q13, q13, q2 - veor q0, q0, q8 - veor q14, q14, q7 - veor q1, q1, q9 - vext.8 q8, q2, q2, #8 - veor q12, q12, q4 - vext.8 q9, q7, q7, #8 - veor q15, q15, q3 - vext.8 q2, q4, q4, #8 - veor q11, q11, q6 - vext.8 q7, q5, q5, #8 - veor q12, q12, q5 - vext.8 q4, q3, q3, #8 - veor q11, q11, q5 - vext.8 q3, q6, q6, #8 - veor q5, q9, q13 - veor q11, q11, q2 - veor q7, 
q7, q15 - veor q6, q4, q14 - veor q4, q8, q12 - veor q2, q3, q10 - vmov q3, q11 - @ vmov q5, q9 - vldmia r6, {q12} @ LISR - ite eq @ Thumb2 thing, sanity check in ARM - addeq r6,r6,#0x10 - bne Ldec_loop - vldmia r6, {q12} @ LISRM0 - b Ldec_loop -.align 4 -Ldec_done: - vmov.i8 q8,#0x55 @ compose LBS0 - vmov.i8 q9,#0x33 @ compose LBS1 - vshr.u64 q10, q3, #1 - vshr.u64 q11, q2, #1 - veor q10, q10, q5 - veor q11, q11, q7 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #1 - veor q7, q7, q11 - vshl.u64 q11, q11, #1 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q4 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q4, q4, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose LBS2 - vshr.u64 q10, q7, #2 - vshr.u64 q11, q2, #2 - veor q10, q10, q5 - veor q11, q11, q3 - vand q10, q10, q9 - vand q11, q11, q9 - veor q5, q5, q10 - vshl.u64 q10, q10, #2 - veor q3, q3, q11 - vshl.u64 q11, q11, #2 - veor q7, q7, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q4 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q4, q4, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q4, #4 - vshr.u64 q11, q6, #4 - veor q10, q10, q5 - veor q11, q11, q3 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q3, q3, q11 - vshl.u64 q11, q11, #4 - veor q4, q4, q10 - veor q6, q6, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q7 - veor q11, q11, q2 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q2, q2, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - vldmia r4, {q8} @ last round key - veor q6, q6, q8 - veor q4, q4, q8 - veor q2, q2, q8 - veor q7, q7, q8 - veor q3, q3, q8 - 
veor q5, q5, q8 - veor q0, q0, q8 - veor q1, q1, q8 - bx lr - - - -.align 6 -_bsaes_const: -LM0ISR:@ InvShiftRows constants -.quad 0x0a0e0206070b0f03, 0x0004080c0d010509 -LISR: -.quad 0x0504070602010003, 0x0f0e0d0c080b0a09 -LISRM0: -.quad 0x01040b0e0205080f, 0x0306090c00070a0d -LM0SR:@ ShiftRows constants -.quad 0x0a0e02060f03070b, 0x0004080c05090d01 -LSR: -.quad 0x0504070600030201, 0x0f0e0d0c0a09080b -LSRM0: -.quad 0x0304090e00050a0f, 0x01060b0c0207080d -LM0: -.quad 0x02060a0e03070b0f, 0x0004080c0105090d -LREVM0SR: -.quad 0x090d01050c000408, 0x03070b0f060a0e02 -.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 6 - - -#ifdef __thumb2__ -.thumb_func _bsaes_encrypt8 -#endif -.align 4 -_bsaes_encrypt8: - adr r6,. - vldmia r4!, {q9} @ round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,LM0SR -#else - sub r6,r6,#_bsaes_encrypt8-LM0SR -#endif - - vldmia r6!, {q8} @ LM0SR -_bsaes_encrypt8_alt: - veor q10, q0, q9 @ xor with round0 key - veor q11, q1, q9 - vtbl.8 d0, {q10}, d16 - vtbl.8 d1, {q10}, d17 - veor q12, q2, q9 - vtbl.8 d2, {q11}, d16 - vtbl.8 d3, {q11}, d17 - veor q13, q3, q9 - vtbl.8 d4, {q12}, d16 - vtbl.8 d5, {q12}, d17 - veor q14, q4, q9 - vtbl.8 d6, {q13}, d16 - vtbl.8 d7, {q13}, d17 - veor q15, q5, q9 - vtbl.8 d8, {q14}, d16 - vtbl.8 d9, {q14}, d17 - veor q10, q6, q9 - vtbl.8 d10, {q15}, d16 - vtbl.8 d11, {q15}, d17 - veor q11, q7, q9 - vtbl.8 d12, {q10}, d16 - vtbl.8 d13, {q10}, d17 - vtbl.8 d14, {q11}, d16 - vtbl.8 d15, {q11}, d17 -_bsaes_encrypt8_bitslice: - vmov.i8 q8,#0x55 @ compose LBS0 - vmov.i8 q9,#0x33 @ compose LBS1 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q4, #1 - veor q10, q10, q7 - veor q11, q11, q5 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #1 - veor q5, q5, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q4, q4, q11 
- vshr.u64 q10, q2, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q3 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q3, q3, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q2, q2, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose LBS2 - vshr.u64 q10, q5, #2 - vshr.u64 q11, q4, #2 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q7, q7, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q5, q5, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q3 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q3, q3, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q3, #4 - vshr.u64 q11, q2, #4 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q6, q6, q11 - vshl.u64 q11, q11, #4 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q5 - veor q11, q11, q4 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q4, q4, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - sub r5,r5,#1 - b Lenc_sbox -.align 4 -Lenc_loop: - vldmia r4!, {q8,q9,q10,q11} - veor q8, q8, q0 - veor q9, q9, q1 - vtbl.8 d0, {q8}, d24 - vtbl.8 d1, {q8}, d25 - vldmia r4!, {q8} - veor q10, q10, q2 - vtbl.8 d2, {q9}, d24 - vtbl.8 d3, {q9}, d25 - vldmia r4!, {q9} - veor q11, q11, q3 - vtbl.8 d4, {q10}, d24 - vtbl.8 d5, {q10}, d25 - vldmia r4!, {q10} - vtbl.8 d6, {q11}, d24 - vtbl.8 d7, {q11}, d25 - vldmia r4!, {q11} - veor q8, q8, q4 - veor q9, q9, q5 - vtbl.8 d8, {q8}, d24 - vtbl.8 d9, {q8}, d25 - veor q10, q10, q6 - vtbl.8 d10, {q9}, d24 - vtbl.8 d11, {q9}, d25 - veor q11, q11, q7 - vtbl.8 d12, {q10}, d24 - vtbl.8 d13, {q10}, d25 - vtbl.8 d14, {q11}, d24 - vtbl.8 d15, {q11}, d25 -Lenc_sbox: - veor q2, 
q2, q1 - veor q5, q5, q6 - veor q3, q3, q0 - veor q6, q6, q2 - veor q5, q5, q0 - - veor q6, q6, q3 - veor q3, q3, q7 - veor q7, q7, q5 - veor q3, q3, q4 - veor q4, q4, q5 - - veor q2, q2, q7 - veor q3, q3, q1 - veor q1, q1, q5 - veor q11, q7, q4 - veor q10, q1, q2 - veor q9, q5, q3 - veor q13, q2, q4 - vmov q8, q10 - veor q12, q6, q0 - - vorr q10, q10, q9 - veor q15, q11, q8 - vand q14, q11, q12 - vorr q11, q11, q12 - veor q12, q12, q9 - vand q8, q8, q9 - veor q9, q3, q0 - vand q15, q15, q12 - vand q13, q13, q9 - veor q9, q7, q1 - veor q12, q5, q6 - veor q11, q11, q13 - veor q10, q10, q13 - vand q13, q9, q12 - vorr q9, q9, q12 - veor q11, q11, q15 - veor q8, q8, q13 - veor q10, q10, q14 - veor q9, q9, q15 - veor q8, q8, q14 - vand q12, q2, q3 - veor q9, q9, q14 - vand q13, q4, q0 - vand q14, q1, q5 - vorr q15, q7, q6 - veor q11, q11, q12 - veor q9, q9, q14 - veor q8, q8, q15 - veor q10, q10, q13 - - @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 - - @ new smaller inversion - - vand q14, q11, q9 - vmov q12, q8 - - veor q13, q10, q14 - veor q15, q8, q14 - veor q14, q8, q14 @ q14=q15 - - vbsl q13, q9, q8 - vbsl q15, q11, q10 - veor q11, q11, q10 - - vbsl q12, q13, q14 - vbsl q8, q14, q13 - - vand q14, q12, q15 - veor q9, q9, q8 - - veor q14, q14, q11 - veor q12, q6, q0 - veor q8, q5, q3 - veor q10, q15, q14 - vand q10, q10, q6 - veor q6, q6, q5 - vand q11, q5, q15 - vand q6, q6, q14 - veor q5, q11, q10 - veor q6, q6, q11 - veor q15, q15, q13 - veor q14, q14, q9 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q0 - veor q12, q12, q8 - veor q0, q0, q3 - vand q8, q8, q15 - vand q3, q3, q13 - vand q12, q12, q14 - vand q0, q0, q9 - veor q8, q8, q12 - veor q0, q0, q3 - veor q12, q12, q11 - veor q3, q3, q10 - veor q6, q6, q12 - veor q0, q0, q12 - veor q5, q5, q8 - veor q3, q3, q8 - - veor q12, q7, q4 - veor q8, q1, q2 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q4 - veor q12, q12, q8 - veor q4, q4, q2 - vand q8, q8, q15 
- vand q2, q2, q13 - vand q12, q12, q14 - vand q4, q4, q9 - veor q8, q8, q12 - veor q4, q4, q2 - veor q12, q12, q11 - veor q2, q2, q10 - veor q15, q15, q13 - veor q14, q14, q9 - veor q10, q15, q14 - vand q10, q10, q7 - veor q7, q7, q1 - vand q11, q1, q15 - vand q7, q7, q14 - veor q1, q11, q10 - veor q7, q7, q11 - veor q7, q7, q12 - veor q4, q4, q12 - veor q1, q1, q8 - veor q2, q2, q8 - veor q7, q7, q0 - veor q1, q1, q6 - veor q6, q6, q0 - veor q4, q4, q7 - veor q0, q0, q1 - - veor q1, q1, q5 - veor q5, q5, q2 - veor q2, q2, q3 - veor q3, q3, q5 - veor q4, q4, q5 - - veor q6, q6, q3 - subs r5,r5,#1 - bcc Lenc_done - vext.8 q8, q0, q0, #12 @ x0 <<< 32 - vext.8 q9, q1, q1, #12 - veor q0, q0, q8 @ x0 ^ (x0 <<< 32) - vext.8 q10, q4, q4, #12 - veor q1, q1, q9 - vext.8 q11, q6, q6, #12 - veor q4, q4, q10 - vext.8 q12, q3, q3, #12 - veor q6, q6, q11 - vext.8 q13, q7, q7, #12 - veor q3, q3, q12 - vext.8 q14, q2, q2, #12 - veor q7, q7, q13 - vext.8 q15, q5, q5, #12 - veor q2, q2, q14 - - veor q9, q9, q0 - veor q5, q5, q15 - vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) - veor q10, q10, q1 - veor q8, q8, q5 - veor q9, q9, q5 - vext.8 q1, q1, q1, #8 - veor q13, q13, q3 - veor q0, q0, q8 - veor q14, q14, q7 - veor q1, q1, q9 - vext.8 q8, q3, q3, #8 - veor q12, q12, q6 - vext.8 q9, q7, q7, #8 - veor q15, q15, q2 - vext.8 q3, q6, q6, #8 - veor q11, q11, q4 - vext.8 q7, q5, q5, #8 - veor q12, q12, q5 - vext.8 q6, q2, q2, #8 - veor q11, q11, q5 - vext.8 q2, q4, q4, #8 - veor q5, q9, q13 - veor q4, q8, q12 - veor q3, q3, q11 - veor q7, q7, q15 - veor q6, q6, q14 - @ vmov q4, q8 - veor q2, q2, q10 - @ vmov q5, q9 - vldmia r6, {q12} @ LSR - ite eq @ Thumb2 thing, samity check in ARM - addeq r6,r6,#0x10 - bne Lenc_loop - vldmia r6, {q12} @ LSRM0 - b Lenc_loop -.align 4 -Lenc_done: - vmov.i8 q8,#0x55 @ compose LBS0 - vmov.i8 q9,#0x33 @ compose LBS1 - vshr.u64 q10, q2, #1 - vshr.u64 q11, q3, #1 - veor q10, q10, q5 - veor q11, q11, q7 - vand q10, q10, q8 - vand q11, q11, q8 - veor 
q5, q5, q10 - vshl.u64 q10, q10, #1 - veor q7, q7, q11 - vshl.u64 q11, q11, #1 - veor q2, q2, q10 - veor q3, q3, q11 - vshr.u64 q10, q4, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q6 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q6, q6, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q4, q4, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose LBS2 - vshr.u64 q10, q7, #2 - vshr.u64 q11, q3, #2 - veor q10, q10, q5 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q5, q5, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q7, q7, q10 - veor q3, q3, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q6 - veor q11, q11, q4 - vand q10, q10, q9 - vand q11, q11, q9 - veor q6, q6, q10 - vshl.u64 q10, q10, #2 - veor q4, q4, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q6, #4 - vshr.u64 q11, q4, #4 - veor q10, q10, q5 - veor q11, q11, q2 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q2, q2, q11 - vshl.u64 q11, q11, #4 - veor q6, q6, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q7 - veor q11, q11, q3 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q3, q3, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - vldmia r4, {q8} @ last round key - veor q4, q4, q8 - veor q6, q6, q8 - veor q3, q3, q8 - veor q7, q7, q8 - veor q2, q2, q8 - veor q5, q5, q8 - veor q0, q0, q8 - veor q1, q1, q8 - bx lr - -#ifdef __thumb2__ -.thumb_func _bsaes_key_convert -#endif -.align 4 -_bsaes_key_convert: - adr r6,. - vld1.8 {q7}, [r4]! @ load round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,LM0 -#else - sub r6,r6,#_bsaes_key_convert-LM0 -#endif - vld1.8 {q15}, [r4]! 
@ load round 1 key - - vmov.i8 q8, #0x01 @ bit masks - vmov.i8 q9, #0x02 - vmov.i8 q10, #0x04 - vmov.i8 q11, #0x08 - vmov.i8 q12, #0x10 - vmov.i8 q13, #0x20 - vldmia r6, {q14} @ LM0 - -#ifdef __ARMEL__ - vrev32.8 q7, q7 - vrev32.8 q15, q15 -#endif - sub r5,r5,#1 - vstmia r12!, {q7} @ save round 0 key - b Lkey_loop - -.align 4 -Lkey_loop: - vtbl.8 d14,{q15},d28 - vtbl.8 d15,{q15},d29 - vmov.i8 q6, #0x40 - vmov.i8 q15, #0x80 - - vtst.8 q0, q7, q8 - vtst.8 q1, q7, q9 - vtst.8 q2, q7, q10 - vtst.8 q3, q7, q11 - vtst.8 q4, q7, q12 - vtst.8 q5, q7, q13 - vtst.8 q6, q7, q6 - vtst.8 q7, q7, q15 - vld1.8 {q15}, [r4]! @ load next round key - vmvn q0, q0 @ "pnot" - vmvn q1, q1 - vmvn q5, q5 - vmvn q6, q6 -#ifdef __ARMEL__ - vrev32.8 q15, q15 -#endif - subs r5,r5,#1 - vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key - bne Lkey_loop - - vmov.i8 q7,#0x63 @ compose L63 - @ don't save last round key - bx lr - -.globl _bsaes_cbc_encrypt -.private_extern _bsaes_cbc_encrypt -#ifdef __thumb2__ -.thumb_func _bsaes_cbc_encrypt -#endif -.align 5 -_bsaes_cbc_encrypt: - @ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for - @ short inputs. We patch this out, using bsaes for all input sizes. 
- - @ it is up to the caller to make sure we are called with enc == 0 - - mov ip, sp - stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} - VFP_ABI_PUSH - ldr r8, [ip] @ IV is 1st arg on the stack - mov r2, r2, lsr#4 @ len in 16 byte blocks - sub sp, #0x10 @ scratch space to carry over the IV - mov r9, sp @ save sp - - ldr r10, [r3, #240] @ get # of rounds -#ifndef BSAES_ASM_EXTENDED_KEY - @ allocate the key schedule on the stack - sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key - add r12, #96 @ sifze of bit-slices key schedule - - @ populate the key schedule - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - mov sp, r12 @ sp is sp - bl _bsaes_key_convert - vldmia sp, {q6} - vstmia r12, {q15} @ save last round key - veor q7, q7, q6 @ fix up round 0 key - vstmia sp, {q7} -#else - ldr r12, [r3, #244] - eors r12, #1 - beq 0f - - @ populate the key schedule - str r12, [r3, #244] - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - add r12, r3, #248 @ pass key schedule - bl _bsaes_key_convert - add r4, r3, #248 - vldmia r4, {q6} - vstmia r12, {q15} @ save last round key - veor q7, q7, q6 @ fix up round 0 key - vstmia r4, {q7} - -.align 2 - -#endif - - vld1.8 {q15}, [r8] @ load IV - b Lcbc_dec_loop - -.align 4 -Lcbc_dec_loop: - subs r2, r2, #0x8 - bmi Lcbc_dec_loop_finish - - vld1.8 {q0,q1}, [r0]! @ load input - vld1.8 {q2,q3}, [r0]! -#ifndef BSAES_ASM_EXTENDED_KEY - mov r4, sp @ pass the key -#else - add r4, r3, #248 -#endif - vld1.8 {q4,q5}, [r0]! - mov r5, r10 - vld1.8 {q6,q7}, [r0] - sub r0, r0, #0x60 - vstmia r9, {q15} @ put aside IV - - bl _bsaes_decrypt8 - - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q14,q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q3, q3, q13 - vst1.8 {q6}, [r1]! - veor q5, q5, q14 - vst1.8 {q4}, [r1]! 
- vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - vst1.8 {q3}, [r1]! - vst1.8 {q5}, [r1]! - - b Lcbc_dec_loop - -Lcbc_dec_loop_finish: - adds r2, r2, #8 - beq Lcbc_dec_done - - @ Set up most parameters for the _bsaes_decrypt8 call. -#ifndef BSAES_ASM_EXTENDED_KEY - mov r4, sp @ pass the key -#else - add r4, r3, #248 -#endif - mov r5, r10 - vstmia r9, {q15} @ put aside IV - - vld1.8 {q0}, [r0]! @ load input - cmp r2, #2 - blo Lcbc_dec_one - vld1.8 {q1}, [r0]! - beq Lcbc_dec_two - vld1.8 {q2}, [r0]! - cmp r2, #4 - blo Lcbc_dec_three - vld1.8 {q3}, [r0]! - beq Lcbc_dec_four - vld1.8 {q4}, [r0]! - cmp r2, #6 - blo Lcbc_dec_five - vld1.8 {q5}, [r0]! - beq Lcbc_dec_six - vld1.8 {q6}, [r0]! - sub r0, r0, #0x70 - - bl _bsaes_decrypt8 - - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q3, q3, q13 - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - vst1.8 {q3}, [r1]! - b Lcbc_dec_done -.align 4 -Lcbc_dec_six: - sub r0, r0, #0x60 - bl _bsaes_decrypt8 - vldmia r9,{q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - b Lcbc_dec_done -.align 4 -Lcbc_dec_five: - sub r0, r0, #0x50 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q15}, [r0]! - veor q4, q4, q10 - vst1.8 {q0,q1}, [r1]! @ write output - veor q2, q2, q11 - vst1.8 {q6}, [r1]! 
- vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - b Lcbc_dec_done -.align 4 -Lcbc_dec_four: - sub r0, r0, #0x40 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q15}, [r0]! - veor q4, q4, q10 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - b Lcbc_dec_done -.align 4 -Lcbc_dec_three: - sub r0, r0, #0x30 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q15}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - b Lcbc_dec_done -.align 4 -Lcbc_dec_two: - sub r0, r0, #0x20 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q15}, [r0]! @ reload input - veor q1, q1, q8 - vst1.8 {q0,q1}, [r1]! @ write output - b Lcbc_dec_done -.align 4 -Lcbc_dec_one: - sub r0, r0, #0x10 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q15}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vst1.8 {q0}, [r1]! @ write output - -Lcbc_dec_done: -#ifndef BSAES_ASM_EXTENDED_KEY - vmov.i32 q0, #0 - vmov.i32 q1, #0 -Lcbc_dec_bzero:@ wipe key schedule [if any] - vstmia sp!, {q0,q1} - cmp sp, r9 - bne Lcbc_dec_bzero -#endif - - mov sp, r9 - add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb - vst1.8 {q15}, [r8] @ return IV - VFP_ABI_POP - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} - -.globl _bsaes_ctr32_encrypt_blocks -.private_extern _bsaes_ctr32_encrypt_blocks -#ifdef __thumb2__ -.thumb_func _bsaes_ctr32_encrypt_blocks -#endif -.align 5 -_bsaes_ctr32_encrypt_blocks: - @ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this - @ out to retain a constant-time implementation. 
- mov ip, sp - stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} - VFP_ABI_PUSH - ldr r8, [ip] @ ctr is 1st arg on the stack - sub sp, sp, #0x10 @ scratch space to carry over the ctr - mov r9, sp @ save sp - - ldr r10, [r3, #240] @ get # of rounds -#ifndef BSAES_ASM_EXTENDED_KEY - @ allocate the key schedule on the stack - sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key - add r12, #96 @ size of bit-sliced key schedule - - @ populate the key schedule - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - mov sp, r12 @ sp is sp - bl _bsaes_key_convert - veor q7,q7,q15 @ fix up last round key - vstmia r12, {q7} @ save last round key - - vld1.8 {q0}, [r8] @ load counter -#ifdef __APPLE__ - mov r8, #:lower16:(LREVM0SR-LM0) - add r8, r6, r8 -#else - add r8, r6, #LREVM0SR-LM0 @ borrow r8 -#endif - vldmia sp, {q4} @ load round0 key -#else - ldr r12, [r3, #244] - eors r12, #1 - beq 0f - - @ populate the key schedule - str r12, [r3, #244] - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - add r12, r3, #248 @ pass key schedule - bl _bsaes_key_convert - veor q7,q7,q15 @ fix up last round key - vstmia r12, {q7} @ save last round key - -.align 2 - add r12, r3, #248 - vld1.8 {q0}, [r8] @ load counter - adrl r8, LREVM0SR @ borrow r8 - vldmia r12, {q4} @ load round0 key - sub sp, #0x10 @ place for adjusted round0 key -#endif - - vmov.i32 q8,#1 @ compose 1<<96 - veor q9,q9,q9 - vrev32.8 q0,q0 - vext.8 q8,q9,q8,#4 - vrev32.8 q4,q4 - vadd.u32 q9,q8,q8 @ compose 2<<96 - vstmia sp, {q4} @ save adjusted round0 key - b Lctr_enc_loop - -.align 4 -Lctr_enc_loop: - vadd.u32 q10, q8, q9 @ compose 3<<96 - vadd.u32 q1, q0, q8 @ +1 - vadd.u32 q2, q0, q9 @ +2 - vadd.u32 q3, q0, q10 @ +3 - vadd.u32 q4, q1, q10 - vadd.u32 q5, q2, q10 - vadd.u32 q6, q3, q10 - vadd.u32 q7, q4, q10 - vadd.u32 q10, q5, q10 @ next counter - - @ Borrow prologue from _bsaes_encrypt8 to use the opportunity - @ to flip byte order in 32-bit counter - - vldmia sp, {q9} @ load round0 key -#ifndef 
BSAES_ASM_EXTENDED_KEY - add r4, sp, #0x10 @ pass next round key -#else - add r4, r3, #264 -#endif - vldmia r8, {q8} @ LREVM0SR - mov r5, r10 @ pass rounds - vstmia r9, {q10} @ save next counter -#ifdef __APPLE__ - mov r6, #:lower16:(LREVM0SR-LSR) - sub r6, r8, r6 -#else - sub r6, r8, #LREVM0SR-LSR @ pass constants -#endif - - bl _bsaes_encrypt8_alt - - subs r2, r2, #8 - blo Lctr_enc_loop_done - - vld1.8 {q8,q9}, [r0]! @ load input - vld1.8 {q10,q11}, [r0]! - veor q0, q8 - veor q1, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q10 - veor q6, q11 - vld1.8 {q14,q15}, [r0]! - veor q3, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q7, q13 - veor q2, q14 - vst1.8 {q4}, [r1]! - veor q5, q15 - vst1.8 {q6}, [r1]! - vmov.i32 q8, #1 @ compose 1<<96 - vst1.8 {q3}, [r1]! - veor q9, q9, q9 - vst1.8 {q7}, [r1]! - vext.8 q8, q9, q8, #4 - vst1.8 {q2}, [r1]! - vadd.u32 q9,q8,q8 @ compose 2<<96 - vst1.8 {q5}, [r1]! - vldmia r9, {q0} @ load counter - - bne Lctr_enc_loop - b Lctr_enc_done - -.align 4 -Lctr_enc_loop_done: - add r2, r2, #8 - vld1.8 {q8}, [r0]! @ load input - veor q0, q8 - vst1.8 {q0}, [r1]! @ write output - cmp r2, #2 - blo Lctr_enc_done - vld1.8 {q9}, [r0]! - veor q1, q9 - vst1.8 {q1}, [r1]! - beq Lctr_enc_done - vld1.8 {q10}, [r0]! - veor q4, q10 - vst1.8 {q4}, [r1]! - cmp r2, #4 - blo Lctr_enc_done - vld1.8 {q11}, [r0]! - veor q6, q11 - vst1.8 {q6}, [r1]! - beq Lctr_enc_done - vld1.8 {q12}, [r0]! - veor q3, q12 - vst1.8 {q3}, [r1]! - cmp r2, #6 - blo Lctr_enc_done - vld1.8 {q13}, [r0]! - veor q7, q13 - vst1.8 {q7}, [r1]! - beq Lctr_enc_done - vld1.8 {q14}, [r0] - veor q2, q14 - vst1.8 {q2}, [r1]! 
- -Lctr_enc_done: - vmov.i32 q0, #0 - vmov.i32 q1, #0 -#ifndef BSAES_ASM_EXTENDED_KEY -Lctr_enc_bzero:@ wipe key schedule [if any] - vstmia sp!, {q0,q1} - cmp sp, r9 - bne Lctr_enc_bzero -#else - vstmia sp, {q0,q1} -#endif - - mov sp, r9 - add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb - VFP_ABI_POP - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return - - @ OpenSSL contains aes_nohw_* fallback code here. We patch this - @ out to retain a constant-time implementation. - -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghash-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghash-armv4.S deleted file mode 100644 index fccd57d30e..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghash-armv4.S +++ /dev/null @@ -1,600 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL -@ instructions are in aesv8-armx.pl.) 
- - -.text -#if defined(__thumb2__) || defined(__clang__) -.syntax unified -#define ldrplb ldrbpl -#define ldrneb ldrbne -#endif -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - - -.align 5 -rem_4bit: -.short 0x0000,0x1C20,0x3840,0x2460 -.short 0x7080,0x6CA0,0x48C0,0x54E0 -.short 0xE100,0xFD20,0xD940,0xC560 -.short 0x9180,0x8DA0,0xA9C0,0xB5E0 - - -#ifdef __thumb2__ -.thumb_func rem_4bit_get -#endif -rem_4bit_get: -#if defined(__thumb2__) - adr r2,rem_4bit -#else - sub r2,pc,#8+32 @ &rem_4bit -#endif - b Lrem_4bit_got - nop - nop - - -.globl _gcm_ghash_4bit -.private_extern _gcm_ghash_4bit -#ifdef __thumb2__ -.thumb_func _gcm_ghash_4bit -#endif -.align 4 -_gcm_ghash_4bit: -#if defined(__thumb2__) - adr r12,rem_4bit -#else - sub r12,pc,#8+48 @ &rem_4bit -#endif - add r3,r2,r3 @ r3 to point at the end - stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too - - ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ... - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... to stack - - ldrb r12,[r2,#15] - ldrb r14,[r0,#15] -Louter: - eor r12,r12,r14 - and r14,r12,#0xf0 - and r12,r12,#0x0f - mov r3,#14 - - add r7,r1,r12,lsl#4 - ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo] - add r11,r1,r14 - ldrb r12,[r2,#14] - - and r14,r4,#0xf @ rem - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - add r14,r14,r14 - eor r4,r8,r4,lsr#4 - ldrh r8,[sp,r14] @ rem_4bit[rem] - eor r4,r4,r5,lsl#28 - ldrb r14,[r0,#14] - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - eor r12,r12,r14 - and r14,r12,#0xf0 - and r12,r12,#0x0f - eor r7,r7,r8,lsl#16 - -Linner: - add r11,r1,r12,lsl#4 - and r12,r4,#0xf @ rem - subs r3,r3,#1 - add r12,r12,r12 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - ldrh r8,[sp,r12] @ rem_4bit[rem] - eor r6,r10,r6,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r12,[r2,r3] - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - - add 
r11,r1,r14 - and r14,r4,#0xf @ rem - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - add r14,r14,r14 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - eor r4,r8,r4,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r8,[r0,r3] - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - ldrh r9,[sp,r14] - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 -#ifdef __thumb2__ - it pl -#endif - eorpl r12,r12,r8 - eor r7,r11,r7,lsr#4 -#ifdef __thumb2__ - itt pl -#endif - andpl r14,r12,#0xf0 - andpl r12,r12,#0x0f - eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem] - bpl Linner - - ldr r3,[sp,#32] @ re-load r3/end - add r2,r2,#16 - mov r14,r4 -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r4,r4 - str r4,[r0,#12] -#elif defined(__ARMEB__) - str r4,[r0,#12] -#else - mov r9,r4,lsr#8 - strb r4,[r0,#12+3] - mov r10,r4,lsr#16 - strb r9,[r0,#12+2] - mov r11,r4,lsr#24 - strb r10,[r0,#12+1] - strb r11,[r0,#12] -#endif - cmp r2,r3 -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r5,r5 - str r5,[r0,#8] -#elif defined(__ARMEB__) - str r5,[r0,#8] -#else - mov r9,r5,lsr#8 - strb r5,[r0,#8+3] - mov r10,r5,lsr#16 - strb r9,[r0,#8+2] - mov r11,r5,lsr#24 - strb r10,[r0,#8+1] - strb r11,[r0,#8] -#endif - -#ifdef __thumb2__ - it ne -#endif - ldrneb r12,[r2,#15] -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r6,r6 - str r6,[r0,#4] -#elif defined(__ARMEB__) - str r6,[r0,#4] -#else - mov r9,r6,lsr#8 - strb r6,[r0,#4+3] - mov r10,r6,lsr#16 - strb r9,[r0,#4+2] - mov r11,r6,lsr#24 - strb r10,[r0,#4+1] - strb r11,[r0,#4] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r7,r7 - str r7,[r0,#0] -#elif defined(__ARMEB__) - str r7,[r0,#0] -#else - mov r9,r7,lsr#8 - strb r7,[r0,#0+3] - mov r10,r7,lsr#16 - strb r9,[r0,#0+2] - mov r11,r7,lsr#24 - strb r10,[r0,#0+1] - strb r11,[r0,#0] -#endif - - bne Louter - - add sp,sp,#36 -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ 
interoperable with Thumb ISA:-) -#endif - - -.globl _gcm_gmult_4bit -.private_extern _gcm_gmult_4bit -#ifdef __thumb2__ -.thumb_func _gcm_gmult_4bit -#endif -_gcm_gmult_4bit: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - ldrb r12,[r0,#15] - b rem_4bit_get -Lrem_4bit_got: - and r14,r12,#0xf0 - and r12,r12,#0x0f - mov r3,#14 - - add r7,r1,r12,lsl#4 - ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo] - ldrb r12,[r0,#14] - - add r11,r1,r14 - and r14,r4,#0xf @ rem - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - add r14,r14,r14 - eor r4,r8,r4,lsr#4 - ldrh r8,[r2,r14] @ rem_4bit[rem] - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - and r14,r12,#0xf0 - eor r7,r7,r8,lsl#16 - and r12,r12,#0x0f - -Loop: - add r11,r1,r12,lsl#4 - and r12,r4,#0xf @ rem - subs r3,r3,#1 - add r12,r12,r12 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - ldrh r8,[r2,r12] @ rem_4bit[rem] - eor r6,r10,r6,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r12,[r0,r3] - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - - add r11,r1,r14 - and r14,r4,#0xf @ rem - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - add r14,r14,r14 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - ldrh r8,[r2,r14] @ rem_4bit[rem] - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 -#ifdef __thumb2__ - itt pl -#endif - andpl r14,r12,#0xf0 - andpl r12,r12,#0x0f - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - bpl Loop -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r4,r4 - str r4,[r0,#12] -#elif defined(__ARMEB__) - str r4,[r0,#12] -#else - mov r9,r4,lsr#8 - strb r4,[r0,#12+3] - mov r10,r4,lsr#16 - strb r9,[r0,#12+2] - mov r11,r4,lsr#24 - strb r10,[r0,#12+1] - strb r11,[r0,#12] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r5,r5 - str r5,[r0,#8] -#elif defined(__ARMEB__) - str r5,[r0,#8] 
-#else - mov r9,r5,lsr#8 - strb r5,[r0,#8+3] - mov r10,r5,lsr#16 - strb r9,[r0,#8+2] - mov r11,r5,lsr#24 - strb r10,[r0,#8+1] - strb r11,[r0,#8] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r6,r6 - str r6,[r0,#4] -#elif defined(__ARMEB__) - str r6,[r0,#4] -#else - mov r9,r6,lsr#8 - strb r6,[r0,#4+3] - mov r10,r6,lsr#16 - strb r9,[r0,#4+2] - mov r11,r6,lsr#24 - strb r10,[r0,#4+1] - strb r11,[r0,#4] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r7,r7 - str r7,[r0,#0] -#elif defined(__ARMEB__) - str r7,[r0,#0] -#else - mov r9,r7,lsr#8 - strb r7,[r0,#0+3] - mov r10,r7,lsr#16 - strb r9,[r0,#0+2] - mov r11,r7,lsr#24 - strb r10,[r0,#0+1] - strb r11,[r0,#0] -#endif - -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - -#if __ARM_MAX_ARCH__>=7 - - - -.globl _gcm_init_neon -.private_extern _gcm_init_neon -#ifdef __thumb2__ -.thumb_func _gcm_init_neon -#endif -.align 4 -_gcm_init_neon: - vld1.64 d7,[r1]! @ load H - vmov.i8 q8,#0xe1 - vld1.64 d6,[r1] - vshl.i64 d17,#57 - vshr.u64 d16,#63 @ t0=0xc2....01 - vdup.8 q9,d7[7] - vshr.u64 d26,d6,#63 - vshr.s8 q9,#7 @ broadcast carry bit - vshl.i64 q3,q3,#1 - vand q8,q8,q9 - vorr d7,d26 @ H<<<=1 - veor q3,q3,q8 @ twisted H - vstmia r0,{q3} - - bx lr @ bx lr - - -.globl _gcm_gmult_neon -.private_extern _gcm_gmult_neon -#ifdef __thumb2__ -.thumb_func _gcm_gmult_neon -#endif -.align 4 -_gcm_gmult_neon: - vld1.64 d7,[r0]! @ load Xi - vld1.64 d6,[r0]! 
- vmov.i64 d29,#0x0000ffffffffffff - vldmia r1,{d26,d27} @ load twisted H - vmov.i64 d30,#0x00000000ffffffff -#ifdef __ARMEL__ - vrev64.8 q3,q3 -#endif - vmov.i64 d31,#0x000000000000ffff - veor d28,d26,d27 @ Karatsuba pre-processing - mov r3,#16 - b Lgmult_neon - - -.globl _gcm_ghash_neon -.private_extern _gcm_ghash_neon -#ifdef __thumb2__ -.thumb_func _gcm_ghash_neon -#endif -.align 4 -_gcm_ghash_neon: - vld1.64 d1,[r0]! @ load Xi - vld1.64 d0,[r0]! - vmov.i64 d29,#0x0000ffffffffffff - vldmia r1,{d26,d27} @ load twisted H - vmov.i64 d30,#0x00000000ffffffff -#ifdef __ARMEL__ - vrev64.8 q0,q0 -#endif - vmov.i64 d31,#0x000000000000ffff - veor d28,d26,d27 @ Karatsuba pre-processing - -Loop_neon: - vld1.64 d7,[r2]! @ load inp - vld1.64 d6,[r2]! -#ifdef __ARMEL__ - vrev64.8 q3,q3 -#endif - veor q3,q0 @ inp^=Xi -Lgmult_neon: - vext.8 d16, d26, d26, #1 @ A1 - vmull.p8 q8, d16, d6 @ F = A1*B - vext.8 d0, d6, d6, #1 @ B1 - vmull.p8 q0, d26, d0 @ E = A*B1 - vext.8 d18, d26, d26, #2 @ A2 - vmull.p8 q9, d18, d6 @ H = A2*B - vext.8 d22, d6, d6, #2 @ B2 - vmull.p8 q11, d26, d22 @ G = A*B2 - vext.8 d20, d26, d26, #3 @ A3 - veor q8, q8, q0 @ L = E + F - vmull.p8 q10, d20, d6 @ J = A3*B - vext.8 d0, d6, d6, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q0, d26, d0 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d6, d6, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d26, d22 @ K = A*B4 - veor q10, q10, q0 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor d20, d20, d21 - vmull.p8 q0, d26, d6 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q0, q0, q8 - veor q0, q0, q10 - veor d6,d6,d7 @ Karatsuba pre-processing - vext.8 d16, d28, d28, 
#1 @ A1 - vmull.p8 q8, d16, d6 @ F = A1*B - vext.8 d2, d6, d6, #1 @ B1 - vmull.p8 q1, d28, d2 @ E = A*B1 - vext.8 d18, d28, d28, #2 @ A2 - vmull.p8 q9, d18, d6 @ H = A2*B - vext.8 d22, d6, d6, #2 @ B2 - vmull.p8 q11, d28, d22 @ G = A*B2 - vext.8 d20, d28, d28, #3 @ A3 - veor q8, q8, q1 @ L = E + F - vmull.p8 q10, d20, d6 @ J = A3*B - vext.8 d2, d6, d6, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q1, d28, d2 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d6, d6, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d28, d22 @ K = A*B4 - veor q10, q10, q1 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor d20, d20, d21 - vmull.p8 q1, d28, d6 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q1, q1, q8 - veor q1, q1, q10 - vext.8 d16, d27, d27, #1 @ A1 - vmull.p8 q8, d16, d7 @ F = A1*B - vext.8 d4, d7, d7, #1 @ B1 - vmull.p8 q2, d27, d4 @ E = A*B1 - vext.8 d18, d27, d27, #2 @ A2 - vmull.p8 q9, d18, d7 @ H = A2*B - vext.8 d22, d7, d7, #2 @ B2 - vmull.p8 q11, d27, d22 @ G = A*B2 - vext.8 d20, d27, d27, #3 @ A3 - veor q8, q8, q2 @ L = E + F - vmull.p8 q10, d20, d7 @ J = A3*B - vext.8 d4, d7, d7, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q2, d27, d4 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d7, d7, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d27, d22 @ K = A*B4 - veor q10, q10, q2 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor 
d20, d20, d21 - vmull.p8 q2, d27, d7 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q2, q2, q8 - veor q2, q2, q10 - veor q1,q1,q0 @ Karatsuba post-processing - veor q1,q1,q2 - veor d1,d1,d2 - veor d4,d4,d3 @ Xh|Xl - 256-bit result - - @ equivalent of reduction_avx from ghash-x86_64.pl - vshl.i64 q9,q0,#57 @ 1st phase - vshl.i64 q10,q0,#62 - veor q10,q10,q9 @ - vshl.i64 q9,q0,#63 - veor q10, q10, q9 @ - veor d1,d1,d20 @ - veor d4,d4,d21 - - vshr.u64 q10,q0,#1 @ 2nd phase - veor q2,q2,q0 - veor q0,q0,q10 @ - vshr.u64 q10,q10,#6 - vshr.u64 q0,q0,#1 @ - veor q0,q0,q2 @ - veor q0,q0,q10 @ - - subs r3,#16 - bne Loop_neon - -#ifdef __ARMEL__ - vrev64.8 q0,q0 -#endif - sub r0,#16 - vst1.64 d1,[r0]! @ write out Xi - vst1.64 d0,[r0] - - bx lr @ bx lr - -#endif -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghashv8-armx32.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghashv8-armx32.S deleted file mode 100644 index f5de67f037..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/ghashv8-armx32.S +++ /dev/null @@ -1,256 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text - -.code 32 -#undef __thumb2__ -.globl _gcm_init_v8 -.private_extern _gcm_init_v8 -#ifdef __thumb2__ -.thumb_func _gcm_init_v8 -#endif -.align 4 -_gcm_init_v8: - vld1.64 {q9},[r1] @ load input H - vmov.i8 q11,#0xe1 - vshl.i64 q11,q11,#57 @ 0xc2.0 - vext.8 q3,q9,q9,#8 - vshr.u64 q10,q11,#63 - vdup.32 q9,d18[1] - vext.8 q8,q10,q11,#8 @ t0=0xc2....01 - vshr.u64 q10,q3,#63 - vshr.s32 q9,q9,#31 @ broadcast carry bit - vand q10,q10,q8 - vshl.i64 q3,q3,#1 - vext.8 q10,q10,q10,#8 - vand q8,q8,q9 - vorr q3,q3,q10 @ H<<<=1 - veor q12,q3,q8 @ twisted H - vst1.64 {q12},[r0]! @ store Htable[0] - - @ calculate H^2 - vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing -.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12 - veor q8,q8,q12 -.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12 -.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8 - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q14,q0,q10 - - vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing - veor q9,q9,q14 - vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed - vst1.64 {q13,q14},[r0] @ store Htable[1..2] - - bx lr - -.globl _gcm_gmult_v8 -.private_extern _gcm_gmult_v8 -#ifdef __thumb2__ -.thumb_func _gcm_gmult_v8 -#endif -.align 4 -_gcm_gmult_v8: - vld1.64 {q9},[r0] @ load Xi - vmov.i8 q11,#0xe1 - vld1.64 {q12,q13},[r1] @ load twisted H, ... 
- vshl.u64 q11,q11,#57 -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vext.8 q3,q9,q9,#8 - -.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo - veor q9,q9,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi -.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q0,q0,q10 - -#ifndef __ARMEB__ - vrev64.8 q0,q0 -#endif - vext.8 q0,q0,q0,#8 - vst1.64 {q0},[r0] @ write out Xi - - bx lr - -.globl _gcm_ghash_v8 -.private_extern _gcm_ghash_v8 -#ifdef __thumb2__ -.thumb_func _gcm_ghash_v8 -#endif -.align 4 -_gcm_ghash_v8: - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so - vld1.64 {q0},[r0] @ load [rotated] Xi - @ "[rotated]" means that - @ loaded value would have - @ to be rotated in order to - @ make it appear as in - @ algorithm specification - subs r3,r3,#32 @ see if r3 is 32 or larger - mov r12,#16 @ r12 is used as post- - @ increment for input pointer; - @ as loop is modulo-scheduled - @ r12 is zeroed just in time - @ to preclude overstepping - @ inp[len], which means that - @ last block[s] are actually - @ loaded twice, but last - @ copy is not processed - vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2 - vmov.i8 q11,#0xe1 - vld1.64 {q14},[r1] - moveq r12,#0 @ is it time to zero r12? - vext.8 q0,q0,q0,#8 @ rotate Xi - vld1.64 {q8},[r2]! 
@ load [rotated] I[0] - vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant -#ifndef __ARMEB__ - vrev64.8 q8,q8 - vrev64.8 q0,q0 -#endif - vext.8 q3,q8,q8,#8 @ rotate I[0] - blo Lodd_tail_v8 @ r3 was less than 32 - vld1.64 {q9},[r2],r12 @ load [rotated] I[1] -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vext.8 q7,q9,q9,#8 - veor q3,q3,q0 @ I[i]^=Xi -.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 - veor q9,q9,q7 @ Karatsuba pre-processing -.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 - b Loop_mod2x_v8 - -.align 4 -Loop_mod2x_v8: - vext.8 q10,q3,q3,#8 - subs r3,r3,#32 @ is there more data? -.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo - movlo r12,#0 @ is it time to zero r12? - -.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 - veor q10,q10,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi - veor q0,q0,q4 @ accumulate -.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) - vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] - - veor q2,q2,q6 - moveq r12,#0 @ is it time to zero r12? 
- veor q1,q1,q5 - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] -#ifndef __ARMEB__ - vrev64.8 q8,q8 -#endif - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - vext.8 q7,q9,q9,#8 - vext.8 q3,q8,q8,#8 - veor q0,q1,q10 -.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 - veor q3,q3,q2 @ accumulate q3 early - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q3,q3,q10 - veor q9,q9,q7 @ Karatsuba pre-processing - veor q3,q3,q0 -.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 - bhs Loop_mod2x_v8 @ there was at least 32 more bytes - - veor q2,q2,q10 - vext.8 q3,q8,q8,#8 @ re-construct q3 - adds r3,r3,#32 @ re-construct r3 - veor q0,q0,q2 @ re-construct q0 - beq Ldone_v8 @ is r3 zero? -Lodd_tail_v8: - vext.8 q10,q0,q0,#8 - veor q3,q3,q0 @ inp^=Xi - veor q9,q8,q10 @ q9 is rotated inp^Xi - -.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo - veor q9,q9,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi -.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q0,q0,q10 - -Ldone_v8: -#ifndef __ARMEB__ - vrev64.8 q0,q0 -#endif - vext.8 q0,q0,q0,#8 - vst1.64 {q0},[r0] @ write out Xi - - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so - bx lr - -.byte 
71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha1-armv4-large.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha1-armv4-large.S deleted file mode 100644 index 82ac8df4fc..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha1-armv4-large.S +++ /dev/null @@ -1,1518 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -.globl _sha1_block_data_order -.private_extern _sha1_block_data_order -#ifdef __thumb2__ -.thumb_func _sha1_block_data_order -#endif - -.align 5 -_sha1_block_data_order: -#if __ARM_MAX_ARCH__>=7 -Lsha1_block: - adr r3,Lsha1_block - ldr r12,LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV8_SHA1 - bne LARMv8 - tst r12,#ARMV7_NEON - bne LNEON -#endif - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - ldmia r0,{r3,r4,r5,r6,r7} -Lloop: - ldr r8,LK_00_19 - mov r14,sp - sub sp,sp,#15*4 - mov r5,r5,ror#30 - mov r6,r6,ror#30 - mov r7,r7,ror#30 @ [6] -L_00_15: -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add 
r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r7,r7,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r6,r8,r6,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r4,r5 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r6,r8,r6,ror#2 @ E+=K_00_19 - eor r10,r4,r5 @ F_xx_xx - add r6,r6,r7,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r3,r10,ror#2 - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r6,r6,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r5,r8,r5,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r3,r4 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r5,r8,r5,ror#2 @ E+=K_00_19 - eor r10,r3,r4 @ F_xx_xx - add r5,r5,r6,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r7,r10,ror#2 - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! 
- add r5,r5,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r4,r8,r4,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r7,r3 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r4,r8,r4,ror#2 @ E+=K_00_19 - eor r10,r7,r3 @ F_xx_xx - add r4,r4,r5,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r6,r10,ror#2 - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r4,r4,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r3,r8,r3,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r6,r7 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r3,r8,r3,ror#2 @ E+=K_00_19 - eor r10,r6,r7 @ F_xx_xx - add r3,r3,r4,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r5,r10,ror#2 - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r3,r3,r10 @ E+=F_00_19(B,C,D) -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp -#endif - bne L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#25*4 -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! 
- add r7,r7,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - add r6,r6,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - add r5,r5,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - add r4,r4,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - add r3,r3,r10 @ E+=F_00_19(B,C,D) - - ldr r8,LK_20_39 @ [+15+16*4] - cmn sp,#0 @ [+3], clear carry to denote 20_39 -L_20_39_or_60_79: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r4,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- eor r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_20_39(B,C,D) -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp @ preserve carry -#endif - bne L_20_39_or_60_79 @ [+((12+3)*5+2)*4] - bcs L_done @ [+((12+3)*5+2)*4], spare 300 bytes - - ldr r8,LK_40_59 - sub sp,sp,#20*4 @ [+2] -L_40_59: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r4,r10,ror#2 @ F_xx_xx - and r11,r5,r6 @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_40_59(B,C,D) - add r7,r7,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r3,r10,ror#2 @ F_xx_xx - and r11,r4,r5 @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_40_59(B,C,D) - add r6,r6,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r7,r10,ror#2 @ F_xx_xx - and r11,r3,r4 @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_40_59(B,C,D) - add r5,r5,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - and r11,r7,r3 @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_40_59(B,C,D) - add r4,r4,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r5,r10,ror#2 @ F_xx_xx - and r11,r6,r7 @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_40_59(B,C,D) - add r3,r3,r11,ror#2 -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp -#endif - bne L_40_59 @ [+((12+5)*5+2)*4] - - ldr r8,LK_60_79 - sub sp,sp,#20*4 - cmp sp,#0 @ set carry to denote 60_79 - b L_20_39_or_60_79 @ [+4], spare 300 bytes -L_done: - add sp,sp,#80*4 @ "deallocate" stack frame - ldmia r0,{r8,r9,r10,r11,r12} - add r3,r8,r3 - add r4,r9,r4 - add r5,r10,r5,ror#2 - add r6,r11,r6,ror#2 - add r7,r12,r7,ror#2 - stmia r0,{r3,r4,r5,r6,r7} - teq r1,r2 - bne Lloop @ [+18], total 1307 - -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - - -.align 5 -LK_00_19:.word 0x5a827999 -LK_20_39:.word 0x6ed9eba1 -LK_40_59:.word 0x8f1bbcdc -LK_60_79:.word 0xca62c1d6 -#if __ARM_MAX_ARCH__>=7 -LOPENSSL_armcap: -.word OPENSSL_armcap_P-Lsha1_block -#endif -.byte 
83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 5 -#if __ARM_MAX_ARCH__>=7 - - - -#ifdef __thumb2__ -.thumb_func sha1_block_data_order_neon -#endif -.align 4 -sha1_block_data_order_neon: -LNEON: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - @ dmb @ errata #451034 on early Cortex A8 - @ vstmdb sp!,{d8-d15} @ ABI specification says so - mov r14,sp - sub r12,sp,#64 - adr r8,LK_00_19 - bic r12,r12,#15 @ align for 128-bit stores - - ldmia r0,{r3,r4,r5,r6,r7} @ load context - mov sp,r12 @ alloca - - vld1.8 {q0,q1},[r1]! @ handles unaligned - veor q15,q15,q15 - vld1.8 {q2,q3},[r1]! - vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19 - vrev32.8 q0,q0 @ yes, even on - vrev32.8 q1,q1 @ big-endian... - vrev32.8 q2,q2 - vadd.i32 q8,q0,q14 - vrev32.8 q3,q3 - vadd.i32 q9,q1,q14 - vst1.32 {q8},[r12,:128]! - vadd.i32 q10,q2,q14 - vst1.32 {q9},[r12,:128]! - vst1.32 {q10},[r12,:128]! - ldr r9,[sp] @ big RAW stall - -Loop_neon: - vext.8 q8,q0,q1,#8 - bic r10,r6,r4 - add r7,r7,r9 - and r11,r5,r4 - vadd.i32 q13,q3,q14 - ldr r9,[sp,#4] - add r7,r7,r3,ror#27 - vext.8 q12,q3,q15,#4 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q8,q8,q0 - bic r10,r5,r3 - add r6,r6,r9 - veor q12,q12,q2 - and r11,r4,r3 - ldr r9,[sp,#8] - veor q12,q12,q8 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q13,q15,q12,#4 - bic r10,r4,r7 - add r5,r5,r9 - vadd.i32 q8,q12,q12 - and r11,r3,r7 - ldr r9,[sp,#12] - vsri.32 q8,q12,#31 - add r5,r5,r6,ror#27 - eor r11,r11,r10 - mov r7,r7,ror#2 - vshr.u32 q12,q13,#30 - add r5,r5,r11 - bic r10,r3,r6 - vshl.u32 q13,q13,#2 - add r4,r4,r9 - and r11,r7,r6 - veor q8,q8,q12 - ldr r9,[sp,#16] - add r4,r4,r5,ror#27 - veor q8,q8,q13 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q9,q1,q2,#8 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - vadd.i32 q13,q8,q14 - ldr r9,[sp,#20] - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r4,ror#27 - vext.8 q12,q8,q15,#4 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - veor q9,q9,q1 - bic r10,r6,r4 - add r7,r7,r9 - veor q12,q12,q3 - and r11,r5,r4 - ldr r9,[sp,#24] - veor q12,q12,q9 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q13,q15,q12,#4 - bic r10,r5,r3 - add r6,r6,r9 - vadd.i32 q9,q12,q12 - and r11,r4,r3 - ldr r9,[sp,#28] - vsri.32 q9,q12,#31 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - mov r3,r3,ror#2 - vshr.u32 q12,q13,#30 - add r6,r6,r11 - bic r10,r4,r7 - vshl.u32 q13,q13,#2 - add r5,r5,r9 - and r11,r3,r7 - veor q9,q9,q12 - ldr r9,[sp,#32] - add r5,r5,r6,ror#27 - veor q9,q9,q13 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q10,q2,q3,#8 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - vadd.i32 q13,q9,q14 - ldr r9,[sp,#36] - add r4,r4,r5,ror#27 - vext.8 q12,q9,q15,#4 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q10,q10,q2 - bic r10,r7,r5 - add r3,r3,r9 - veor q12,q12,q8 - and r11,r6,r5 - ldr r9,[sp,#40] - veor q12,q12,q10 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! 
- mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q13,q15,q12,#4 - bic r10,r6,r4 - add r7,r7,r9 - vadd.i32 q10,q12,q12 - and r11,r5,r4 - ldr r9,[sp,#44] - vsri.32 q10,q12,#31 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - mov r4,r4,ror#2 - vshr.u32 q12,q13,#30 - add r7,r7,r11 - bic r10,r5,r3 - vshl.u32 q13,q13,#2 - add r6,r6,r9 - and r11,r4,r3 - veor q10,q10,q12 - ldr r9,[sp,#48] - add r6,r6,r7,ror#27 - veor q10,q10,q13 - eor r11,r11,r10 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q11,q3,q8,#8 - bic r10,r4,r7 - add r5,r5,r9 - and r11,r3,r7 - vadd.i32 q13,q10,q14 - ldr r9,[sp,#52] - add r5,r5,r6,ror#27 - vext.8 q12,q10,q15,#4 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q11,q11,q3 - bic r10,r3,r6 - add r4,r4,r9 - veor q12,q12,q9 - and r11,r7,r6 - ldr r9,[sp,#56] - veor q12,q12,q11 - add r4,r4,r5,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q13,q15,q12,#4 - bic r10,r7,r5 - add r3,r3,r9 - vadd.i32 q11,q12,q12 - and r11,r6,r5 - ldr r9,[sp,#60] - vsri.32 q11,q12,#31 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - vshr.u32 q12,q13,#30 - add r3,r3,r11 - bic r10,r6,r4 - vshl.u32 q13,q13,#2 - add r7,r7,r9 - and r11,r5,r4 - veor q11,q11,q12 - ldr r9,[sp,#0] - add r7,r7,r3,ror#27 - veor q11,q11,q13 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q10,q11,#8 - bic r10,r5,r3 - add r6,r6,r9 - and r11,r4,r3 - veor q0,q0,q8 - ldr r9,[sp,#4] - add r6,r6,r7,ror#27 - veor q0,q0,q1 - eor r11,r11,r10 - mov r3,r3,ror#2 - vadd.i32 q13,q11,q14 - add r6,r6,r11 - bic r10,r4,r7 - veor q12,q12,q0 - add r5,r5,r9 - and r11,r3,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - eor r11,r11,r10 - mov r7,r7,ror#2 - vsli.32 q0,q12,#2 - add r5,r5,r11 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - ldr r9,[sp,#12] - add r4,r4,r5,ror#27 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - ldr r9,[sp,#16] - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q11,q0,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#20] - veor q1,q1,q9 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q1,q1,q2 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q0,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q1 - ldr r9,[sp,#24] - eor r11,r10,r4 - vshr.u32 q1,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q1,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#28] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#32] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q0,q1,#8 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#36] - veor q2,q2,q10 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - veor q2,q2,q3 - mov r5,r5,ror#2 - add r3,r3,r11 - vadd.i32 q13,q1,q14 - eor r10,r4,r6 - vld1.32 {d28[],d29[]},[r8,:32]! - add r7,r7,r9 - veor q12,q12,q2 - ldr r9,[sp,#40] - eor r11,r10,r5 - vshr.u32 q2,q12,#30 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - vst1.32 {q13},[r12,:128]! 
- add r7,r7,r11 - eor r10,r3,r5 - vsli.32 q2,q12,#2 - add r6,r6,r9 - ldr r9,[sp,#44] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#48] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q1,q2,#8 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r7 - add r4,r4,r5,ror#27 - veor q3,q3,q8 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q2,q14 - eor r10,r5,r7 - add r3,r3,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r6 - vshr.u32 q3,q12,#30 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vst1.32 {q13},[r12,:128]! - add r3,r3,r11 - eor r10,r4,r6 - vsli.32 q3,q12,#2 - add r7,r7,r9 - ldr r9,[sp,#60] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#0] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q2,q3,#8 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#4] - veor q8,q8,q0 - eor r11,r10,r3 - add r5,r5,r6,ror#27 - veor q8,q8,q9 - mov r7,r7,ror#2 - add r5,r5,r11 - vadd.i32 q13,q3,q14 - eor r10,r6,r3 - add r4,r4,r9 - veor q12,q12,q8 - ldr r9,[sp,#8] - eor r11,r10,r7 - vshr.u32 q8,q12,#30 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - add r4,r4,r11 - eor r10,r5,r7 - vsli.32 q8,q12,#2 - add r3,r3,r9 - ldr r9,[sp,#12] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#16] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q3,q8,#8 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#20] - veor q9,q9,q1 - eor r11,r10,r4 - add r6,r6,r7,ror#27 - veor q9,q9,q10 - mov r3,r3,ror#2 - add r6,r6,r11 - vadd.i32 q13,q8,q14 - eor r10,r7,r4 - add r5,r5,r9 - veor q12,q12,q9 - ldr r9,[sp,#24] - eor r11,r10,r3 - vshr.u32 q9,q12,#30 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - vst1.32 {q13},[r12,:128]! 
- add r5,r5,r11 - eor r10,r6,r3 - vsli.32 q9,q12,#2 - add r4,r4,r9 - ldr r9,[sp,#28] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#32] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q8,q9,#8 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#36] - veor q10,q10,q2 - add r7,r7,r3,ror#27 - eor r11,r5,r6 - veor q10,q10,q11 - add r7,r7,r10 - and r11,r11,r4 - vadd.i32 q13,q9,q14 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q12,q12,q10 - add r6,r6,r9 - and r10,r4,r5 - vshr.u32 q10,q12,#30 - ldr r9,[sp,#40] - add r6,r6,r7,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r4,r5 - add r6,r6,r10 - vsli.32 q10,q12,#2 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#44] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#48] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q9,q10,#8 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#52] - veor q11,q11,q3 - add r3,r3,r4,ror#27 - eor r11,r6,r7 - veor q11,q11,q0 - add r3,r3,r10 - and r11,r11,r5 - vadd.i32 q13,q10,q14 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - veor q12,q12,q11 - add r7,r7,r9 - and r10,r5,r6 - vshr.u32 q11,q12,#30 - ldr r9,[sp,#56] - add r7,r7,r3,ror#27 - vst1.32 {q13},[r12,:128]! 
- eor r11,r5,r6 - add r7,r7,r10 - vsli.32 q11,q12,#2 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#60] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#0] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q10,q11,#8 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#4] - veor q0,q0,q8 - add r4,r4,r5,ror#27 - eor r11,r7,r3 - veor q0,q0,q1 - add r4,r4,r10 - and r11,r11,r6 - vadd.i32 q13,q11,q14 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q12,q12,q0 - add r3,r3,r9 - and r10,r6,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r3,r3,r4,ror#27 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - eor r11,r6,r7 - add r3,r3,r10 - vsli.32 q0,q12,#2 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#12] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#16] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q11,q0,#8 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#20] - veor q1,q1,q9 - add r5,r5,r6,ror#27 - eor r11,r3,r4 - veor q1,q1,q2 - add r5,r5,r10 - and r11,r11,r7 - vadd.i32 q13,q0,q14 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q12,q12,q1 - add r4,r4,r9 - and r10,r7,r3 - vshr.u32 q1,q12,#30 - ldr r9,[sp,#24] - add r4,r4,r5,ror#27 - vst1.32 {q13},[r12,:128]! 
- eor r11,r7,r3 - add r4,r4,r10 - vsli.32 q1,q12,#2 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#28] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#32] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q0,q1,#8 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#36] - veor q2,q2,q10 - add r6,r6,r7,ror#27 - eor r11,r4,r5 - veor q2,q2,q3 - add r6,r6,r10 - and r11,r11,r3 - vadd.i32 q13,q1,q14 - mov r3,r3,ror#2 - add r6,r6,r11 - veor q12,q12,q2 - add r5,r5,r9 - and r10,r3,r4 - vshr.u32 q2,q12,#30 - ldr r9,[sp,#40] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r3,r4 - add r5,r5,r10 - vsli.32 q2,q12,#2 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#44] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#48] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q1,q2,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q3,q3,q8 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q2,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r4 - vshr.u32 q3,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q3,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#60] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#0] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q3,q14 - eor r10,r5,r7 - add r3,r3,r9 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - teq r1,r2 - sub r8,r8,#16 - it eq - subeq r1,r1,#64 - vld1.8 {q0,q1},[r1]! - ldr r9,[sp,#4] - eor r11,r10,r6 - vld1.8 {q2,q3},[r1]! - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - eor r10,r4,r6 - vrev32.8 q0,q0 - add r7,r7,r9 - ldr r9,[sp,#8] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#12] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#16] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vrev32.8 q1,q1 - eor r10,r6,r3 - add r4,r4,r9 - vadd.i32 q8,q0,q14 - ldr r9,[sp,#20] - eor r11,r10,r7 - vst1.32 {q8},[r12,:128]! - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#24] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#28] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#32] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vrev32.8 q2,q2 - eor r10,r7,r4 - add r5,r5,r9 - vadd.i32 q9,q1,q14 - ldr r9,[sp,#36] - eor r11,r10,r3 - vst1.32 {q9},[r12,:128]! - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#40] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#44] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#48] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vrev32.8 q3,q3 - eor r10,r3,r5 - add r6,r6,r9 - vadd.i32 q10,q2,q14 - ldr r9,[sp,#52] - eor r11,r10,r4 - vst1.32 {q10},[r12,:128]! 
- add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#56] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#60] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - ldmia r0,{r9,r10,r11,r12} @ accumulate context - add r3,r3,r9 - ldr r9,[r0,#16] - add r4,r4,r10 - add r5,r5,r11 - add r6,r6,r12 - it eq - moveq sp,r14 - add r7,r7,r9 - it ne - ldrne r9,[sp] - stmia r0,{r3,r4,r5,r6,r7} - itt ne - addne r12,sp,#3*16 - bne Loop_neon - - @ vldmia sp!,{d8-d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} - -#endif -#if __ARM_MAX_ARCH__>=7 - -# if defined(__thumb2__) -# define INST(a,b,c,d) .byte c,d|0xf,a,b -# else -# define INST(a,b,c,d) .byte a,b,c,d|0x10 -# endif - -#ifdef __thumb2__ -.thumb_func sha1_block_data_order_armv8 -#endif -.align 5 -sha1_block_data_order_armv8: -LARMv8: - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - - veor q1,q1,q1 - adr r3,LK_00_19 - vld1.32 {q0},[r0]! - vld1.32 {d2[0]},[r0] - sub r0,r0,#16 - vld1.32 {d16[],d17[]},[r3,:32]! - vld1.32 {d18[],d19[]},[r3,:32]! - vld1.32 {d20[],d21[]},[r3,:32]! - vld1.32 {d22[],d23[]},[r3,:32] - -Loop_v8: - vld1.8 {q4,q5},[r1]! - vld1.8 {q6,q7},[r1]! 
- vrev32.8 q4,q4 - vrev32.8 q5,q5 - - vadd.i32 q12,q8,q4 - vrev32.8 q6,q6 - vmov q14,q0 @ offload - subs r2,r2,#1 - - vadd.i32 q13,q8,q5 - vrev32.8 q7,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0 - INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12 - vadd.i32 q12,q8,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1 - INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 - vadd.i32 q13,q8,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2 - INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 - vadd.i32 q12,q8,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3 - INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 - vadd.i32 q13,q9,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4 - INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 - vadd.i32 q12,q9,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q9,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q10,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9 - 
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q10,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11 - INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 - vadd.i32 q13,q10,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13 - INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 - vadd.i32 q13,q11,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q11,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q11,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q7 - - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - - vadd.i32 q1,q1,q2 - vadd.i32 q0,q0,q14 - bne Loop_v8 - - vst1.32 {q0},[r0]! 
- vst1.32 {d2[0]},[r0] - - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - bx lr @ bx lr - -#endif -#if __ARM_MAX_ARCH__>=7 -.comm _OPENSSL_armcap_P,4 -.non_lazy_symbol_pointer -OPENSSL_armcap_P: -.indirect_symbol _OPENSSL_armcap_P -.long 0 -.private_extern _OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha256-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha256-armv4.S deleted file mode 100644 index 0cf36482d4..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha256-armv4.S +++ /dev/null @@ -1,2846 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. -@ ==================================================================== - -@ SHA256 block procedure for ARMv4. May 2007. - -@ Performance is ~2x better than gcc 3.4 generated code and in "abso- -@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per -@ byte [on single-issue Xscale PXA250 core]. 
- -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 22% improvement on -@ Cortex A8 core and ~20 cycles per processed byte. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 16% -@ improvement on Cortex A8 core and ~15.4 cycles per processed byte. - -@ September 2013. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process one -@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon -@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only -@ code (meaning that latter performs sub-optimally, nothing was done -@ about it). - -@ May 2014. -@ -@ Add ARMv8 code path performing at 2.0 cpb on Apple A7. - -#ifndef __KERNEL__ -# include -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those -@ instructions are manually-encoded. (See unsha256.) 
- - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - - -.align 5 -K256: -.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -.word 0 @ terminator -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -LOPENSSL_armcap: -.word OPENSSL_armcap_P-Lsha256_block_data_order -#endif -.align 5 - -.globl _sha256_block_data_order -.private_extern _sha256_block_data_order -#ifdef __thumb2__ -.thumb_func _sha256_block_data_order -#endif -_sha256_block_data_order: -Lsha256_block_data_order: -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r3,pc,#8 @ _sha256_block_data_order -#else - adr r3,Lsha256_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV8_SHA256 - bne LARMv8 - tst r12,#ARMV7_NEON - bne LNEON -#endif - add r2,r1,r2,lsl#6 @ len to point at the end of inp - stmdb sp!,{r0,r1,r2,r4-r11,lr} - ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r14,r3,#256+32 @ K256 - sub sp,sp,#16*4 @ alloca(X[16]) -Loop: -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ magic - eor r12,r12,r12 -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 0 
-# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 0 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 0==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? -#endif -#if 0<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 1 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 1 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ 
h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 1==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 1<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 2 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 2 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 2==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 2<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 3 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 3 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 3==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 3<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 4 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 4 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 4==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 4<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 5 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 5==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 5<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 6 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 6 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 6==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 6<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 7 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 7==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 7<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 8 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 8 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 8==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 8<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 9 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 9 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 9==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 9<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 10 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 10 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 10==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 10<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 11 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 11 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 11==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 11<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 12 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 12 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 12==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 12<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 13 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 13 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 13==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 13<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 14 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 14 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 14==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 14<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 15 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 15 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 15==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 15<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -Lrounds_16_xx: - @ ldr r2,[sp,#1*4] @ 16 - @ ldr r1,[sp,#14*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#0*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#9*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 16==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 16<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#2*4] @ 17 - @ ldr r1,[sp,#15*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#1*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#10*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 17==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 17<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#3*4] @ 18 - @ ldr r1,[sp,#0*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#2*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#11*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 18==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 18<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#4*4] @ 19 - @ ldr r1,[sp,#1*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#3*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#12*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 19==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 19<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#5*4] @ 20 - @ ldr r1,[sp,#2*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#4*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#13*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 20==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 20<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#6*4] @ 21 - @ ldr r1,[sp,#3*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#5*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#14*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 21==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 21<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#7*4] @ 22 - @ ldr r1,[sp,#4*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#6*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#15*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 22==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 22<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#8*4] @ 23 - @ ldr r1,[sp,#5*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#7*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#0*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 23==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 23<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#9*4] @ 24 - @ ldr r1,[sp,#6*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#8*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#1*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 24==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 24<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#10*4] @ 25 - @ ldr r1,[sp,#7*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#9*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#2*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 25==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 25<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#11*4] @ 26 - @ ldr r1,[sp,#8*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#10*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#3*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 26==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 26<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#12*4] @ 27 - @ ldr r1,[sp,#9*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#11*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#4*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 27==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 27<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#13*4] @ 28 - @ ldr r1,[sp,#10*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#12*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#5*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 28==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 28<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#14*4] @ 29 - @ ldr r1,[sp,#11*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#13*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#6*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 29==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 29<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#15*4] @ 30 - @ ldr r1,[sp,#12*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#14*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#7*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 30==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 30<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#0*4] @ 31 - @ ldr r1,[sp,#13*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#15*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#8*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 31==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 31<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - ite eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r3,[sp,#16*4] @ pull ctx - bne Lrounds_16_xx - - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r0,[r3,#0] - ldr r2,[r3,#4] - ldr r12,[r3,#8] - add r4,r4,r0 - ldr r0,[r3,#12] - add r5,r5,r2 - ldr r2,[r3,#16] - add r6,r6,r12 - ldr r12,[r3,#20] - add r7,r7,r0 - ldr r0,[r3,#24] - add r8,r8,r2 - ldr r2,[r3,#28] - add r9,r9,r12 - ldr r1,[sp,#17*4] @ pull inp - ldr r12,[sp,#18*4] @ pull inp+len - add r10,r10,r0 - add r11,r11,r2 - stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} - cmp r1,r12 - sub r14,r14,#256 @ rewind Ktbl - bne Loop - - add sp,sp,#19*4 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - -#if __ARM_MAX_ARCH__>=7 - - - -.globl _sha256_block_data_order_neon -.private_extern _sha256_block_data_order_neon -#ifdef __thumb2__ -.thumb_func _sha256_block_data_order_neon -#endif -.align 5 -.skip 16 -_sha256_block_data_order_neon: -LNEON: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - - sub r11,sp,#16*4+16 - adr r14,K256 - bic r11,r11,#15 @ align for 128-bit stores - mov r12,sp - mov sp,r11 @ alloca - add r2,r1,r2,lsl#6 @ len to point at the end of inp - - vld1.8 {q0},[r1]! - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - vld1.32 {q8},[r14,:128]! - vld1.32 {q9},[r14,:128]! - vld1.32 {q10},[r14,:128]! 
- vld1.32 {q11},[r14,:128]! - vrev32.8 q0,q0 @ yes, even on - str r0,[sp,#64] - vrev32.8 q1,q1 @ big-endian - str r1,[sp,#68] - mov r1,sp - vrev32.8 q2,q2 - str r2,[sp,#72] - vrev32.8 q3,q3 - str r12,[sp,#76] @ save original sp - vadd.i32 q8,q8,q0 - vadd.i32 q9,q9,q1 - vst1.32 {q8},[r1,:128]! - vadd.i32 q10,q10,q2 - vst1.32 {q9},[r1,:128]! - vadd.i32 q11,q11,q3 - vst1.32 {q10},[r1,:128]! - vst1.32 {q11},[r1,:128]! - - ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r1,r1,#64 - ldr r2,[sp,#0] - eor r12,r12,r12 - eor r3,r5,r6 - b L_00_48 - -.align 4 -L_00_48: - vext.8 q8,q0,q1,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q2,q3,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q0,q0,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#4] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d7,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d7,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d7,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q0,q0,q9 - add r10,r10,r2 - ldr r2,[sp,#8] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d7,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d7,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d0,d0,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d0,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d0,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d0,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#12] - and r3,r3,r12 - vshr.u32 d24,d0,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 
{q8},[r14,:128]! - add r8,r8,r2 - vsli.32 d24,d0,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d1,d1,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q0 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q1,q2,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q3,q0,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q1,q1,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#20] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d1,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d1,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d1,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q1,q1,q9 - add r6,r6,r2 - ldr r2,[sp,#24] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d1,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d1,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d2,d2,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d2,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d2,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d2,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#28] - and r3,r3,r12 - vshr.u32 d24,d2,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! 
- add r4,r4,r2 - vsli.32 d24,d2,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d3,d3,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q1 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vext.8 q8,q2,q3,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q0,q1,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q2,q2,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#36] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d3,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d3,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d3,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q2,q2,q9 - add r10,r10,r2 - ldr r2,[sp,#40] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d3,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d3,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d4,d4,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d4,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d4,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d4,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#44] - and r3,r3,r12 - vshr.u32 d24,d4,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! 
- add r8,r8,r2 - vsli.32 d24,d4,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d5,d5,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q2 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q3,q0,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q1,q2,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q3,q3,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#52] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d5,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d5,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d5,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q3,q3,q9 - add r6,r6,r2 - ldr r2,[sp,#56] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d5,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d5,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d6,d6,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d6,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d6,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d6,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#60] - and r3,r3,r12 - vshr.u32 d24,d6,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! 
- add r4,r4,r2 - vsli.32 d24,d6,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d7,d7,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q3 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[r14] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - teq r2,#0 @ check for K256 terminator - ldr r2,[sp,#0] - sub r1,r1,#64 - bne L_00_48 - - ldr r1,[sp,#68] - ldr r0,[sp,#72] - sub r14,r14,#256 @ rewind r14 - teq r1,r0 - it eq - subeq r1,r1,#64 @ avoid SEGV - vld1.8 {q0},[r1]! @ load next input block - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - it ne - strne r1,[sp,#68] - mov r1,sp - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q0,q0 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q0 - ldr r2,[sp,#4] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#8] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#12] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and 
r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q1,q1 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q1 - ldr r2,[sp,#20] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#24] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#28] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q2,q2 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q2 - ldr r2,[sp,#36] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#40] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#44] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q3,q3 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q3 - ldr r2,[sp,#52] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#56] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#60] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#64] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! 
- ldr r0,[r2,#0] - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r12,[r2,#4] - ldr r3,[r2,#8] - ldr r1,[r2,#12] - add r4,r4,r0 @ accumulate - ldr r0,[r2,#16] - add r5,r5,r12 - ldr r12,[r2,#20] - add r6,r6,r3 - ldr r3,[r2,#24] - add r7,r7,r1 - ldr r1,[r2,#28] - add r8,r8,r0 - str r4,[r2],#4 - add r9,r9,r12 - str r5,[r2],#4 - add r10,r10,r3 - str r6,[r2],#4 - add r11,r11,r1 - str r7,[r2],#4 - stmia r2,{r8,r9,r10,r11} - - ittte ne - movne r1,sp - ldrne r2,[sp,#0] - eorne r12,r12,r12 - ldreq sp,[sp,#76] @ restore original sp - itt ne - eorne r3,r5,r6 - bne L_00_48 - - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} - -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - -# if defined(__thumb2__) -# define INST(a,b,c,d) .byte c,d|0xc,a,b -# else -# define INST(a,b,c,d) .byte a,b,c,d -# endif - -#ifdef __thumb2__ -.thumb_func sha256_block_data_order_armv8 -#endif -.align 5 -sha256_block_data_order_armv8: -LARMv8: - vld1.32 {q0,q1},[r0] - sub r3,r3,#256+32 - add r2,r1,r2,lsl#6 @ len to point at the end of inp - b Loop_v8 - -.align 4 -Loop_v8: - vld1.8 {q8,q9},[r1]! - vld1.8 {q10,q11},[r1]! - vld1.32 {q12},[r3]! - vrev32.8 q8,q8 - vrev32.8 q9,q9 - vrev32.8 q10,q10 - vrev32.8 q11,q11 - vmov q14,q0 @ offload - vmov q15,q1 - teq r1,r2 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! 
- vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! 
- vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vld1.32 {q13},[r3] - vadd.i32 q12,q12,q10 - sub r3,r3,#256-16 @ rewind - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vadd.i32 q13,q13,q11 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vadd.i32 q0,q0,q14 - vadd.i32 q1,q1,q15 - it ne - bne Loop_v8 - - vst1.32 {q0,q1},[r0] - - bx lr @ bx lr - -#endif -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm _OPENSSL_armcap_P,4 -.non_lazy_symbol_pointer -OPENSSL_armcap_P: -.indirect_symbol _OPENSSL_armcap_P -.long 0 -.private_extern _OPENSSL_armcap_P -#endif -#endif // 
!OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha512-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha512-armv4.S deleted file mode 100644 index 21913cb2ba..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/sha512-armv4.S +++ /dev/null @@ -1,1899 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. -@ ==================================================================== - -@ SHA512 block procedure for ARMv4. September 2007. - -@ This code is ~4.5 (four and a half) times faster than code generated -@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue -@ Xscale PXA250 core]. -@ -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 6% improvement on -@ Cortex A8 core and ~40 cycles per processed byte. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 7% -@ improvement on Coxtex A8 core and ~38 cycles per byte. 
- -@ March 2011. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process -@ one byte in 23.3 cycles or ~60% faster than integer-only code. - -@ August 2012. -@ -@ Improve NEON performance by 12% on Snapdragon S4. In absolute -@ terms it's 22.6 cycles per byte, which is disappointing result. -@ Technical writers asserted that 3-way S4 pipeline can sustain -@ multiple NEON instructions per cycle, but dual NEON issue could -@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html -@ for further details. On side note Cortex-A15 processes one byte in -@ 16 cycles. - -@ Byte order [in]dependence. ========================================= -@ -@ Originally caller was expected to maintain specific *dword* order in -@ h[0-7], namely with most significant dword at *lower* address, which -@ was reflected in below two parameters as 0 and 4. Now caller is -@ expected to maintain native byte order for whole 64-bit values. -#ifndef __KERNEL__ -# include -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -# define VFP_ABI_PUSH -# define VFP_ABI_POP -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
- - -#ifdef __ARMEL__ -# define LO 0 -# define HI 4 -# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 -#else -# define HI 0 -# define LO 4 -# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 -#endif - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -# define adrl adr -#else -.code 32 -#endif - - -.align 5 -K512: - WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) - WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) - WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) - WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) - WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) - WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) - WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) - WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) - WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) - WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) - WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) - WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) - WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) - WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) - WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) - WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) - WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) - WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) - WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) - WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) - WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) - WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) - WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) - WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) - WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) - WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) - WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) - WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) - WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) - WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) - 
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) - WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) - WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) - WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) - WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) - WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) - WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) - WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) - WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) - WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) - -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -LOPENSSL_armcap: -.word OPENSSL_armcap_P-Lsha512_block_data_order -.skip 32-4 -#else -.skip 32 -#endif - -.globl _sha512_block_data_order -.private_extern _sha512_block_data_order -#ifdef __thumb2__ -.thumb_func _sha512_block_data_order -#endif -_sha512_block_data_order: -Lsha512_block_data_order: -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r3,pc,#8 @ _sha512_block_data_order -#else - adr r3,Lsha512_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV7_NEON - bne LNEON -#endif - add r2,r1,r2,lsl#7 @ len to point at the end of inp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - sub r14,r3,#672 @ K512 - sub sp,sp,#9*8 - - ldr r7,[r0,#32+LO] - ldr r8,[r0,#32+HI] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] -Loop: - str r9, [sp,#48+0] - str r10, [sp,#48+4] - str r11, [sp,#56+0] - str r12, [sp,#56+4] - ldr r5,[r0,#0+LO] - ldr r6,[r0,#0+HI] - ldr r3,[r0,#8+LO] - ldr r4,[r0,#8+HI] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - str r3,[sp,#8+0] - str r4,[sp,#8+4] - str r9, [sp,#16+0] - str r10, [sp,#16+4] - str r11, [sp,#24+0] - str r12, [sp,#24+4] - ldr r3,[r0,#40+LO] - ldr r4,[r0,#40+HI] - str r3,[sp,#40+0] - str r4,[sp,#40+4] - -L00_15: -#if 
__ARM_ARCH__<7 - ldrb r3,[r1,#7] - ldrb r9, [r1,#6] - ldrb r10, [r1,#5] - ldrb r11, [r1,#4] - ldrb r4,[r1,#3] - ldrb r12, [r1,#2] - orr r3,r3,r9,lsl#8 - ldrb r9, [r1,#1] - orr r3,r3,r10,lsl#16 - ldrb r10, [r1],#8 - orr r3,r3,r11,lsl#24 - orr r4,r4,r12,lsl#8 - orr r4,r4,r9,lsl#16 - orr r4,r4,r10,lsl#24 -#else - ldr r3,[r1,#4] - ldr r4,[r1],#8 -#ifdef __ARMEL__ - rev r3,r3 - rev r4,r4 -#endif -#endif - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - ldr r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#148 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 - eor 
r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 - tst r14,#1 - beq L00_15 - ldr r9,[sp,#184+0] - ldr r10,[sp,#184+4] - bic r14,r14,#1 -L16_79: - @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) - @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 - @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 - mov r3,r9,lsr#1 - ldr r11,[sp,#80+0] - mov r4,r10,lsr#1 - ldr r12,[sp,#80+4] - eor r3,r3,r10,lsl#31 - eor r4,r4,r9,lsl#31 - eor r3,r3,r9,lsr#8 - eor r4,r4,r10,lsr#8 - eor r3,r3,r10,lsl#24 - eor r4,r4,r9,lsl#24 - eor r3,r3,r9,lsr#7 - eor r4,r4,r10,lsr#7 - eor r3,r3,r10,lsl#25 - - @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) - @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 - @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 - mov r9,r11,lsr#19 - mov r10,r12,lsr#19 - eor r9,r9,r12,lsl#13 - eor r10,r10,r11,lsl#13 - eor r9,r9,r12,lsr#29 - eor r10,r10,r11,lsr#29 - eor r9,r9,r11,lsl#3 - eor r10,r10,r12,lsl#3 - eor r9,r9,r11,lsr#6 - eor r10,r10,r12,lsr#6 - ldr r11,[sp,#120+0] - eor r9,r9,r12,lsl#26 - - ldr r12,[sp,#120+4] - adds r3,r3,r9 - ldr r9,[sp,#192+0] - adc r4,r4,r10 - - ldr r10,[sp,#192+4] - adds r3,r3,r11 - adc r4,r4,r12 - adds r3,r3,r9 - adc r4,r4,r10 - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - ldr 
r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#23 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 - eor r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 -#if __ARM_ARCH__>=7 - ittt eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r9,[sp,#184+0] - ldreq r10,[sp,#184+4] - beq L16_79 - bic r14,r14,#1 - - ldr r3,[sp,#8+0] 
- ldr r4,[sp,#8+4] - ldr r9, [r0,#0+LO] - ldr r10, [r0,#0+HI] - ldr r11, [r0,#8+LO] - ldr r12, [r0,#8+HI] - adds r9,r5,r9 - str r9, [r0,#0+LO] - adc r10,r6,r10 - str r10, [r0,#0+HI] - adds r11,r3,r11 - str r11, [r0,#8+LO] - adc r12,r4,r12 - str r12, [r0,#8+HI] - - ldr r5,[sp,#16+0] - ldr r6,[sp,#16+4] - ldr r3,[sp,#24+0] - ldr r4,[sp,#24+4] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - adds r9,r5,r9 - str r9, [r0,#16+LO] - adc r10,r6,r10 - str r10, [r0,#16+HI] - adds r11,r3,r11 - str r11, [r0,#24+LO] - adc r12,r4,r12 - str r12, [r0,#24+HI] - - ldr r3,[sp,#40+0] - ldr r4,[sp,#40+4] - ldr r9, [r0,#32+LO] - ldr r10, [r0,#32+HI] - ldr r11, [r0,#40+LO] - ldr r12, [r0,#40+HI] - adds r7,r7,r9 - str r7,[r0,#32+LO] - adc r8,r8,r10 - str r8,[r0,#32+HI] - adds r11,r3,r11 - str r11, [r0,#40+LO] - adc r12,r4,r12 - str r12, [r0,#40+HI] - - ldr r5,[sp,#48+0] - ldr r6,[sp,#48+4] - ldr r3,[sp,#56+0] - ldr r4,[sp,#56+4] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] - adds r9,r5,r9 - str r9, [r0,#48+LO] - adc r10,r6,r10 - str r10, [r0,#48+HI] - adds r11,r3,r11 - str r11, [r0,#56+LO] - adc r12,r4,r12 - str r12, [r0,#56+HI] - - add sp,sp,#640 - sub r14,r14,#640 - - teq r1,r2 - bne Loop - - add sp,sp,#8*9 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif - -#if __ARM_MAX_ARCH__>=7 - - - -.globl _sha512_block_data_order_neon -.private_extern _sha512_block_data_order_neon -#ifdef __thumb2__ -.thumb_func _sha512_block_data_order_neon -#endif -.align 4 -_sha512_block_data_order_neon: -LNEON: - dmb @ errata #451034 on early Cortex A8 - add r2,r1,r2,lsl#7 @ len to point at the end of inp - adr r3,K512 - VFP_ABI_PUSH - vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context -Loop_neon: - 
vshr.u64 d24,d20,#14 @ 0 -#if 0<16 - vld1.64 {d0},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 0>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 0<16 && defined(__ARMEL__) - vrev64.8 d0,d0 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 1 -#if 1<16 - vld1.64 {d1},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 1>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 1<16 && defined(__ARMEL__) - vrev64.8 d1,d1 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 2 -#if 2<16 - vld1.64 {d2},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 2>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 2<16 && defined(__ARMEL__) - vrev64.8 d2,d2 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 3 -#if 3<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 3>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 3<16 && defined(__ARMEL__) - vrev64.8 d3,d3 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 4 -#if 4<16 - vld1.64 {d4},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 4>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 4<16 && defined(__ARMEL__) - vrev64.8 d4,d4 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 5 -#if 5<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 5>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 5<16 && defined(__ARMEL__) - vrev64.8 d5,d5 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 6 -#if 6<16 - vld1.64 {d6},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 6>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 6<16 && defined(__ARMEL__) - vrev64.8 d6,d6 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 7 -#if 7<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 7>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 7<16 && defined(__ARMEL__) - vrev64.8 d7,d7 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 d24,d20,#14 @ 8 -#if 8<16 - vld1.64 {d8},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 8>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 8<16 && defined(__ARMEL__) - vrev64.8 d8,d8 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 9 -#if 9<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 9>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 9<16 && defined(__ARMEL__) - vrev64.8 d9,d9 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 10 -#if 10<16 - vld1.64 {d10},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 10>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 10<16 && defined(__ARMEL__) - vrev64.8 d10,d10 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 11 -#if 11<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 11>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 11<16 && defined(__ARMEL__) - vrev64.8 d11,d11 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 12 -#if 12<16 - vld1.64 {d12},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 12>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 12<16 && defined(__ARMEL__) - vrev64.8 d12,d12 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 13 -#if 13<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 13>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 13<16 && defined(__ARMEL__) - vrev64.8 d13,d13 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 14 -#if 14<16 - vld1.64 {d14},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 14>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 14<16 && defined(__ARMEL__) - vrev64.8 d14,d14 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 15 -#if 15<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 15>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 15<16 && defined(__ARMEL__) - vrev64.8 d15,d15 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - mov r12,#4 -L16_79_neon: - subs r12,#1 - vshr.u64 q12,q7,#19 - vshr.u64 q13,q7,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q7,#6 - vsli.64 q12,q7,#45 - vext.8 q14,q0,q1,#8 @ X[i+1] - vsli.64 q13,q7,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q0,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q4,q5,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q0,q14 - vshr.u64 d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 
d26,d20,#41 @ from NEON_00_15 - vadd.i64 q0,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 16<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 17 -#if 17<16 - vld1.64 {d1},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 17>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 17<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q0,#19 - vshr.u64 q13,q0,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q0,#6 - vsli.64 q12,q0,#45 - vext.8 q14,q1,q2,#8 @ X[i+1] - vsli.64 q13,q0,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q1,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q5,q6,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 q1,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ 
sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q1,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 18<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 19 -#if 19<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 19>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 19<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q1,#19 - vshr.u64 q13,q1,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q1,#6 - vsli.64 q12,q1,#45 - vext.8 q14,q2,q3,#8 @ X[i+1] - vsli.64 q13,q1,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q2,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q6,q7,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from NEON_00_15 - vadd.i64 q2,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - 
veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q2,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 20<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 21 -#if 21<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 21>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 21<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q2,#19 - vshr.u64 q13,q2,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q2,#6 - vsli.64 q12,q2,#45 - vext.8 q14,q3,q4,#8 @ X[i+1] - vsli.64 q13,q2,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q3,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q7,q0,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d22,#14 @ from NEON_00_15 - vadd.i64 q3,q14 - vshr.u64 d25,d22,#18 @ from 
NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q3,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 22<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 23 -#if 23<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 23>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 23<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 q12,q3,#19 - vshr.u64 q13,q3,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q3,#6 - vsli.64 q12,q3,#45 - vext.8 q14,q4,q5,#8 @ X[i+1] - vsli.64 q13,q3,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q4,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q0,q1,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q4,q14 - vshr.u64 
d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d20,#41 @ from NEON_00_15 - vadd.i64 q4,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 24<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 25 -#if 25<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 25>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 25<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q4,#19 - vshr.u64 q13,q4,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q4,#6 - vsli.64 q12,q4,#45 - vext.8 q14,q5,q6,#8 @ X[i+1] - vsli.64 q13,q4,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q5,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q1,q2,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 
q5,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q5,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 26<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 27 -#if 27<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 27>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 27<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q5,#19 - vshr.u64 q13,q5,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q5,#6 - vsli.64 q12,q5,#45 - vext.8 q14,q6,q7,#8 @ X[i+1] - vsli.64 q13,q5,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q6,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q2,q3,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from 
NEON_00_15 - vadd.i64 q6,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q6,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 28<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 29 -#if 29<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 29>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 29<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q6,#19 - vshr.u64 q13,q6,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q6,#6 - vsli.64 q12,q6,#45 - vext.8 q14,q7,q0,#8 @ X[i+1] - vsli.64 q13,q6,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q7,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q3,q4,#8 @ X[i+9] - veor q15,q12 - vshr.u64 
d24,d22,#14 @ from NEON_00_15 - vadd.i64 q7,q14 - vshr.u64 d25,d22,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q7,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 30<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 31 -#if 31<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 31>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 31<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - bne L16_79_neon - - vadd.i64 d16,d30 @ h+=Maj from the past - vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp - vadd.i64 q8,q12 @ vectorized accumulate - vadd.i64 q9,q13 - vadd.i64 q10,q14 - vadd.i64 q11,q15 - vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context - teq r1,r2 - sub r3,#640 @ rewind K512 - bne Loop_neon - - VFP_ABI_POP - bx lr @ .word 0xe12fff1e - -#endif -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm _OPENSSL_armcap_P,4 -.non_lazy_symbol_pointer -OPENSSL_armcap_P: -.indirect_symbol _OPENSSL_armcap_P -.long 0 -.private_extern _OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/vpaes-armv7.S b/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/vpaes-armv7.S deleted file mode 100644 index 6aead7cac2..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/fipsmodule/vpaes-armv7.S +++ /dev/null @@ -1,1265 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.syntax unified - - - - -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - -.text - - -.align 7 @ totally strategic alignment -_vpaes_consts: -Lk_mc_forward:@ mc_forward -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 -Lk_mc_backward:@ mc_backward -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F -Lk_sr:@ sr -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -@ -@ "Hot" constants -@ -Lk_inv:@ inv, inva -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 -Lk_ipt:@ input transform (lo, hi) -.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 -Lk_sbo:@ sbou, sbot -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA -Lk_sb1:@ sb1u, sb1t -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -Lk_sb2:@ sb2u, sb2t -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.align 2 - -.align 6 -@@ -@@ _aes_preheat -@@ -@@ Fills q9-q15 as specified below. 
-@@ -#ifdef __thumb2__ -.thumb_func _vpaes_preheat -#endif -.align 4 -_vpaes_preheat: - adr r10, Lk_inv - vmov.i8 q9, #0x0f @ Lk_s0F - vld1.64 {q10,q11}, [r10]! @ Lk_inv - add r10, r10, #64 @ Skip Lk_ipt, Lk_sbo - vld1.64 {q12,q13}, [r10]! @ Lk_sb1 - vld1.64 {q14,q15}, [r10] @ Lk_sb2 - bx lr - -@@ -@@ _aes_encrypt_core -@@ -@@ AES-encrypt q0. -@@ -@@ Inputs: -@@ q0 = input -@@ q9-q15 as in _vpaes_preheat -@@ [r2] = scheduled keys -@@ -@@ Output in q0 -@@ Clobbers q1-q5, r8-r11 -@@ Preserves q6-q8 so you get some local vectors -@@ -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_encrypt_core -#endif -.align 4 -_vpaes_encrypt_core: - mov r9, r2 - ldr r8, [r2,#240] @ pull rounds - adr r11, Lk_ipt - @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - vld1.64 {q2, q3}, [r11] - adr r11, Lk_mc_forward+16 - vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1 - vtbl.8 d3, {q2}, d3 - vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2 - vtbl.8 d5, {q3}, d1 - veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - - @ .Lenc_entry ends with a bnz instruction which is normally paired with - @ subs in .Lenc_loop. - tst r8, r8 - b Lenc_entry - -.align 4 -Lenc_loop: - @ middle of middle round - add r10, r11, #0x40 - vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - vtbl.8 d9, {q13}, d5 - vld1.64 {q1}, [r11]! 
@ vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] - vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - vtbl.8 d1, {q12}, d7 - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - vtbl.8 d11, {q15}, d5 - veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A - vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - vtbl.8 d5, {q14}, d7 - vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] - vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B - vtbl.8 d7, {q0}, d3 - veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - @ Write to q5 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - vtbl.8 d11, {q0}, d9 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - vtbl.8 d9, {q3}, d3 - @ Here we restore the original q0/q5 usage. - veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... 
mod 4 - veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - subs r8, r8, #1 @ nr-- - -Lenc_entry: - @ top of round - vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - vtbl.8 d11, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - vtbl.8 d5, {q10}, d7 - vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - vtbl.8 d7, {q10}, d9 - veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io - veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 - bne Lenc_loop - - @ middle of last round - add r10, r11, #0x80 - - adr r11, Lk_sbo - @ Read to q1 instead of q4, so the vtbl.8 instruction below does not - @ overlap table and destination registers. - vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou - vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot Lk_sbo+16 - vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - vtbl.8 d9, {q1}, d5 - vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] - @ Write to q2 instead of q0 below, to avoid overlapping table and - @ destination registers. - vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - vtbl.8 d5, {q0}, d7 - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A - @ Here we restore the original q0/q2 usage. 
- vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 - vtbl.8 d1, {q2}, d3 - bx lr - - -.globl _vpaes_encrypt -.private_extern _vpaes_encrypt -#ifdef __thumb2__ -.thumb_func _vpaes_encrypt -#endif -.align 4 -_vpaes_encrypt: - @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack - @ alignment. - stmdb sp!, {r7,r8,r9,r10,r11,lr} - @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11} - - vld1.64 {q0}, [r0] - bl _vpaes_preheat - bl _vpaes_encrypt_core - vst1.64 {q0}, [r1] - - vldmia sp!, {d8,d9,d10,d11} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return - - -@ -@ Decryption stuff -@ - -.align 4 -_vpaes_decrypt_consts: -Lk_dipt:@ decryption input transform -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 -Lk_dsbo:@ decryption sbox final output -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C -Lk_dsb9:@ decryption sbox output *9*u, *9*t -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -Lk_dsbd:@ decryption sbox output *D*u, *D*t -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -Lk_dsbb:@ decryption sbox output *B*u, *B*t -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -Lk_dsbe:@ decryption sbox output *E*u, *E*t -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 - - -@@ -@@ Decryption core -@@ -@@ Same API as encryption core, except it clobbers q12-q15 rather than using -@@ the values from _vpaes_preheat. q9-q11 must still be set from -@@ _vpaes_preheat. -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_decrypt_core -#endif -.align 4 -_vpaes_decrypt_core: - mov r9, r2 - ldr r8, [r2,#240] @ pull rounds - - @ This function performs shuffles with various constants. The x86_64 - @ version loads them on-demand into %xmm0-%xmm5. 
This does not work well - @ for ARMv7 because those registers are shuffle destinations. The ARMv8 - @ version preloads those constants into registers, but ARMv7 has half - @ the registers to work with. Instead, we load them on-demand into - @ q12-q15, registers normally use for preloaded constants. This is fine - @ because decryption doesn't use those constants. The values are - @ constant, so this does not interfere with potential 2x optimizations. - adr r7, Lk_dipt - - vld1.64 {q12,q13}, [r7] @ vmovdqa Lk_dipt(%rip), %xmm2 # iptlo - lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11 - eor r11, r11, #0x30 @ xor $0x30, %r11 - adr r10, Lk_sr - and r11, r11, #0x30 @ and $0x30, %r11 - add r11, r11, r10 - adr r10, Lk_mc_forward+48 - - vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 - vtbl.8 d5, {q12}, d3 - vld1.64 {q5}, [r10] @ vmovdqa Lk_mc_forward+48(%rip), %xmm5 - @ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 - vtbl.8 d1, {q13}, d1 - veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - - @ .Ldec_entry ends with a bnz instruction which is normally paired with - @ subs in .Ldec_loop. - tst r8, r8 - b Ldec_entry - -.align 4 -Ldec_loop: -@ -@ Inverse mix columns -@ - - @ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of - @ the function. - adr r10, Lk_dsb9 - vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - @ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - @ Load sbd* ahead of time. - vld1.64 {q14,q15}, [r10]! 
@ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - @ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - vtbl.8 d9, {q12}, d5 - vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - vtbl.8 d3, {q13}, d7 - veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0 - - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - - @ Load sbb* ahead of time. - vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu - @ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt - - vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - vtbl.8 d9, {q14}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. - veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - vtbl.8 d3, {q15}, d7 - @ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - @ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - @ Load sbd* ahead of time. - vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu - @ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet - - vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - vtbl.8 d9, {q12}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. 
- veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - vtbl.8 d3, {q13}, d7 - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - - vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - vtbl.8 d9, {q14}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. - veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet - vtbl.8 d3, {q15}, d7 - vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5 - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - subs r8, r8, #1 @ sub $1,%rax # nr-- - -Ldec_entry: - @ top of round - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - vtbl.8 d5, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - vtbl.8 d5, {q10}, d7 - vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - vtbl.8 d7, {q10}, d9 - veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io - veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0 - bne Ldec_loop - - @ middle of last round - - adr r10, Lk_dsbo - - @ Write to q1 rather than q4 to avoid overlapping table and destination. - vld1.64 {q1}, [r10]! 
@ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - vtbl.8 d9, {q1}, d5 - @ Write to q2 rather than q1 to avoid overlapping table and destination. - vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t - vtbl.8 d3, {q2}, d7 - vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 - veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k - @ Write to q1 rather than q0 so the table and destination registers - @ below do not overlap. - veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A - vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0 - vtbl.8 d1, {q1}, d5 - bx lr - - -.globl _vpaes_decrypt -.private_extern _vpaes_decrypt -#ifdef __thumb2__ -.thumb_func _vpaes_decrypt -#endif -.align 4 -_vpaes_decrypt: - @ _vpaes_decrypt_core uses r7-r11. - stmdb sp!, {r7,r8,r9,r10,r11,lr} - @ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11} - - vld1.64 {q0}, [r0] - bl _vpaes_preheat - bl _vpaes_decrypt_core - vst1.64 {q0}, [r1] - - vldmia sp!, {d8,d9,d10,d11} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return - -@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -@@ @@ -@@ AES key schedule @@ -@@ @@ -@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - -@ This function diverges from both x86_64 and armv7 in which constants are -@ pinned. x86_64 has a common preheat function for all operations. aarch64 -@ separates them because it has enough registers to pin nearly all constants. -@ armv7 does not have enough registers, but needing explicit loads and stores -@ also complicates using x86_64's register allocation directly. -@ -@ We pin some constants for convenience and leave q14 and q15 free to load -@ others on demand. 
- -@ -@ Key schedule constants -@ - -.align 4 -_vpaes_key_consts: -Lk_dksd:@ decryption key schedule: invskew x*D -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -Lk_dksb:@ decryption key schedule: invskew x*B -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -Lk_dkse:@ decryption key schedule: invskew x*E + 0x63 -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -Lk_dks9:@ decryption key schedule: invskew x*9 -.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - -Lk_rcon:@ rcon -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -Lk_opt:@ output transform -.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 -Lk_deskew:@ deskew tables: inverts the sbox's "skew" -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 - - -#ifdef __thumb2__ -.thumb_func _vpaes_key_preheat -#endif -.align 4 -_vpaes_key_preheat: - adr r11, Lk_rcon - vmov.i8 q12, #0x5b @ Lk_s63 - adr r10, Lk_inv @ Must be aligned to 8 mod 16. - vmov.i8 q9, #0x0f @ Lk_s0F - vld1.64 {q10,q11}, [r10] @ Lk_inv - vld1.64 {q8}, [r11] @ Lk_rcon - bx lr - - -#ifdef __thumb2__ -.thumb_func _vpaes_schedule_core -#endif -.align 4 -_vpaes_schedule_core: - @ We only need to save lr, but ARM requires an 8-byte stack alignment, - @ so save an extra register. - stmdb sp!, {r3,lr} - - bl _vpaes_key_preheat @ load the tables - - adr r11, Lk_ipt @ Must be aligned to 8 mod 16. - vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned) - - @ input transform - @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not - @ overlap table and destination. - vmov q4, q0 @ vmovdqa %xmm0, %xmm3 - bl _vpaes_schedule_transform - adr r10, Lk_sr @ Must be aligned to 8 mod 16. 
- vmov q7, q0 @ vmovdqa %xmm0, %xmm7 - - add r8, r8, r10 - tst r3, r3 - bne Lschedule_am_decrypting - - @ encrypting, output zeroth round key after transform - vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) - b Lschedule_go - -Lschedule_am_decrypting: - @ decrypting, output zeroth round key after shiftrows - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - vtbl.8 d6, {q4}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q4}, d3 - vst1.64 {q3}, [r2] @ vmovdqu %xmm3, (%rdx) - eor r8, r8, #0x30 @ xor $0x30, %r8 - -Lschedule_go: - cmp r1, #192 @ cmp $192, %esi - bhi Lschedule_256 - beq Lschedule_192 - @ 128: fall though - -@@ -@@ .schedule_128 -@@ -@@ 128-bit specific part of key schedule. -@@ -@@ This schedule is really simple, because all its parts -@@ are accomplished by the subroutines. -@@ -Lschedule_128: - mov r0, #10 @ mov $10, %esi - -Loop_schedule_128: - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq Lschedule_mangle_last - bl _vpaes_schedule_mangle @ write output - b Loop_schedule_128 - -@@ -@@ .aes_schedule_192 -@@ -@@ 192-bit specific part of key schedule. -@@ -@@ The main body of this schedule is the same as the 128-bit -@@ schedule, but with more smearing. The long, high side is -@@ stored in q7 as before, and the short, low side is in -@@ the high bits of q6. -@@ -@@ This schedule is somewhat nastier, however, because each -@@ round produces 192 bits of key material, or 1.5 round keys. -@@ Therefore, on each cycle we do 2 rounds and produce 3 round -@@ keys. 
-@@ -.align 4 -Lschedule_192: - sub r0, r0, #8 - vld1.64 {q0}, [r0] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) - bl _vpaes_schedule_transform @ input transform - vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part - vmov.i8 d12, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4 - @ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros - mov r0, #4 @ mov $4, %esi - -Loop_schedule_192: - bl _vpaes_schedule_round - vext.8 q0, q6, q0, #8 @ vpalignr $8,%xmm6,%xmm0,%xmm0 - bl _vpaes_schedule_mangle @ save key n - bl _vpaes_schedule_192_smear - bl _vpaes_schedule_mangle @ save key n+1 - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq Lschedule_mangle_last - bl _vpaes_schedule_mangle @ save key n+2 - bl _vpaes_schedule_192_smear - b Loop_schedule_192 - -@@ -@@ .aes_schedule_256 -@@ -@@ 256-bit specific part of key schedule. -@@ -@@ The structure here is very similar to the 128-bit -@@ schedule, but with an additional "low side" in -@@ q6. The low side's rounds are the same as the -@@ high side's, except no rcon and no rotation. -@@ -.align 4 -Lschedule_256: - vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) - bl _vpaes_schedule_transform @ input transform - mov r0, #7 @ mov $7, %esi - -Loop_schedule_256: - bl _vpaes_schedule_mangle @ output low result - vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 - - @ high round - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq Lschedule_mangle_last - bl _vpaes_schedule_mangle - - @ low round. swap xmm7 and xmm6 - vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 - vmov.i8 q4, #0 - vmov q5, q7 @ vmovdqa %xmm7, %xmm5 - vmov q7, q6 @ vmovdqa %xmm6, %xmm7 - bl _vpaes_schedule_low_round - vmov q7, q5 @ vmovdqa %xmm5, %xmm7 - - b Loop_schedule_256 - -@@ -@@ .aes_schedule_mangle_last -@@ -@@ Mangler for last round of key schedule -@@ Mangles q0 -@@ when encrypting, outputs out(q0) ^ 63 -@@ when decrypting, outputs unskew(q0) -@@ -@@ Always called right before return... 
jumps to cleanup and exits -@@ -.align 4 -Lschedule_mangle_last: - @ schedule last round key from xmm0 - adr r11, Lk_deskew @ lea Lk_deskew(%rip),%r11 # prepare to deskew - tst r3, r3 - bne Lschedule_mangle_last_dec - - @ encrypting - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1 - adr r11, Lk_opt @ lea Lk_opt(%rip), %r11 # prepare to output transform - add r2, r2, #32 @ add $32, %rdx - vmov q2, q0 - vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute - vtbl.8 d1, {q2}, d3 - -Lschedule_mangle_last_dec: - sub r2, r2, #16 @ add $-16, %rdx - veor q0, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm0 - bl _vpaes_schedule_transform @ output transform - vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key - - @ cleanup - veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0 - veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 - veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2 - veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3 - veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4 - veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5 - veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6 - veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7 - ldmia sp!, {r3,pc} @ return - - -@@ -@@ .aes_schedule_192_smear -@@ -@@ Smear the short, low side in the 192-bit key schedule. 
-@@ -@@ Inputs: -@@ q7: high side, b a x y -@@ q6: low side, d c 0 0 -@@ -@@ Outputs: -@@ q6: b+c+d b+c 0 0 -@@ q0: b+c+d b+c b a -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_schedule_192_smear -#endif -.align 4 -_vpaes_schedule_192_smear: - vmov.i8 q1, #0 - vdup.32 q0, d15[1] - vshl.i64 q1, q6, #32 @ vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 - vmov d0, d15 @ vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a - veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 - veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 - veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a - vmov q0, q6 @ vmovdqa %xmm6, %xmm0 - vmov d12, d2 @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros - bx lr - - -@@ -@@ .aes_schedule_round -@@ -@@ Runs one main round of the key schedule on q0, q7 -@@ -@@ Specifically, runs subbytes on the high dword of q0 -@@ then rotates it by one byte and xors into the low dword of -@@ q7. -@@ -@@ Adds rcon from low byte of q8, then rotates q8 for -@@ next rcon. -@@ -@@ Smears the dwords of q7 by xoring the low into the -@@ second low, result into third, result into highest. -@@ -@@ Returns results in q7 = q0. -@@ Clobbers q1-q4, r11. -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_schedule_round -#endif -.align 4 -_vpaes_schedule_round: - @ extract rcon from xmm8 - vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4 - vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1 - vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8 - veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 - - @ rotate - vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 - vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0 - - @ fall through... - - @ low round: same as high round, but no rotation and no rcon. -_vpaes_schedule_low_round: - @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12. - @ We pin other values in _vpaes_key_preheat, so load them now. 
- adr r11, Lk_sb1 - vld1.64 {q14,q15}, [r11] - - @ smear xmm7 - vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1 - veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 - vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4 - - @ subbytes - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7 - vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - vtbl.8 d5, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q7, q7, q12 @ vpxor Lk_s63(%rip), %xmm7, %xmm7 - vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak - vtbl.8 d7, {q10}, d7 - veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak - vtbl.8 d5, {q10}, d9 - veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io - veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo - vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou - vtbl.8 d9, {q15}, d7 - vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t - vtbl.8 d3, {q14}, d5 - veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output - - @ add in smeared stuff - veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0 - veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7 - bx lr - - -@@ -@@ .aes_schedule_transform -@@ -@@ Linear-transform q0 according to tables at [r11] -@@ -@@ Requires that q9 = 0x0F0F... 
as in preheat -@@ Output in q0 -@@ Clobbers q1, q2, q14, q15 -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_schedule_transform -#endif -.align 4 -_vpaes_schedule_transform: - vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo - @ vmovdqa 16(%r11), %xmm1 # hi - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d3 - vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 - vtbl.8 d1, {q15}, d1 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - bx lr - - -@@ -@@ .aes_schedule_mangle -@@ -@@ Mangles q0 from (basis-transformed) standard version -@@ to our version. -@@ -@@ On encrypt, -@@ xor with 0x63 -@@ multiply by circulant 0,1,1,1 -@@ apply shiftrows transform -@@ -@@ On decrypt, -@@ xor with 0x63 -@@ multiply by "inverse mixcolumns" circulant E,B,D,9 -@@ deskew -@@ apply shiftrows transform -@@ -@@ -@@ Writes out to [r2], and increments or decrements it -@@ Keeps track of round number mod 4 in r8 -@@ Preserves q0 -@@ Clobbers q1-q5 -@@ -#ifdef __thumb2__ -.thumb_func _vpaes_schedule_mangle -#endif -.align 4 -_vpaes_schedule_mangle: - tst r3, r3 - vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later - adr r11, Lk_mc_forward @ Must be aligned to 8 mod 16. - vld1.64 {q5}, [r11] @ vmovdqa Lk_mc_forward(%rip),%xmm5 - bne Lschedule_mangle_dec - - @ encrypting - @ Write to q2 so we do not overlap table and destination below. 
- veor q2, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm4 - add r2, r2, #16 @ add $16, %rdx - vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4 - vtbl.8 d9, {q2}, d11 - vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1 - vtbl.8 d3, {q4}, d11 - vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3 - vtbl.8 d7, {q1}, d11 - veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4 - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3 - - b Lschedule_mangle_both -.align 4 -Lschedule_mangle_dec: - @ inverse mix columns - adr r11, Lk_dksd @ lea Lk_dksd(%rip),%r11 - vshr.u8 q1, q4, #4 @ vpsrlb $4, %xmm4, %xmm1 # 1 = hi - vand q4, q4, q9 @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo - - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2 - @ vmovdqa 0x10(%r11), %xmm3 - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dksb ahead of time. - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2 - @ vmovdqa 0x30(%r11), %xmm3 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dkse ahead of time. - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2 - @ vmovdqa 0x50(%r11), %xmm3 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dkse ahead of time. 
- vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2 - @ vmovdqa 0x70(%r11), %xmm4 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - vtbl.8 d8, {q15}, d2 @ vpshufb %xmm1, %xmm4, %xmm4 - vtbl.8 d9, {q15}, d3 - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3 - - sub r2, r2, #16 @ add $-16, %rdx - -Lschedule_mangle_both: - @ Write to q2 so table and destination do not overlap. - vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d5, {q3}, d3 - add r8, r8, #64-16 @ add $-16, %r8 - and r8, r8, #~(1<<6) @ and $0x30, %r8 - vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx) - bx lr - - -.globl _vpaes_set_encrypt_key -.private_extern _vpaes_set_encrypt_key -#ifdef __thumb2__ -.thumb_func _vpaes_set_encrypt_key -#endif -.align 4 -_vpaes_set_encrypt_key: - stmdb sp!, {r7,r8,r9,r10,r11, lr} - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - lsr r9, r1, #5 @ shr $5,%eax - add r9, r9, #5 @ $5,%eax - str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - - mov r3, #0 @ mov $0,%ecx - mov r8, #0x30 @ mov $0x30,%r8d - bl _vpaes_schedule_core - eor r0, r0, r0 - - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return - - -.globl _vpaes_set_decrypt_key -.private_extern _vpaes_set_decrypt_key -#ifdef __thumb2__ -.thumb_func _vpaes_set_decrypt_key -#endif -.align 4 -_vpaes_set_decrypt_key: - stmdb sp!, {r7,r8,r9,r10,r11, lr} - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - lsr r9, r1, #5 @ shr $5,%eax - add r9, r9, #5 @ $5,%eax - str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - lsl r9, r9, #4 @ shl $4,%eax - add r2, r2, #16 @ lea 16(%rdx,%rax),%rdx - add r2, r2, r9 - - mov r3, #1 @ mov $1,%ecx - lsr r8, r1, 
#1 @ shr $1,%r8d - and r8, r8, #32 @ and $32,%r8d - eor r8, r8, #32 @ xor $32,%r8d # nbits==192?0:32 - bl _vpaes_schedule_core - - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return - - -@ Additional constants for converting to bsaes. - -.align 4 -_vpaes_convert_consts: -@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear -@ transform in the AES S-box. 0x63 is incorporated into the low half of the -@ table. This was computed with the following script: -@ -@ def u64s_to_u128(x, y): -@ return x | (y << 64) -@ def u128_to_u64s(w): -@ return w & ((1<<64)-1), w >> 64 -@ def get_byte(w, i): -@ return (w >> (i*8)) & 0xff -@ def apply_table(table, b): -@ lo = b & 0xf -@ hi = b >> 4 -@ return get_byte(table[0], lo) ^ get_byte(table[1], hi) -@ def opt(b): -@ table = [ -@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808), -@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0), -@ ] -@ return apply_table(table, b) -@ def rot_byte(b, n): -@ return 0xff & ((b << n) | (b >> (8-n))) -@ def skew(x): -@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^ -@ rot_byte(x, 4)) -@ table = [0, 0] -@ for i in range(16): -@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8) -@ table[1] |= skew(opt(i<<4)) << (i*8) -@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0])) -@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1])) -Lk_opt_then_skew: -.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b -.quad 0x1f30062936192f00, 0xb49bad829db284ab - -@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation -@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344 -@ becomes 0x22334411 and then 0x11443322. 
-Lk_decrypt_transform: -.quad 0x0704050603000102, 0x0f0c0d0e0b08090a - - -@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes); -.globl _vpaes_encrypt_key_to_bsaes -.private_extern _vpaes_encrypt_key_to_bsaes -#ifdef __thumb2__ -.thumb_func _vpaes_encrypt_key_to_bsaes -#endif -.align 4 -_vpaes_encrypt_key_to_bsaes: - stmdb sp!, {r11, lr} - - @ See _vpaes_schedule_core for the key schedule logic. In particular, - @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper), - @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last - @ contain the transformations not in the bsaes representation. This - @ function inverts those transforms. - @ - @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key - @ representation, which does not match the other aes_nohw_* - @ implementations. The ARM aes_nohw_* stores each 32-bit word - @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the - @ cost of extra REV and VREV32 operations in little-endian ARM. - - vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform - adr r2, Lk_mc_forward @ Must be aligned to 8 mod 16. - add r3, r2, 0x90 @ Lk_sr+0x10-Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression) - - vld1.64 {q12}, [r2] - vmov.i8 q10, #0x5b @ Lk_s63 from vpaes-x86_64 - adr r11, Lk_opt @ Must be aligned to 8 mod 16. - vmov.i8 q11, #0x63 @ LK_s63 without Lk_ipt applied - - @ vpaes stores one fewer round count than bsaes, but the number of keys - @ is the same. - ldr r2, [r1,#240] - add r2, r2, #1 - str r2, [r0,#240] - - @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt). - @ Invert this with .Lk_opt. - vld1.64 {q0}, [r1]! - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied, - @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63, - @ multiplies by the circulant 0,1,1,1, then applies ShiftRows. 
-Loop_enc_key_to_bsaes: - vld1.64 {q0}, [r1]! - - @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle - @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30. - @ We use r3 rather than r8 to avoid a callee-saved register. - vld1.64 {q1}, [r3] - vtbl.8 d4, {q0}, d2 - vtbl.8 d5, {q0}, d3 - add r3, r3, #16 - and r3, r3, #~(1<<6) - vmov q0, q2 - - @ Handle the last key differently. - subs r2, r2, #1 - beq Loop_enc_key_to_bsaes_last - - @ Multiply by the circulant. This is its own inverse. - vtbl.8 d2, {q0}, d24 - vtbl.8 d3, {q0}, d25 - vmov q0, q1 - vtbl.8 d4, {q1}, d24 - vtbl.8 d5, {q1}, d25 - veor q0, q0, q2 - vtbl.8 d2, {q2}, d24 - vtbl.8 d3, {q2}, d25 - veor q0, q0, q1 - - @ XOR and finish. - veor q0, q0, q10 - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - b Loop_enc_key_to_bsaes - -Loop_enc_key_to_bsaes_last: - @ The final key does not have a basis transform (note - @ .Lschedule_mangle_last inverts the original transform). It only XORs - @ 0x63 and applies ShiftRows. The latter was already inverted in the - @ loop. Note that, because we act on the original representation, we use - @ q11, not q10. - veor q0, q0, q11 - vrev32.8 q0, q0 - vst1.64 {q0}, [r0] - - @ Wipe registers which contained key material. - veor q0, q0, q0 - veor q1, q1, q1 - veor q2, q2, q2 - - ldmia sp!, {r11, pc} @ return - - -@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes); -.globl _vpaes_decrypt_key_to_bsaes -.private_extern _vpaes_decrypt_key_to_bsaes -#ifdef __thumb2__ -.thumb_func _vpaes_decrypt_key_to_bsaes -#endif -.align 4 -_vpaes_decrypt_key_to_bsaes: - stmdb sp!, {r11, lr} - - @ See _vpaes_schedule_core for the key schedule logic. Note vpaes - @ computes the decryption key schedule in reverse. Additionally, - @ aes-x86_64.pl shares some transformations, so we must only partially - @ invert vpaes's transformations. 
In general, vpaes computes in a - @ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of - @ MixColumns, ShiftRows, and the affine part of the AES S-box (which is - @ split into a linear skew and XOR of 0x63). We undo all but MixColumns. - @ - @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key - @ representation, which does not match the other aes_nohw_* - @ implementations. The ARM aes_nohw_* stores each 32-bit word - @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the - @ cost of extra REV and VREV32 operations in little-endian ARM. - - adr r2, Lk_decrypt_transform - adr r3, Lk_sr+0x30 - adr r11, Lk_opt_then_skew @ Input to _vpaes_schedule_transform. - vld1.64 {q12}, [r2] @ Reuse q12 from encryption. - vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform - - @ vpaes stores one fewer round count than bsaes, but the number of keys - @ is the same. - ldr r2, [r1,#240] - add r2, r2, #1 - str r2, [r0,#240] - - @ Undo the basis change and reapply the S-box affine transform. See - @ .Lschedule_mangle_last. - vld1.64 {q0}, [r1]! - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ See _vpaes_schedule_mangle for the transform on the middle keys. Note - @ it simultaneously inverts MixColumns and the S-box affine transform. - @ See .Lk_dksd through .Lk_dks9. -Loop_dec_key_to_bsaes: - vld1.64 {q0}, [r1]! - - @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going - @ forwards cancels inverting for which direction we cycle r3. We use r3 - @ rather than r8 to avoid a callee-saved register. - vld1.64 {q1}, [r3] - vtbl.8 d4, {q0}, d2 - vtbl.8 d5, {q0}, d3 - add r3, r3, #64-16 - and r3, r3, #~(1<<6) - vmov q0, q2 - - @ Handle the last key differently. - subs r2, r2, #1 - beq Loop_dec_key_to_bsaes_last - - @ Undo the basis change and reapply the S-box affine transform. - bl _vpaes_schedule_transform - - @ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. 
We - @ combine the two operations in .Lk_decrypt_transform. - @ - @ TODO(davidben): Where does the rotation come from? - vtbl.8 d2, {q0}, d24 - vtbl.8 d3, {q0}, d25 - - vst1.64 {q1}, [r0]! - b Loop_dec_key_to_bsaes - -Loop_dec_key_to_bsaes_last: - @ The final key only inverts ShiftRows (already done in the loop). See - @ .Lschedule_am_decrypting. Its basis is not transformed. - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ Wipe registers which contained key material. - veor q0, q0, q0 - veor q1, q1, q1 - veor q2, q2, q2 - - ldmia sp!, {r11, pc} @ return - -.globl _vpaes_ctr32_encrypt_blocks -.private_extern _vpaes_ctr32_encrypt_blocks -#ifdef __thumb2__ -.thumb_func _vpaes_ctr32_encrypt_blocks -#endif -.align 4 -_vpaes_ctr32_encrypt_blocks: - mov ip, sp - stmdb sp!, {r7,r8,r9,r10,r11, lr} - @ This function uses q4-q7 (d8-d15), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - cmp r2, #0 - @ r8 is passed on the stack. - ldr r8, [ip] - beq Lctr32_done - - @ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3. - mov r9, r3 - mov r3, r2 - mov r2, r9 - - @ Load the IV and counter portion. - ldr r7, [r8, #12] - vld1.8 {q7}, [r8] - - bl _vpaes_preheat - rev r7, r7 @ The counter is big-endian. - -Lctr32_loop: - vmov q0, q7 - vld1.8 {q6}, [r0]! @ Load input ahead of time - bl _vpaes_encrypt_core - veor q0, q0, q6 @ XOR input and result - vst1.8 {q0}, [r1]! - subs r3, r3, #1 - @ Update the counter. 
- add r7, r7, #1 - rev r9, r7 - vmov.32 d15[1], r9 - bne Lctr32_loop - -Lctr32_done: - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return - -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/ios-arm/crypto/test/trampoline-armv4.S b/packager/third_party/boringssl/ios-arm/crypto/test/trampoline-armv4.S deleted file mode 100644 index 51ac249ef5..0000000000 --- a/packager/third_party/boringssl/ios-arm/crypto/test/trampoline-armv4.S +++ /dev/null @@ -1,377 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.syntax unified - - - - -.text - -@ abi_test_trampoline loads callee-saved registers from |state|, calls |func| -@ with |argv|, then saves the callee-saved registers into |state|. It returns -@ the result of |func|. The |unwind| argument is unused. -@ uint32_t abi_test_trampoline(void (*func)(...), CallerState *state, -@ const uint32_t *argv, size_t argc, -@ int unwind); - -.globl _abi_test_trampoline -.private_extern _abi_test_trampoline -.align 4 -_abi_test_trampoline: -Labi_test_trampoline_begin: - @ Save parameters and all callee-saved registers. For convenience, we - @ save r9 on iOS even though it's volatile. - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - stmdb sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} - - @ Reserve stack space for six (10-4) stack parameters, plus an extra 4 - @ bytes to keep it 8-byte-aligned (see AAPCS, section 5.3). - sub sp, sp, #28 - - @ Every register in AAPCS is either non-volatile or a parameter (except - @ r9 on iOS), so this code, by the actual call, loses all its scratch - @ registers. First fill in stack parameters while there are registers - @ to spare. 
- cmp r3, #4 - bls Lstack_args_done - mov r4, sp @ r4 is the output pointer. - add r5, r2, r3, lsl #2 @ Set r5 to the end of argv. - add r2, r2, #16 @ Skip four arguments. -Lstack_args_loop: - ldr r6, [r2], #4 - cmp r2, r5 - str r6, [r4], #4 - bne Lstack_args_loop - -Lstack_args_done: - @ Load registers from |r1|. - vldmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} -#if defined(__APPLE__) - @ r9 is not volatile on iOS. - ldmia r1!, {r4,r5,r6,r7,r8,r10-r11} -#else - ldmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} -#endif - - @ Load register parameters. This uses up our remaining registers, so we - @ repurpose lr as scratch space. - ldr r3, [sp, #40] @ Reload argc. - ldr lr, [sp, #36] @ Load argv into lr. - cmp r3, #3 - bhi Larg_r3 - beq Larg_r2 - cmp r3, #1 - bhi Larg_r1 - beq Larg_r0 - b Largs_done - -Larg_r3: - ldr r3, [lr, #12] @ argv[3] -Larg_r2: - ldr r2, [lr, #8] @ argv[2] -Larg_r1: - ldr r1, [lr, #4] @ argv[1] -Larg_r0: - ldr r0, [lr] @ argv[0] -Largs_done: - - @ With every other register in use, load the function pointer into lr - @ and call the function. - ldr lr, [sp, #28] - blx lr - - @ r1-r3 are free for use again. The trampoline only supports - @ single-return functions. Pass r4-r11 to the caller. - ldr r1, [sp, #32] - vstmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} -#if defined(__APPLE__) - @ r9 is not volatile on iOS. - stmia r1!, {r4,r5,r6,r7,r8,r10-r11} -#else - stmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} -#endif - - @ Unwind the stack and restore registers. - add sp, sp, #44 @ 44 = 28+16 - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Skip r0-r3 (see +16 above). 
- vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - bx lr - - -.globl _abi_test_clobber_r0 -.private_extern _abi_test_clobber_r0 -.align 4 -_abi_test_clobber_r0: - mov r0, #0 - bx lr - - -.globl _abi_test_clobber_r1 -.private_extern _abi_test_clobber_r1 -.align 4 -_abi_test_clobber_r1: - mov r1, #0 - bx lr - - -.globl _abi_test_clobber_r2 -.private_extern _abi_test_clobber_r2 -.align 4 -_abi_test_clobber_r2: - mov r2, #0 - bx lr - - -.globl _abi_test_clobber_r3 -.private_extern _abi_test_clobber_r3 -.align 4 -_abi_test_clobber_r3: - mov r3, #0 - bx lr - - -.globl _abi_test_clobber_r4 -.private_extern _abi_test_clobber_r4 -.align 4 -_abi_test_clobber_r4: - mov r4, #0 - bx lr - - -.globl _abi_test_clobber_r5 -.private_extern _abi_test_clobber_r5 -.align 4 -_abi_test_clobber_r5: - mov r5, #0 - bx lr - - -.globl _abi_test_clobber_r6 -.private_extern _abi_test_clobber_r6 -.align 4 -_abi_test_clobber_r6: - mov r6, #0 - bx lr - - -.globl _abi_test_clobber_r7 -.private_extern _abi_test_clobber_r7 -.align 4 -_abi_test_clobber_r7: - mov r7, #0 - bx lr - - -.globl _abi_test_clobber_r8 -.private_extern _abi_test_clobber_r8 -.align 4 -_abi_test_clobber_r8: - mov r8, #0 - bx lr - - -.globl _abi_test_clobber_r9 -.private_extern _abi_test_clobber_r9 -.align 4 -_abi_test_clobber_r9: - mov r9, #0 - bx lr - - -.globl _abi_test_clobber_r10 -.private_extern _abi_test_clobber_r10 -.align 4 -_abi_test_clobber_r10: - mov r10, #0 - bx lr - - -.globl _abi_test_clobber_r11 -.private_extern _abi_test_clobber_r11 -.align 4 -_abi_test_clobber_r11: - mov r11, #0 - bx lr - - -.globl _abi_test_clobber_r12 -.private_extern _abi_test_clobber_r12 -.align 4 -_abi_test_clobber_r12: - mov r12, #0 - bx lr - - -.globl _abi_test_clobber_d0 -.private_extern _abi_test_clobber_d0 -.align 4 -_abi_test_clobber_d0: - mov r0, #0 - vmov s0, r0 - vmov s1, r0 - bx lr - - -.globl _abi_test_clobber_d1 -.private_extern _abi_test_clobber_d1 -.align 4 -_abi_test_clobber_d1: - mov r0, #0 - vmov s2, r0 - vmov s3, r0 - bx lr 
- - -.globl _abi_test_clobber_d2 -.private_extern _abi_test_clobber_d2 -.align 4 -_abi_test_clobber_d2: - mov r0, #0 - vmov s4, r0 - vmov s5, r0 - bx lr - - -.globl _abi_test_clobber_d3 -.private_extern _abi_test_clobber_d3 -.align 4 -_abi_test_clobber_d3: - mov r0, #0 - vmov s6, r0 - vmov s7, r0 - bx lr - - -.globl _abi_test_clobber_d4 -.private_extern _abi_test_clobber_d4 -.align 4 -_abi_test_clobber_d4: - mov r0, #0 - vmov s8, r0 - vmov s9, r0 - bx lr - - -.globl _abi_test_clobber_d5 -.private_extern _abi_test_clobber_d5 -.align 4 -_abi_test_clobber_d5: - mov r0, #0 - vmov s10, r0 - vmov s11, r0 - bx lr - - -.globl _abi_test_clobber_d6 -.private_extern _abi_test_clobber_d6 -.align 4 -_abi_test_clobber_d6: - mov r0, #0 - vmov s12, r0 - vmov s13, r0 - bx lr - - -.globl _abi_test_clobber_d7 -.private_extern _abi_test_clobber_d7 -.align 4 -_abi_test_clobber_d7: - mov r0, #0 - vmov s14, r0 - vmov s15, r0 - bx lr - - -.globl _abi_test_clobber_d8 -.private_extern _abi_test_clobber_d8 -.align 4 -_abi_test_clobber_d8: - mov r0, #0 - vmov s16, r0 - vmov s17, r0 - bx lr - - -.globl _abi_test_clobber_d9 -.private_extern _abi_test_clobber_d9 -.align 4 -_abi_test_clobber_d9: - mov r0, #0 - vmov s18, r0 - vmov s19, r0 - bx lr - - -.globl _abi_test_clobber_d10 -.private_extern _abi_test_clobber_d10 -.align 4 -_abi_test_clobber_d10: - mov r0, #0 - vmov s20, r0 - vmov s21, r0 - bx lr - - -.globl _abi_test_clobber_d11 -.private_extern _abi_test_clobber_d11 -.align 4 -_abi_test_clobber_d11: - mov r0, #0 - vmov s22, r0 - vmov s23, r0 - bx lr - - -.globl _abi_test_clobber_d12 -.private_extern _abi_test_clobber_d12 -.align 4 -_abi_test_clobber_d12: - mov r0, #0 - vmov s24, r0 - vmov s25, r0 - bx lr - - -.globl _abi_test_clobber_d13 -.private_extern _abi_test_clobber_d13 -.align 4 -_abi_test_clobber_d13: - mov r0, #0 - vmov s26, r0 - vmov s27, r0 - bx lr - - -.globl _abi_test_clobber_d14 -.private_extern _abi_test_clobber_d14 -.align 4 -_abi_test_clobber_d14: - mov r0, #0 - vmov s28, 
r0 - vmov s29, r0 - bx lr - - -.globl _abi_test_clobber_d15 -.private_extern _abi_test_clobber_d15 -.align 4 -_abi_test_clobber_d15: - mov r0, #0 - vmov s30, r0 - vmov s31, r0 - bx lr - -#endif // !OPENSSL_NO_ASM diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/chacha/chacha-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/chacha/chacha-armv8.S deleted file mode 100644 index 49449bf532..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/chacha/chacha-armv8.S +++ /dev/null @@ -1,1985 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - - - -.section .rodata - -.align 5 -.Lsigma: -.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral -.Lone: -.long 1,0,0,0 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 - -.text - -.globl ChaCha20_ctr32 -.hidden ChaCha20_ctr32 -.type ChaCha20_ctr32,%function -.align 5 -ChaCha20_ctr32: - cbz x2,.Labort -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x5,:pg_hi21_nc:OPENSSL_armcap_P -#else - adrp x5,OPENSSL_armcap_P -#endif - cmp x2,#192 - b.lo .Lshort - ldr w17,[x5,:lo12:OPENSSL_armcap_P] - tst w17,#ARMV7_NEON - b.ne ChaCha20_neon - -.Lshort: - stp x29,x30,[sp,#-96]! 
- add x29,sp,#0 - - adrp x5,.Lsigma - add x5,x5,:lo12:.Lsigma - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#64 - - ldp x22,x23,[x5] // load sigma - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ldp x28,x30,[x4] // load counter -#ifdef __ARMEB__ - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - -.Loop_outer: - mov w5,w22 // unpack key block - lsr x6,x22,#32 - mov w7,w23 - lsr x8,x23,#32 - mov w9,w24 - lsr x10,x24,#32 - mov w11,w25 - lsr x12,x25,#32 - mov w13,w26 - lsr x14,x26,#32 - mov w15,w27 - lsr x16,x27,#32 - mov w17,w28 - lsr x19,x28,#32 - mov w20,w30 - lsr x21,x30,#32 - - mov x4,#10 - subs x2,x2,#64 -.Loop: - sub x4,x4,#1 - add w5,w5,w9 - add w6,w6,w10 - add w7,w7,w11 - add w8,w8,w12 - eor w17,w17,w5 - eor w19,w19,w6 - eor w20,w20,w7 - eor w21,w21,w8 - ror w17,w17,#16 - ror w19,w19,#16 - ror w20,w20,#16 - ror w21,w21,#16 - add w13,w13,w17 - add w14,w14,w19 - add w15,w15,w20 - add w16,w16,w21 - eor w9,w9,w13 - eor w10,w10,w14 - eor w11,w11,w15 - eor w12,w12,w16 - ror w9,w9,#20 - ror w10,w10,#20 - ror w11,w11,#20 - ror w12,w12,#20 - add w5,w5,w9 - add w6,w6,w10 - add w7,w7,w11 - add w8,w8,w12 - eor w17,w17,w5 - eor w19,w19,w6 - eor w20,w20,w7 - eor w21,w21,w8 - ror w17,w17,#24 - ror w19,w19,#24 - ror w20,w20,#24 - ror w21,w21,#24 - add w13,w13,w17 - add w14,w14,w19 - add w15,w15,w20 - add w16,w16,w21 - eor w9,w9,w13 - eor w10,w10,w14 - eor w11,w11,w15 - eor w12,w12,w16 - ror w9,w9,#25 - ror w10,w10,#25 - ror w11,w11,#25 - ror w12,w12,#25 - add w5,w5,w10 - add w6,w6,w11 - add w7,w7,w12 - add w8,w8,w9 - eor w21,w21,w5 - eor w17,w17,w6 - eor w19,w19,w7 - eor w20,w20,w8 - ror w21,w21,#16 - ror w17,w17,#16 - ror w19,w19,#16 - ror w20,w20,#16 - add w15,w15,w21 - add w16,w16,w17 - add w13,w13,w19 - add w14,w14,w20 - eor w10,w10,w15 - eor w11,w11,w16 - eor w12,w12,w13 - eor w9,w9,w14 - ror w10,w10,#20 - ror 
w11,w11,#20 - ror w12,w12,#20 - ror w9,w9,#20 - add w5,w5,w10 - add w6,w6,w11 - add w7,w7,w12 - add w8,w8,w9 - eor w21,w21,w5 - eor w17,w17,w6 - eor w19,w19,w7 - eor w20,w20,w8 - ror w21,w21,#24 - ror w17,w17,#24 - ror w19,w19,#24 - ror w20,w20,#24 - add w15,w15,w21 - add w16,w16,w17 - add w13,w13,w19 - add w14,w14,w20 - eor w10,w10,w15 - eor w11,w11,w16 - eor w12,w12,w13 - eor w9,w9,w14 - ror w10,w10,#25 - ror w11,w11,#25 - ror w12,w12,#25 - ror w9,w9,#25 - cbnz x4,.Loop - - add w5,w5,w22 // accumulate key block - add x6,x6,x22,lsr#32 - add w7,w7,w23 - add x8,x8,x23,lsr#32 - add w9,w9,w24 - add x10,x10,x24,lsr#32 - add w11,w11,w25 - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add x21,x21,x30,lsr#32 - - b.lo .Ltail - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#1 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - - b.hi .Loop_outer - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 -.Labort: - ret - -.align 4 -.Ltail: - add x2,x2,#64 -.Less_than_64: - sub x0,x0,#1 - add x1,x1,x2 - add x0,x0,x2 - add x4,sp,x2 - neg x2,x2 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - add 
x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - stp x5,x7,[sp,#0] - stp x9,x11,[sp,#16] - stp x13,x15,[sp,#32] - stp x17,x20,[sp,#48] - -.Loop_tail: - ldrb w10,[x1,x2] - ldrb w11,[x4,x2] - add x2,x2,#1 - eor w10,w10,w11 - strb w10,[x0,x2] - cbnz x2,.Loop_tail - - stp xzr,xzr,[sp,#0] - stp xzr,xzr,[sp,#16] - stp xzr,xzr,[sp,#32] - stp xzr,xzr,[sp,#48] - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret -.size ChaCha20_ctr32,.-ChaCha20_ctr32 - -.type ChaCha20_neon,%function -.align 5 -ChaCha20_neon: - stp x29,x30,[sp,#-96]! - add x29,sp,#0 - - adrp x5,.Lsigma - add x5,x5,:lo12:.Lsigma - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - cmp x2,#512 - b.hs .L512_or_more_neon - - sub sp,sp,#64 - - ldp x22,x23,[x5] // load sigma - ld1 {v24.4s},[x5],#16 - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ld1 {v25.4s,v26.4s},[x3] - ldp x28,x30,[x4] // load counter - ld1 {v27.4s},[x4] - ld1 {v31.4s},[x5] -#ifdef __ARMEB__ - rev64 v24.4s,v24.4s - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - add v27.4s,v27.4s,v31.4s // += 1 - add v28.4s,v27.4s,v31.4s - add v29.4s,v28.4s,v31.4s - shl v31.4s,v31.4s,#2 // 1 -> 4 - -.Loop_outer_neon: - mov w5,w22 // unpack key block - lsr x6,x22,#32 - mov v0.16b,v24.16b - mov w7,w23 - lsr x8,x23,#32 - mov v4.16b,v24.16b - mov w9,w24 - lsr x10,x24,#32 - mov v16.16b,v24.16b - mov w11,w25 - mov v1.16b,v25.16b - lsr x12,x25,#32 - mov v5.16b,v25.16b - mov w13,w26 - mov v17.16b,v25.16b - lsr x14,x26,#32 - mov v3.16b,v27.16b - mov w15,w27 - mov v7.16b,v28.16b - lsr x16,x27,#32 - 
mov v19.16b,v29.16b - mov w17,w28 - mov v2.16b,v26.16b - lsr x19,x28,#32 - mov v6.16b,v26.16b - mov w20,w30 - mov v18.16b,v26.16b - lsr x21,x30,#32 - - mov x4,#10 - subs x2,x2,#256 -.Loop_neon: - sub x4,x4,#1 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v16.4s,v16.4s,v17.4s - add w7,w7,w11 - eor v3.16b,v3.16b,v0.16b - add w8,w8,w12 - eor v7.16b,v7.16b,v4.16b - eor w17,w17,w5 - eor v19.16b,v19.16b,v16.16b - eor w19,w19,w6 - rev32 v3.8h,v3.8h - eor w20,w20,w7 - rev32 v7.8h,v7.8h - eor w21,w21,w8 - rev32 v19.8h,v19.8h - ror w17,w17,#16 - add v2.4s,v2.4s,v3.4s - ror w19,w19,#16 - add v6.4s,v6.4s,v7.4s - ror w20,w20,#16 - add v18.4s,v18.4s,v19.4s - ror w21,w21,#16 - eor v20.16b,v1.16b,v2.16b - add w13,w13,w17 - eor v21.16b,v5.16b,v6.16b - add w14,w14,w19 - eor v22.16b,v17.16b,v18.16b - add w15,w15,w20 - ushr v1.4s,v20.4s,#20 - add w16,w16,w21 - ushr v5.4s,v21.4s,#20 - eor w9,w9,w13 - ushr v17.4s,v22.4s,#20 - eor w10,w10,w14 - sli v1.4s,v20.4s,#12 - eor w11,w11,w15 - sli v5.4s,v21.4s,#12 - eor w12,w12,w16 - sli v17.4s,v22.4s,#12 - ror w9,w9,#20 - add v0.4s,v0.4s,v1.4s - ror w10,w10,#20 - add v4.4s,v4.4s,v5.4s - ror w11,w11,#20 - add v16.4s,v16.4s,v17.4s - ror w12,w12,#20 - eor v20.16b,v3.16b,v0.16b - add w5,w5,w9 - eor v21.16b,v7.16b,v4.16b - add w6,w6,w10 - eor v22.16b,v19.16b,v16.16b - add w7,w7,w11 - ushr v3.4s,v20.4s,#24 - add w8,w8,w12 - ushr v7.4s,v21.4s,#24 - eor w17,w17,w5 - ushr v19.4s,v22.4s,#24 - eor w19,w19,w6 - sli v3.4s,v20.4s,#8 - eor w20,w20,w7 - sli v7.4s,v21.4s,#8 - eor w21,w21,w8 - sli v19.4s,v22.4s,#8 - ror w17,w17,#24 - add v2.4s,v2.4s,v3.4s - ror w19,w19,#24 - add v6.4s,v6.4s,v7.4s - ror w20,w20,#24 - add v18.4s,v18.4s,v19.4s - ror w21,w21,#24 - eor v20.16b,v1.16b,v2.16b - add w13,w13,w17 - eor v21.16b,v5.16b,v6.16b - add w14,w14,w19 - eor v22.16b,v17.16b,v18.16b - add w15,w15,w20 - ushr v1.4s,v20.4s,#25 - add w16,w16,w21 - ushr v5.4s,v21.4s,#25 - eor w9,w9,w13 - ushr v17.4s,v22.4s,#25 - eor w10,w10,w14 - sli 
v1.4s,v20.4s,#7 - eor w11,w11,w15 - sli v5.4s,v21.4s,#7 - eor w12,w12,w16 - sli v17.4s,v22.4s,#7 - ror w9,w9,#25 - ext v2.16b,v2.16b,v2.16b,#8 - ror w10,w10,#25 - ext v6.16b,v6.16b,v6.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w10 - add v4.4s,v4.4s,v5.4s - add w6,w6,w11 - add v16.4s,v16.4s,v17.4s - add w7,w7,w12 - eor v3.16b,v3.16b,v0.16b - add w8,w8,w9 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w5 - eor v19.16b,v19.16b,v16.16b - eor w17,w17,w6 - rev32 v3.8h,v3.8h - eor w19,w19,w7 - rev32 v7.8h,v7.8h - eor w20,w20,w8 - rev32 v19.8h,v19.8h - ror w21,w21,#16 - add v2.4s,v2.4s,v3.4s - ror w17,w17,#16 - add v6.4s,v6.4s,v7.4s - ror w19,w19,#16 - add v18.4s,v18.4s,v19.4s - ror w20,w20,#16 - eor v20.16b,v1.16b,v2.16b - add w15,w15,w21 - eor v21.16b,v5.16b,v6.16b - add w16,w16,w17 - eor v22.16b,v17.16b,v18.16b - add w13,w13,w19 - ushr v1.4s,v20.4s,#20 - add w14,w14,w20 - ushr v5.4s,v21.4s,#20 - eor w10,w10,w15 - ushr v17.4s,v22.4s,#20 - eor w11,w11,w16 - sli v1.4s,v20.4s,#12 - eor w12,w12,w13 - sli v5.4s,v21.4s,#12 - eor w9,w9,w14 - sli v17.4s,v22.4s,#12 - ror w10,w10,#20 - add v0.4s,v0.4s,v1.4s - ror w11,w11,#20 - add v4.4s,v4.4s,v5.4s - ror w12,w12,#20 - add v16.4s,v16.4s,v17.4s - ror w9,w9,#20 - eor v20.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v21.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v22.16b,v19.16b,v16.16b - add w7,w7,w12 - ushr v3.4s,v20.4s,#24 - add w8,w8,w9 - ushr v7.4s,v21.4s,#24 - eor w21,w21,w5 - ushr v19.4s,v22.4s,#24 - eor w17,w17,w6 - sli v3.4s,v20.4s,#8 - eor w19,w19,w7 - sli v7.4s,v21.4s,#8 - eor w20,w20,w8 - sli v19.4s,v22.4s,#8 - ror w21,w21,#24 - add v2.4s,v2.4s,v3.4s - ror w17,w17,#24 - add v6.4s,v6.4s,v7.4s - ror w19,w19,#24 - add v18.4s,v18.4s,v19.4s - ror w20,w20,#24 - eor v20.16b,v1.16b,v2.16b - 
add w15,w15,w21 - eor v21.16b,v5.16b,v6.16b - add w16,w16,w17 - eor v22.16b,v17.16b,v18.16b - add w13,w13,w19 - ushr v1.4s,v20.4s,#25 - add w14,w14,w20 - ushr v5.4s,v21.4s,#25 - eor w10,w10,w15 - ushr v17.4s,v22.4s,#25 - eor w11,w11,w16 - sli v1.4s,v20.4s,#7 - eor w12,w12,w13 - sli v5.4s,v21.4s,#7 - eor w9,w9,w14 - sli v17.4s,v22.4s,#7 - ror w10,w10,#25 - ext v2.16b,v2.16b,v2.16b,#8 - ror w11,w11,#25 - ext v6.16b,v6.16b,v6.16b,#8 - ror w12,w12,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - cbnz x4,.Loop_neon - - add w5,w5,w22 // accumulate key block - add v0.4s,v0.4s,v24.4s - add x6,x6,x22,lsr#32 - add v4.4s,v4.4s,v24.4s - add w7,w7,w23 - add v16.4s,v16.4s,v24.4s - add x8,x8,x23,lsr#32 - add v2.4s,v2.4s,v26.4s - add w9,w9,w24 - add v6.4s,v6.4s,v26.4s - add x10,x10,x24,lsr#32 - add v18.4s,v18.4s,v26.4s - add w11,w11,w25 - add v3.4s,v3.4s,v27.4s - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add v7.4s,v7.4s,v28.4s - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add v19.4s,v19.4s,v29.4s - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add v1.4s,v1.4s,v25.4s - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add v5.4s,v5.4s,v25.4s - add x21,x21,x30,lsr#32 - add v17.4s,v17.4s,v25.4s - - b.lo .Ltail_neon - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor 
v0.16b,v0.16b,v20.16b - eor x15,x15,x16 - eor v1.16b,v1.16b,v21.16b - eor x17,x17,x19 - eor v2.16b,v2.16b,v22.16b - eor x20,x20,x21 - eor v3.16b,v3.16b,v23.16b - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#4 // increment counter - stp x9,x11,[x0,#16] - add v27.4s,v27.4s,v31.4s // += 4 - stp x13,x15,[x0,#32] - add v28.4s,v28.4s,v31.4s - stp x17,x20,[x0,#48] - add v29.4s,v29.4s,v31.4s - add x0,x0,#64 - - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 - - eor v4.16b,v4.16b,v20.16b - eor v5.16b,v5.16b,v21.16b - eor v6.16b,v6.16b,v22.16b - eor v7.16b,v7.16b,v23.16b - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - - eor v16.16b,v16.16b,v0.16b - eor v17.16b,v17.16b,v1.16b - eor v18.16b,v18.16b,v2.16b - eor v19.16b,v19.16b,v3.16b - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 - - b.hi .Loop_outer_neon - - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret - -.Ltail_neon: - add x2,x2,#256 - cmp x2,#64 - b.lo .Less_than_64 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#4 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - b.eq .Ldone_neon - sub x2,x2,#64 - cmp x2,#64 - b.lo .Less_than_128 - - ld1 
{v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor v0.16b,v0.16b,v20.16b - eor v1.16b,v1.16b,v21.16b - eor v2.16b,v2.16b,v22.16b - eor v3.16b,v3.16b,v23.16b - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - b.eq .Ldone_neon - sub x2,x2,#64 - cmp x2,#64 - b.lo .Less_than_192 - - ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 - eor v4.16b,v4.16b,v20.16b - eor v5.16b,v5.16b,v21.16b - eor v6.16b,v6.16b,v22.16b - eor v7.16b,v7.16b,v23.16b - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - b.eq .Ldone_neon - sub x2,x2,#64 - - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] - b .Last_neon - -.Less_than_128: - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] - b .Last_neon -.Less_than_192: - st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] - b .Last_neon - -.align 4 -.Last_neon: - sub x0,x0,#1 - add x1,x1,x2 - add x0,x0,x2 - add x4,sp,x2 - neg x2,x2 - -.Loop_tail_neon: - ldrb w10,[x1,x2] - ldrb w11,[x4,x2] - add x2,x2,#1 - eor w10,w10,w11 - strb w10,[x0,x2] - cbnz x2,.Loop_tail_neon - - stp xzr,xzr,[sp,#0] - stp xzr,xzr,[sp,#16] - stp xzr,xzr,[sp,#32] - stp xzr,xzr,[sp,#48] - -.Ldone_neon: - ldp x19,x20,[x29,#16] - add sp,sp,#64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret -.size ChaCha20_neon,.-ChaCha20_neon -.type ChaCha20_512_neon,%function -.align 5 -ChaCha20_512_neon: - stp x29,x30,[sp,#-96]! 
- add x29,sp,#0 - - adrp x5,.Lsigma - add x5,x5,:lo12:.Lsigma - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - -.L512_or_more_neon: - sub sp,sp,#128+64 - - ldp x22,x23,[x5] // load sigma - ld1 {v24.4s},[x5],#16 - ldp x24,x25,[x3] // load key - ldp x26,x27,[x3,#16] - ld1 {v25.4s,v26.4s},[x3] - ldp x28,x30,[x4] // load counter - ld1 {v27.4s},[x4] - ld1 {v31.4s},[x5] -#ifdef __ARMEB__ - rev64 v24.4s,v24.4s - ror x24,x24,#32 - ror x25,x25,#32 - ror x26,x26,#32 - ror x27,x27,#32 - ror x28,x28,#32 - ror x30,x30,#32 -#endif - add v27.4s,v27.4s,v31.4s // += 1 - stp q24,q25,[sp,#0] // off-load key block, invariant part - add v27.4s,v27.4s,v31.4s // not typo - str q26,[sp,#32] - add v28.4s,v27.4s,v31.4s - add v29.4s,v28.4s,v31.4s - add v30.4s,v29.4s,v31.4s - shl v31.4s,v31.4s,#2 // 1 -> 4 - - stp d8,d9,[sp,#128+0] // meet ABI requirements - stp d10,d11,[sp,#128+16] - stp d12,d13,[sp,#128+32] - stp d14,d15,[sp,#128+48] - - sub x2,x2,#512 // not typo - -.Loop_outer_512_neon: - mov v0.16b,v24.16b - mov v4.16b,v24.16b - mov v8.16b,v24.16b - mov v12.16b,v24.16b - mov v16.16b,v24.16b - mov v20.16b,v24.16b - mov v1.16b,v25.16b - mov w5,w22 // unpack key block - mov v5.16b,v25.16b - lsr x6,x22,#32 - mov v9.16b,v25.16b - mov w7,w23 - mov v13.16b,v25.16b - lsr x8,x23,#32 - mov v17.16b,v25.16b - mov w9,w24 - mov v21.16b,v25.16b - lsr x10,x24,#32 - mov v3.16b,v27.16b - mov w11,w25 - mov v7.16b,v28.16b - lsr x12,x25,#32 - mov v11.16b,v29.16b - mov w13,w26 - mov v15.16b,v30.16b - lsr x14,x26,#32 - mov v2.16b,v26.16b - mov w15,w27 - mov v6.16b,v26.16b - lsr x16,x27,#32 - add v19.4s,v3.4s,v31.4s // +4 - mov w17,w28 - add v23.4s,v7.4s,v31.4s // +4 - lsr x19,x28,#32 - mov v10.16b,v26.16b - mov w20,w30 - mov v14.16b,v26.16b - lsr x21,x30,#32 - mov v18.16b,v26.16b - stp q27,q28,[sp,#48] // off-load key block, variable part - mov v22.16b,v26.16b - str q29,[sp,#80] - - mov x4,#5 - subs x2,x2,#512 -.Loop_upper_neon: - sub x4,x4,#1 
- add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror 
w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 
- ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v11.16b,v11.16b,v11.16b,#12 - ext v15.16b,v15.16b,v15.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v23.16b,v23.16b,v23.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v9.16b,v9.16b,v9.16b,#4 - ext v13.16b,v13.16b,v13.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - ext v21.16b,v21.16b,v21.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror 
w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr 
v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v11.16b,v11.16b,v11.16b,#4 - ext v15.16b,v15.16b,v15.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v23.16b,v23.16b,v23.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v9.16b,v9.16b,v9.16b,#12 - ext v13.16b,v13.16b,v13.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - ext v21.16b,v21.16b,v21.16b,#12 - cbnz x4,.Loop_upper_neon - - add w5,w5,w22 // accumulate key block - add x6,x6,x22,lsr#32 - add w7,w7,w23 - add x8,x8,x23,lsr#32 - add w9,w9,w24 - add x10,x10,x24,lsr#32 - add w11,w11,w25 - add x12,x12,x25,lsr#32 - add w13,w13,w26 - add x14,x14,x26,lsr#32 - add w15,w15,w27 - add x16,x16,x27,lsr#32 - add w17,w17,w28 - add x19,x19,x28,lsr#32 - add w20,w20,w30 - add x21,x21,x30,lsr#32 - - add x5,x5,x6,lsl#32 // pack - add x7,x7,x8,lsl#32 - ldp x6,x8,[x1,#0] // load input - add x9,x9,x10,lsl#32 - add x11,x11,x12,lsl#32 - ldp x10,x12,[x1,#16] - add x13,x13,x14,lsl#32 - add x15,x15,x16,lsl#32 - ldp x14,x16,[x1,#32] - add x17,x17,x19,lsl#32 - add x20,x20,x21,lsl#32 - ldp x19,x21,[x1,#48] - add x1,x1,#64 -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor 
x13,x13,x14 - eor x15,x15,x16 - eor x17,x17,x19 - eor x20,x20,x21 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#1 // increment counter - mov w5,w22 // unpack key block - lsr x6,x22,#32 - stp x9,x11,[x0,#16] - mov w7,w23 - lsr x8,x23,#32 - stp x13,x15,[x0,#32] - mov w9,w24 - lsr x10,x24,#32 - stp x17,x20,[x0,#48] - add x0,x0,#64 - mov w11,w25 - lsr x12,x25,#32 - mov w13,w26 - lsr x14,x26,#32 - mov w15,w27 - lsr x16,x27,#32 - mov w17,w28 - lsr x19,x28,#32 - mov w20,w30 - lsr x21,x30,#32 - - mov x4,#5 -.Loop_lower_neon: - sub x4,x4,#1 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror 
w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr 
v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#12 - ext v7.16b,v7.16b,v7.16b,#12 - ext v11.16b,v11.16b,v11.16b,#12 - ext v15.16b,v15.16b,v15.16b,#12 - ext v19.16b,v19.16b,v19.16b,#12 - ext v23.16b,v23.16b,v23.16b,#12 - ext v1.16b,v1.16b,v1.16b,#4 - ext v5.16b,v5.16b,v5.16b,#4 - ext v9.16b,v9.16b,v9.16b,#4 - ext v13.16b,v13.16b,v13.16b,#4 - ext v17.16b,v17.16b,v17.16b,#4 - ext v21.16b,v21.16b,v21.16b,#4 - add v0.4s,v0.4s,v1.4s - add w5,w5,w9 - add v4.4s,v4.4s,v5.4s - add w6,w6,w10 - add v8.4s,v8.4s,v9.4s - add w7,w7,w11 - add v12.4s,v12.4s,v13.4s - add w8,w8,w12 - add v16.4s,v16.4s,v17.4s - eor w17,w17,w5 - add v20.4s,v20.4s,v21.4s - eor w19,w19,w6 - eor v3.16b,v3.16b,v0.16b - eor w20,w20,w7 - eor v7.16b,v7.16b,v4.16b - eor w21,w21,w8 - eor v11.16b,v11.16b,v8.16b - ror w17,w17,#16 - eor v15.16b,v15.16b,v12.16b - ror w19,w19,#16 - eor v19.16b,v19.16b,v16.16b - ror w20,w20,#16 - eor v23.16b,v23.16b,v20.16b - ror w21,w21,#16 - rev32 v3.8h,v3.8h - add w13,w13,w17 - rev32 v7.8h,v7.8h - add w14,w14,w19 - rev32 v11.8h,v11.8h - add w15,w15,w20 - rev32 v15.8h,v15.8h - add w16,w16,w21 - rev32 v19.8h,v19.8h - eor w9,w9,w13 - rev32 v23.8h,v23.8h - eor w10,w10,w14 - add v2.4s,v2.4s,v3.4s - eor w11,w11,w15 - add v6.4s,v6.4s,v7.4s - eor w12,w12,w16 - add v10.4s,v10.4s,v11.4s - ror w9,w9,#20 - add 
v14.4s,v14.4s,v15.4s - ror w10,w10,#20 - add v18.4s,v18.4s,v19.4s - ror w11,w11,#20 - add v22.4s,v22.4s,v23.4s - ror w12,w12,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w9 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w10 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w11 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w12 - eor v28.16b,v17.16b,v18.16b - eor w17,w17,w5 - eor v29.16b,v21.16b,v22.16b - eor w19,w19,w6 - ushr v1.4s,v24.4s,#20 - eor w20,w20,w7 - ushr v5.4s,v25.4s,#20 - eor w21,w21,w8 - ushr v9.4s,v26.4s,#20 - ror w17,w17,#24 - ushr v13.4s,v27.4s,#20 - ror w19,w19,#24 - ushr v17.4s,v28.4s,#20 - ror w20,w20,#24 - ushr v21.4s,v29.4s,#20 - ror w21,w21,#24 - sli v1.4s,v24.4s,#12 - add w13,w13,w17 - sli v5.4s,v25.4s,#12 - add w14,w14,w19 - sli v9.4s,v26.4s,#12 - add w15,w15,w20 - sli v13.4s,v27.4s,#12 - add w16,w16,w21 - sli v17.4s,v28.4s,#12 - eor w9,w9,w13 - sli v21.4s,v29.4s,#12 - eor w10,w10,w14 - add v0.4s,v0.4s,v1.4s - eor w11,w11,w15 - add v4.4s,v4.4s,v5.4s - eor w12,w12,w16 - add v8.4s,v8.4s,v9.4s - ror w9,w9,#25 - add v12.4s,v12.4s,v13.4s - ror w10,w10,#25 - add v16.4s,v16.4s,v17.4s - ror w11,w11,#25 - add v20.4s,v20.4s,v21.4s - ror w12,w12,#25 - eor v24.16b,v3.16b,v0.16b - add w5,w5,w10 - eor v25.16b,v7.16b,v4.16b - add w6,w6,w11 - eor v26.16b,v11.16b,v8.16b - add w7,w7,w12 - eor v27.16b,v15.16b,v12.16b - add w8,w8,w9 - eor v28.16b,v19.16b,v16.16b - eor w21,w21,w5 - eor v29.16b,v23.16b,v20.16b - eor w17,w17,w6 - ushr v3.4s,v24.4s,#24 - eor w19,w19,w7 - ushr v7.4s,v25.4s,#24 - eor w20,w20,w8 - ushr v11.4s,v26.4s,#24 - ror w21,w21,#16 - ushr v15.4s,v27.4s,#24 - ror w17,w17,#16 - ushr v19.4s,v28.4s,#24 - ror w19,w19,#16 - ushr v23.4s,v29.4s,#24 - ror w20,w20,#16 - sli v3.4s,v24.4s,#8 - add w15,w15,w21 - sli v7.4s,v25.4s,#8 - add w16,w16,w17 - sli v11.4s,v26.4s,#8 - add w13,w13,w19 - sli v15.4s,v27.4s,#8 - add w14,w14,w20 - sli v19.4s,v28.4s,#8 - eor w10,w10,w15 - sli v23.4s,v29.4s,#8 - eor w11,w11,w16 - add v2.4s,v2.4s,v3.4s - eor w12,w12,w13 - add v6.4s,v6.4s,v7.4s - 
eor w9,w9,w14 - add v10.4s,v10.4s,v11.4s - ror w10,w10,#20 - add v14.4s,v14.4s,v15.4s - ror w11,w11,#20 - add v18.4s,v18.4s,v19.4s - ror w12,w12,#20 - add v22.4s,v22.4s,v23.4s - ror w9,w9,#20 - eor v24.16b,v1.16b,v2.16b - add w5,w5,w10 - eor v25.16b,v5.16b,v6.16b - add w6,w6,w11 - eor v26.16b,v9.16b,v10.16b - add w7,w7,w12 - eor v27.16b,v13.16b,v14.16b - add w8,w8,w9 - eor v28.16b,v17.16b,v18.16b - eor w21,w21,w5 - eor v29.16b,v21.16b,v22.16b - eor w17,w17,w6 - ushr v1.4s,v24.4s,#25 - eor w19,w19,w7 - ushr v5.4s,v25.4s,#25 - eor w20,w20,w8 - ushr v9.4s,v26.4s,#25 - ror w21,w21,#24 - ushr v13.4s,v27.4s,#25 - ror w17,w17,#24 - ushr v17.4s,v28.4s,#25 - ror w19,w19,#24 - ushr v21.4s,v29.4s,#25 - ror w20,w20,#24 - sli v1.4s,v24.4s,#7 - add w15,w15,w21 - sli v5.4s,v25.4s,#7 - add w16,w16,w17 - sli v9.4s,v26.4s,#7 - add w13,w13,w19 - sli v13.4s,v27.4s,#7 - add w14,w14,w20 - sli v17.4s,v28.4s,#7 - eor w10,w10,w15 - sli v21.4s,v29.4s,#7 - eor w11,w11,w16 - ext v2.16b,v2.16b,v2.16b,#8 - eor w12,w12,w13 - ext v6.16b,v6.16b,v6.16b,#8 - eor w9,w9,w14 - ext v10.16b,v10.16b,v10.16b,#8 - ror w10,w10,#25 - ext v14.16b,v14.16b,v14.16b,#8 - ror w11,w11,#25 - ext v18.16b,v18.16b,v18.16b,#8 - ror w12,w12,#25 - ext v22.16b,v22.16b,v22.16b,#8 - ror w9,w9,#25 - ext v3.16b,v3.16b,v3.16b,#4 - ext v7.16b,v7.16b,v7.16b,#4 - ext v11.16b,v11.16b,v11.16b,#4 - ext v15.16b,v15.16b,v15.16b,#4 - ext v19.16b,v19.16b,v19.16b,#4 - ext v23.16b,v23.16b,v23.16b,#4 - ext v1.16b,v1.16b,v1.16b,#12 - ext v5.16b,v5.16b,v5.16b,#12 - ext v9.16b,v9.16b,v9.16b,#12 - ext v13.16b,v13.16b,v13.16b,#12 - ext v17.16b,v17.16b,v17.16b,#12 - ext v21.16b,v21.16b,v21.16b,#12 - cbnz x4,.Loop_lower_neon - - add w5,w5,w22 // accumulate key block - ldp q24,q25,[sp,#0] - add x6,x6,x22,lsr#32 - ldp q26,q27,[sp,#32] - add w7,w7,w23 - ldp q28,q29,[sp,#64] - add x8,x8,x23,lsr#32 - add v0.4s,v0.4s,v24.4s - add w9,w9,w24 - add v4.4s,v4.4s,v24.4s - add x10,x10,x24,lsr#32 - add v8.4s,v8.4s,v24.4s - add w11,w11,w25 - add 
v12.4s,v12.4s,v24.4s - add x12,x12,x25,lsr#32 - add v16.4s,v16.4s,v24.4s - add w13,w13,w26 - add v20.4s,v20.4s,v24.4s - add x14,x14,x26,lsr#32 - add v2.4s,v2.4s,v26.4s - add w15,w15,w27 - add v6.4s,v6.4s,v26.4s - add x16,x16,x27,lsr#32 - add v10.4s,v10.4s,v26.4s - add w17,w17,w28 - add v14.4s,v14.4s,v26.4s - add x19,x19,x28,lsr#32 - add v18.4s,v18.4s,v26.4s - add w20,w20,w30 - add v22.4s,v22.4s,v26.4s - add x21,x21,x30,lsr#32 - add v19.4s,v19.4s,v31.4s // +4 - add x5,x5,x6,lsl#32 // pack - add v23.4s,v23.4s,v31.4s // +4 - add x7,x7,x8,lsl#32 - add v3.4s,v3.4s,v27.4s - ldp x6,x8,[x1,#0] // load input - add v7.4s,v7.4s,v28.4s - add x9,x9,x10,lsl#32 - add v11.4s,v11.4s,v29.4s - add x11,x11,x12,lsl#32 - add v15.4s,v15.4s,v30.4s - ldp x10,x12,[x1,#16] - add v19.4s,v19.4s,v27.4s - add x13,x13,x14,lsl#32 - add v23.4s,v23.4s,v28.4s - add x15,x15,x16,lsl#32 - add v1.4s,v1.4s,v25.4s - ldp x14,x16,[x1,#32] - add v5.4s,v5.4s,v25.4s - add x17,x17,x19,lsl#32 - add v9.4s,v9.4s,v25.4s - add x20,x20,x21,lsl#32 - add v13.4s,v13.4s,v25.4s - ldp x19,x21,[x1,#48] - add v17.4s,v17.4s,v25.4s - add x1,x1,#64 - add v21.4s,v21.4s,v25.4s - -#ifdef __ARMEB__ - rev x5,x5 - rev x7,x7 - rev x9,x9 - rev x11,x11 - rev x13,x13 - rev x15,x15 - rev x17,x17 - rev x20,x20 -#endif - ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 - eor x5,x5,x6 - eor x7,x7,x8 - eor x9,x9,x10 - eor x11,x11,x12 - eor x13,x13,x14 - eor v0.16b,v0.16b,v24.16b - eor x15,x15,x16 - eor v1.16b,v1.16b,v25.16b - eor x17,x17,x19 - eor v2.16b,v2.16b,v26.16b - eor x20,x20,x21 - eor v3.16b,v3.16b,v27.16b - ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 - - stp x5,x7,[x0,#0] // store output - add x28,x28,#7 // increment counter - stp x9,x11,[x0,#16] - stp x13,x15,[x0,#32] - stp x17,x20,[x0,#48] - add x0,x0,#64 - st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 - - ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 - eor v4.16b,v4.16b,v24.16b - eor v5.16b,v5.16b,v25.16b - eor v6.16b,v6.16b,v26.16b - eor v7.16b,v7.16b,v27.16b - st1 
{v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 - - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - eor v8.16b,v8.16b,v0.16b - ldp q24,q25,[sp,#0] - eor v9.16b,v9.16b,v1.16b - ldp q26,q27,[sp,#32] - eor v10.16b,v10.16b,v2.16b - eor v11.16b,v11.16b,v3.16b - st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 - - ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 - eor v12.16b,v12.16b,v4.16b - eor v13.16b,v13.16b,v5.16b - eor v14.16b,v14.16b,v6.16b - eor v15.16b,v15.16b,v7.16b - st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 - - ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 - eor v16.16b,v16.16b,v8.16b - eor v17.16b,v17.16b,v9.16b - eor v18.16b,v18.16b,v10.16b - eor v19.16b,v19.16b,v11.16b - st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 - - shl v0.4s,v31.4s,#1 // 4 -> 8 - eor v20.16b,v20.16b,v12.16b - eor v21.16b,v21.16b,v13.16b - eor v22.16b,v22.16b,v14.16b - eor v23.16b,v23.16b,v15.16b - st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 - - add v27.4s,v27.4s,v0.4s // += 8 - add v28.4s,v28.4s,v0.4s - add v29.4s,v29.4s,v0.4s - add v30.4s,v30.4s,v0.4s - - b.hs .Loop_outer_512_neon - - adds x2,x2,#512 - ushr v0.4s,v31.4s,#2 // 4 -> 1 - - ldp d8,d9,[sp,#128+0] // meet ABI requirements - ldp d10,d11,[sp,#128+16] - ldp d12,d13,[sp,#128+32] - ldp d14,d15,[sp,#128+48] - - stp q24,q31,[sp,#0] // wipe off-load area - stp q24,q31,[sp,#32] - stp q24,q31,[sp,#64] - - b.eq .Ldone_512_neon - - cmp x2,#192 - sub v27.4s,v27.4s,v0.4s // -= 1 - sub v28.4s,v28.4s,v0.4s - sub v29.4s,v29.4s,v0.4s - add sp,sp,#128 - b.hs .Loop_outer_neon - - eor v25.16b,v25.16b,v25.16b - eor v26.16b,v26.16b,v26.16b - eor v27.16b,v27.16b,v27.16b - eor v28.16b,v28.16b,v28.16b - eor v29.16b,v29.16b,v29.16b - eor v30.16b,v30.16b,v30.16b - b .Loop_outer - -.Ldone_512_neon: - ldp x19,x20,[x29,#16] - add sp,sp,#128+64 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#96 - ret -.size ChaCha20_512_neon,.-ChaCha20_512_neon -#endif -#endif // !OPENSSL_NO_ASM -.section 
.note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S deleted file mode 100644 index 60c70a24fd..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S +++ /dev/null @@ -1,775 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -#if __ARM_MAX_ARCH__>=7 -.text -.arch armv8-a+crypto -.section .rodata -.align 5 -.Lrcon: -.long 0x01,0x01,0x01,0x01 -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat -.long 0x1b,0x1b,0x1b,0x1b - -.text - -.globl aes_hw_set_encrypt_key -.hidden aes_hw_set_encrypt_key -.type aes_hw_set_encrypt_key,%function -.align 5 -aes_hw_set_encrypt_key: -.Lenc_key: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - mov x3,#-1 - cmp x0,#0 - b.eq .Lenc_key_abort - cmp x2,#0 - b.eq .Lenc_key_abort - mov x3,#-2 - cmp w1,#128 - b.lt .Lenc_key_abort - cmp w1,#256 - b.gt .Lenc_key_abort - tst w1,#0x3f - b.ne .Lenc_key_abort - - adrp x3,.Lrcon - add x3,x3,:lo12:.Lrcon - cmp w1,#192 - - eor v0.16b,v0.16b,v0.16b - ld1 {v3.16b},[x0],#16 - mov w1,#8 // reuse w1 - ld1 {v1.4s,v2.4s},[x3],#32 - - b.lt .Loop128 - b.eq .L192 - b .L256 - -.align 4 -.Loop128: - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - b.ne .Loop128 - - ld1 {v1.4s},[x3] - - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - - tbl v6.16b,{v3.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v3.4s},[x2],#16 - aese v6.16b,v0.16b - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - eor v3.16b,v3.16b,v6.16b - st1 {v3.4s},[x2] - add x2,x2,#0x50 - - mov w12,#10 - b .Ldone - -.align 4 -.L192: - ld1 {v4.8b},[x0],#8 - movi v6.16b,#8 // borrow v6.16b - st1 {v3.4s},[x2],#16 - sub v2.16b,v2.16b,v6.16b // adjust the mask - -.Loop192: - tbl v6.16b,{v4.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v4.8b},[x2],#8 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - - dup v5.4s,v3.s[3] - eor v5.16b,v5.16b,v4.16b 
- eor v6.16b,v6.16b,v1.16b - ext v4.16b,v0.16b,v4.16b,#12 - shl v1.16b,v1.16b,#1 - eor v4.16b,v4.16b,v5.16b - eor v3.16b,v3.16b,v6.16b - eor v4.16b,v4.16b,v6.16b - st1 {v3.4s},[x2],#16 - b.ne .Loop192 - - mov w12,#12 - add x2,x2,#0x20 - b .Ldone - -.align 4 -.L256: - ld1 {v4.16b},[x0] - mov w1,#7 - mov w12,#14 - st1 {v3.4s},[x2],#16 - -.Loop256: - tbl v6.16b,{v4.16b},v2.16b - ext v5.16b,v0.16b,v3.16b,#12 - st1 {v4.4s},[x2],#16 - aese v6.16b,v0.16b - subs w1,w1,#1 - - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v3.16b,v3.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v6.16b,v6.16b,v1.16b - eor v3.16b,v3.16b,v5.16b - shl v1.16b,v1.16b,#1 - eor v3.16b,v3.16b,v6.16b - st1 {v3.4s},[x2],#16 - b.eq .Ldone - - dup v6.4s,v3.s[3] // just splat - ext v5.16b,v0.16b,v4.16b,#12 - aese v6.16b,v0.16b - - eor v4.16b,v4.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v4.16b,v4.16b,v5.16b - ext v5.16b,v0.16b,v5.16b,#12 - eor v4.16b,v4.16b,v5.16b - - eor v4.16b,v4.16b,v6.16b - b .Loop256 - -.Ldone: - str w12,[x2] - mov x3,#0 - -.Lenc_key_abort: - mov x0,x3 // return value - ldr x29,[sp],#16 - ret -.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key - -.globl aes_hw_set_decrypt_key -.hidden aes_hw_set_decrypt_key -.type aes_hw_set_decrypt_key,%function -.align 5 -aes_hw_set_decrypt_key: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - bl .Lenc_key - - cmp x0,#0 - b.ne .Ldec_key_abort - - sub x2,x2,#240 // restore original x2 - mov x4,#-16 - add x0,x2,x12,lsl#4 // end of key schedule - - ld1 {v0.4s},[x2] - ld1 {v1.4s},[x0] - st1 {v0.4s},[x0],x4 - st1 {v1.4s},[x2],#16 - -.Loop_imc: - ld1 {v0.4s},[x2] - ld1 {v1.4s},[x0] - aesimc v0.16b,v0.16b - aesimc v1.16b,v1.16b - st1 {v0.4s},[x0],x4 - st1 {v1.4s},[x2],#16 - cmp x0,x2 - b.hi .Loop_imc - - ld1 {v0.4s},[x2] - aesimc v0.16b,v0.16b - st1 {v0.4s},[x0] - - eor x0,x0,x0 // return value -.Ldec_key_abort: - ldp x29,x30,[sp],#16 - ret -.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key -.globl aes_hw_encrypt -.hidden aes_hw_encrypt -.type aes_hw_encrypt,%function -.align 5 -aes_hw_encrypt: - ldr w3,[x2,#240] - ld1 {v0.4s},[x2],#16 - ld1 {v2.16b},[x0] - sub w3,w3,#2 - ld1 {v1.4s},[x2],#16 - -.Loop_enc: - aese v2.16b,v0.16b - aesmc v2.16b,v2.16b - ld1 {v0.4s},[x2],#16 - subs w3,w3,#2 - aese v2.16b,v1.16b - aesmc v2.16b,v2.16b - ld1 {v1.4s},[x2],#16 - b.gt .Loop_enc - - aese v2.16b,v0.16b - aesmc v2.16b,v2.16b - ld1 {v0.4s},[x2] - aese v2.16b,v1.16b - eor v2.16b,v2.16b,v0.16b - - st1 {v2.16b},[x1] - ret -.size aes_hw_encrypt,.-aes_hw_encrypt -.globl aes_hw_decrypt -.hidden aes_hw_decrypt -.type aes_hw_decrypt,%function -.align 5 -aes_hw_decrypt: - ldr w3,[x2,#240] - ld1 {v0.4s},[x2],#16 - ld1 {v2.16b},[x0] - sub w3,w3,#2 - ld1 {v1.4s},[x2],#16 - -.Loop_dec: - aesd v2.16b,v0.16b - aesimc v2.16b,v2.16b - ld1 {v0.4s},[x2],#16 - subs w3,w3,#2 - aesd v2.16b,v1.16b - aesimc v2.16b,v2.16b - ld1 {v1.4s},[x2],#16 - b.gt .Loop_dec - - aesd v2.16b,v0.16b - aesimc v2.16b,v2.16b - ld1 {v0.4s},[x2] - aesd v2.16b,v1.16b - eor v2.16b,v2.16b,v0.16b - - st1 {v2.16b},[x1] - ret -.size aes_hw_decrypt,.-aes_hw_decrypt -.globl aes_hw_cbc_encrypt -.hidden aes_hw_cbc_encrypt -.type aes_hw_cbc_encrypt,%function -.align 5 -aes_hw_cbc_encrypt: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - subs x2,x2,#16 - mov x8,#16 - b.lo .Lcbc_abort - csel x8,xzr,x8,eq - - cmp w5,#0 // en- or decrypting? - ldr w5,[x3,#240] - and x2,x2,#-16 - ld1 {v6.16b},[x4] - ld1 {v0.16b},[x0],x8 - - ld1 {v16.4s,v17.4s},[x3] // load key schedule... - sub w5,w5,#6 - add x7,x3,x5,lsl#4 // pointer to last 7 round keys - sub w5,w5,#2 - ld1 {v18.4s,v19.4s},[x7],#32 - ld1 {v20.4s,v21.4s},[x7],#32 - ld1 {v22.4s,v23.4s},[x7],#32 - ld1 {v7.4s},[x7] - - add x7,x3,#32 - mov w6,w5 - b.eq .Lcbc_dec - - cmp w5,#2 - eor v0.16b,v0.16b,v6.16b - eor v5.16b,v16.16b,v7.16b - b.eq .Lcbc_enc128 - - ld1 {v2.4s,v3.4s},[x7] - add x7,x3,#16 - add x6,x3,#16*4 - add x12,x3,#16*5 - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - add x14,x3,#16*6 - add x3,x3,#16*7 - b .Lenter_cbc_enc - -.align 4 -.Loop_cbc_enc: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - st1 {v6.16b},[x1],#16 -.Lenter_cbc_enc: - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v0.16b,v2.16b - aesmc v0.16b,v0.16b - ld1 {v16.4s},[x6] - cmp w5,#4 - aese v0.16b,v3.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x12] - b.eq .Lcbc_enc192 - - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - ld1 {v16.4s},[x14] - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x3] - nop - -.Lcbc_enc192: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - subs x2,x2,#16 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - csel x8,xzr,x8,eq - aese v0.16b,v18.16b - aesmc v0.16b,v0.16b - aese v0.16b,v19.16b - aesmc v0.16b,v0.16b - ld1 {v16.16b},[x0],x8 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - eor v16.16b,v16.16b,v5.16b - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - ld1 {v17.4s},[x7] // re-pre-load rndkey[1] - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - aese v0.16b,v23.16b - eor v6.16b,v0.16b,v7.16b - b.hs .Loop_cbc_enc - - st1 {v6.16b},[x1],#16 - b .Lcbc_done - -.align 5 -.Lcbc_enc128: - ld1 {v2.4s,v3.4s},[x7] - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - b .Lenter_cbc_enc128 -.Loop_cbc_enc128: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - st1 {v6.16b},[x1],#16 
-.Lenter_cbc_enc128: - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - subs x2,x2,#16 - aese v0.16b,v2.16b - aesmc v0.16b,v0.16b - csel x8,xzr,x8,eq - aese v0.16b,v3.16b - aesmc v0.16b,v0.16b - aese v0.16b,v18.16b - aesmc v0.16b,v0.16b - aese v0.16b,v19.16b - aesmc v0.16b,v0.16b - ld1 {v16.16b},[x0],x8 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - eor v16.16b,v16.16b,v5.16b - aese v0.16b,v23.16b - eor v6.16b,v0.16b,v7.16b - b.hs .Loop_cbc_enc128 - - st1 {v6.16b},[x1],#16 - b .Lcbc_done -.align 5 -.Lcbc_dec: - ld1 {v18.16b},[x0],#16 - subs x2,x2,#32 // bias - add w6,w5,#2 - orr v3.16b,v0.16b,v0.16b - orr v1.16b,v0.16b,v0.16b - orr v19.16b,v18.16b,v18.16b - b.lo .Lcbc_dec_tail - - orr v1.16b,v18.16b,v18.16b - ld1 {v18.16b},[x0],#16 - orr v2.16b,v0.16b,v0.16b - orr v3.16b,v1.16b,v1.16b - orr v19.16b,v18.16b,v18.16b - -.Loop3x_cbc_dec: - aesd v0.16b,v16.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aesd v0.16b,v17.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt .Loop3x_cbc_dec - - aesd v0.16b,v16.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - eor v4.16b,v6.16b,v7.16b - subs x2,x2,#0x30 - eor v5.16b,v2.16b,v7.16b - csel x6,x2,x6,lo // x6, w6, is zero at this point - aesd v0.16b,v17.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - eor v17.16b,v3.16b,v7.16b - add x0,x0,x6 // x0 is adjusted in such way that - // at exit from the loop v1.16b-v18.16b - // are loaded with last "words" - orr v6.16b,v19.16b,v19.16b - mov x7,x3 - aesd v0.16b,v20.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v20.16b - aesimc v1.16b,v1.16b - aesd 
v18.16b,v20.16b - aesimc v18.16b,v18.16b - ld1 {v2.16b},[x0],#16 - aesd v0.16b,v21.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v21.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v21.16b - aesimc v18.16b,v18.16b - ld1 {v3.16b},[x0],#16 - aesd v0.16b,v22.16b - aesimc v0.16b,v0.16b - aesd v1.16b,v22.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v22.16b - aesimc v18.16b,v18.16b - ld1 {v19.16b},[x0],#16 - aesd v0.16b,v23.16b - aesd v1.16b,v23.16b - aesd v18.16b,v23.16b - ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] - add w6,w5,#2 - eor v4.16b,v4.16b,v0.16b - eor v5.16b,v5.16b,v1.16b - eor v18.16b,v18.16b,v17.16b - ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] - st1 {v4.16b},[x1],#16 - orr v0.16b,v2.16b,v2.16b - st1 {v5.16b},[x1],#16 - orr v1.16b,v3.16b,v3.16b - st1 {v18.16b},[x1],#16 - orr v18.16b,v19.16b,v19.16b - b.hs .Loop3x_cbc_dec - - cmn x2,#0x30 - b.eq .Lcbc_done - nop - -.Lcbc_dec_tail: - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt .Lcbc_dec_tail - - aesd v1.16b,v16.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v16.16b - aesimc v18.16b,v18.16b - aesd v1.16b,v17.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v17.16b - aesimc v18.16b,v18.16b - aesd v1.16b,v20.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v20.16b - aesimc v18.16b,v18.16b - cmn x2,#0x20 - aesd v1.16b,v21.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v21.16b - aesimc v18.16b,v18.16b - eor v5.16b,v6.16b,v7.16b - aesd v1.16b,v22.16b - aesimc v1.16b,v1.16b - aesd v18.16b,v22.16b - aesimc v18.16b,v18.16b - eor v17.16b,v3.16b,v7.16b - aesd v1.16b,v23.16b - aesd v18.16b,v23.16b - b.eq .Lcbc_dec_one - eor v5.16b,v5.16b,v1.16b - eor v17.16b,v17.16b,v18.16b - orr v6.16b,v19.16b,v19.16b - st1 {v5.16b},[x1],#16 - st1 {v17.16b},[x1],#16 - b .Lcbc_done - -.Lcbc_dec_one: - eor v5.16b,v5.16b,v18.16b - orr v6.16b,v19.16b,v19.16b - st1 
{v5.16b},[x1],#16 - -.Lcbc_done: - st1 {v6.16b},[x4] -.Lcbc_abort: - ldr x29,[sp],#16 - ret -.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt -.globl aes_hw_ctr32_encrypt_blocks -.hidden aes_hw_ctr32_encrypt_blocks -.type aes_hw_ctr32_encrypt_blocks,%function -.align 5 -aes_hw_ctr32_encrypt_blocks: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - ldr w5,[x3,#240] - - ldr w8, [x4, #12] - ld1 {v0.4s},[x4] - - ld1 {v16.4s,v17.4s},[x3] // load key schedule... - sub w5,w5,#4 - mov x12,#16 - cmp x2,#2 - add x7,x3,x5,lsl#4 // pointer to last 5 round keys - sub w5,w5,#2 - ld1 {v20.4s,v21.4s},[x7],#32 - ld1 {v22.4s,v23.4s},[x7],#32 - ld1 {v7.4s},[x7] - add x7,x3,#32 - mov w6,w5 - csel x12,xzr,x12,lo -#ifndef __ARMEB__ - rev w8, w8 -#endif - orr v1.16b,v0.16b,v0.16b - add w10, w8, #1 - orr v18.16b,v0.16b,v0.16b - add w8, w8, #2 - orr v6.16b,v0.16b,v0.16b - rev w10, w10 - mov v1.s[3],w10 - b.ls .Lctr32_tail - rev w12, w8 - sub x2,x2,#3 // bias - mov v18.s[3],w12 - b .Loop3x_ctr32 - -.align 4 -.Loop3x_ctr32: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - aese v18.16b,v16.16b - aesmc v18.16b,v18.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - aese v18.16b,v17.16b - aesmc v18.16b,v18.16b - ld1 {v17.4s},[x7],#16 - b.gt .Loop3x_ctr32 - - aese v0.16b,v16.16b - aesmc v4.16b,v0.16b - aese v1.16b,v16.16b - aesmc v5.16b,v1.16b - ld1 {v2.16b},[x0],#16 - orr v0.16b,v6.16b,v6.16b - aese v18.16b,v16.16b - aesmc v18.16b,v18.16b - ld1 {v3.16b},[x0],#16 - orr v1.16b,v6.16b,v6.16b - aese v4.16b,v17.16b - aesmc v4.16b,v4.16b - aese v5.16b,v17.16b - aesmc v5.16b,v5.16b - ld1 {v19.16b},[x0],#16 - mov x7,x3 - aese v18.16b,v17.16b - aesmc v17.16b,v18.16b - orr v18.16b,v6.16b,v6.16b - add w9,w8,#1 - aese v4.16b,v20.16b - aesmc v4.16b,v4.16b - aese v5.16b,v20.16b - aesmc v5.16b,v5.16b - eor v2.16b,v2.16b,v7.16b - add w10,w8,#2 - aese v17.16b,v20.16b - aesmc v17.16b,v17.16b - eor 
v3.16b,v3.16b,v7.16b - add w8,w8,#3 - aese v4.16b,v21.16b - aesmc v4.16b,v4.16b - aese v5.16b,v21.16b - aesmc v5.16b,v5.16b - eor v19.16b,v19.16b,v7.16b - rev w9,w9 - aese v17.16b,v21.16b - aesmc v17.16b,v17.16b - mov v0.s[3], w9 - rev w10,w10 - aese v4.16b,v22.16b - aesmc v4.16b,v4.16b - aese v5.16b,v22.16b - aesmc v5.16b,v5.16b - mov v1.s[3], w10 - rev w12,w8 - aese v17.16b,v22.16b - aesmc v17.16b,v17.16b - mov v18.s[3], w12 - subs x2,x2,#3 - aese v4.16b,v23.16b - aese v5.16b,v23.16b - aese v17.16b,v23.16b - - eor v2.16b,v2.16b,v4.16b - ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] - st1 {v2.16b},[x1],#16 - eor v3.16b,v3.16b,v5.16b - mov w6,w5 - st1 {v3.16b},[x1],#16 - eor v19.16b,v19.16b,v17.16b - ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] - st1 {v19.16b},[x1],#16 - b.hs .Loop3x_ctr32 - - adds x2,x2,#3 - b.eq .Lctr32_done - cmp x2,#1 - mov x12,#16 - csel x12,xzr,x12,eq - -.Lctr32_tail: - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - ld1 {v16.4s},[x7],#16 - subs w6,w6,#2 - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - ld1 {v17.4s},[x7],#16 - b.gt .Lctr32_tail - - aese v0.16b,v16.16b - aesmc v0.16b,v0.16b - aese v1.16b,v16.16b - aesmc v1.16b,v1.16b - aese v0.16b,v17.16b - aesmc v0.16b,v0.16b - aese v1.16b,v17.16b - aesmc v1.16b,v1.16b - ld1 {v2.16b},[x0],x12 - aese v0.16b,v20.16b - aesmc v0.16b,v0.16b - aese v1.16b,v20.16b - aesmc v1.16b,v1.16b - ld1 {v3.16b},[x0] - aese v0.16b,v21.16b - aesmc v0.16b,v0.16b - aese v1.16b,v21.16b - aesmc v1.16b,v1.16b - eor v2.16b,v2.16b,v7.16b - aese v0.16b,v22.16b - aesmc v0.16b,v0.16b - aese v1.16b,v22.16b - aesmc v1.16b,v1.16b - eor v3.16b,v3.16b,v7.16b - aese v0.16b,v23.16b - aese v1.16b,v23.16b - - cmp x2,#1 - eor v2.16b,v2.16b,v0.16b - eor v3.16b,v3.16b,v1.16b - st1 {v2.16b},[x1],#16 - b.eq .Lctr32_done - st1 {v3.16b},[x1] - -.Lctr32_done: - ldr x29,[sp],#16 - ret -.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks -#endif 
-#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/armv8-mont.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/armv8-mont.S deleted file mode 100644 index 360bf4c7fe..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/armv8-mont.S +++ /dev/null @@ -1,1423 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl bn_mul_mont -.hidden bn_mul_mont -.type bn_mul_mont,%function -.align 5 -bn_mul_mont: - tst x5,#7 - b.eq __bn_sqr8x_mont - tst x5,#3 - b.eq __bn_mul4x_mont -.Lmul_mont: - stp x29,x30,[sp,#-64]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - - ldr x9,[x2],#8 // bp[0] - sub x22,sp,x5,lsl#3 - ldp x7,x8,[x1],#16 // ap[0..1] - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - and x22,x22,#-16 // ABI says so - ldp x13,x14,[x3],#16 // np[0..1] - - mul x6,x7,x9 // ap[0]*bp[0] - sub x21,x5,#16 // j=num-2 - umulh x7,x7,x9 - mul x10,x8,x9 // ap[1]*bp[0] - umulh x11,x8,x9 - - mul x15,x6,x4 // "tp[0]"*n0 - mov sp,x22 // alloca - - // (*) mul x12,x13,x15 // np[0]*m1 - umulh x13,x13,x15 - mul x16,x14,x15 // np[1]*m1 - // (*) adds x12,x12,x6 // discarded - // (*) As for removal of first multiplication and addition - // instructions. The outcome of first addition is - // guaranteed to be zero, which leaves two computationally - // significant outcomes: it either carries or not. Then - // question is when does it carry? Is there alternative - // way to deduce it? 
If you follow operations, you can - // observe that condition for carry is quite simple: - // x6 being non-zero. So that carry can be calculated - // by adding -1 to x6. That's what next instruction does. - subs xzr,x6,#1 // (*) - umulh x17,x14,x15 - adc x13,x13,xzr - cbz x21,.L1st_skip - -.L1st: - ldr x8,[x1],#8 - adds x6,x10,x7 - sub x21,x21,#8 // j-- - adc x7,x11,xzr - - ldr x14,[x3],#8 - adds x12,x16,x13 - mul x10,x8,x9 // ap[j]*bp[0] - adc x13,x17,xzr - umulh x11,x8,x9 - - adds x12,x12,x6 - mul x16,x14,x15 // np[j]*m1 - adc x13,x13,xzr - umulh x17,x14,x15 - str x12,[x22],#8 // tp[j-1] - cbnz x21,.L1st - -.L1st_skip: - adds x6,x10,x7 - sub x1,x1,x5 // rewind x1 - adc x7,x11,xzr - - adds x12,x16,x13 - sub x3,x3,x5 // rewind x3 - adc x13,x17,xzr - - adds x12,x12,x6 - sub x20,x5,#8 // i=num-1 - adcs x13,x13,x7 - - adc x19,xzr,xzr // upmost overflow bit - stp x12,x13,[x22] - -.Louter: - ldr x9,[x2],#8 // bp[i] - ldp x7,x8,[x1],#16 - ldr x23,[sp] // tp[0] - add x22,sp,#8 - - mul x6,x7,x9 // ap[0]*bp[i] - sub x21,x5,#16 // j=num-2 - umulh x7,x7,x9 - ldp x13,x14,[x3],#16 - mul x10,x8,x9 // ap[1]*bp[i] - adds x6,x6,x23 - umulh x11,x8,x9 - adc x7,x7,xzr - - mul x15,x6,x4 - sub x20,x20,#8 // i-- - - // (*) mul x12,x13,x15 // np[0]*m1 - umulh x13,x13,x15 - mul x16,x14,x15 // np[1]*m1 - // (*) adds x12,x12,x6 - subs xzr,x6,#1 // (*) - umulh x17,x14,x15 - cbz x21,.Linner_skip - -.Linner: - ldr x8,[x1],#8 - adc x13,x13,xzr - ldr x23,[x22],#8 // tp[j] - adds x6,x10,x7 - sub x21,x21,#8 // j-- - adc x7,x11,xzr - - adds x12,x16,x13 - ldr x14,[x3],#8 - adc x13,x17,xzr - - mul x10,x8,x9 // ap[j]*bp[i] - adds x6,x6,x23 - umulh x11,x8,x9 - adc x7,x7,xzr - - mul x16,x14,x15 // np[j]*m1 - adds x12,x12,x6 - umulh x17,x14,x15 - str x12,[x22,#-16] // tp[j-1] - cbnz x21,.Linner - -.Linner_skip: - ldr x23,[x22],#8 // tp[j] - adc x13,x13,xzr - adds x6,x10,x7 - sub x1,x1,x5 // rewind x1 - adc x7,x11,xzr - - adds x12,x16,x13 - sub x3,x3,x5 // rewind x3 - adcs x13,x17,x19 - adc x19,xzr,xzr - - 
adds x6,x6,x23 - adc x7,x7,xzr - - adds x12,x12,x6 - adcs x13,x13,x7 - adc x19,x19,xzr // upmost overflow bit - stp x12,x13,[x22,#-16] - - cbnz x20,.Louter - - // Final step. We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. - ldr x23,[sp] // tp[0] - add x22,sp,#8 - ldr x14,[x3],#8 // np[0] - subs x21,x5,#8 // j=num-1 and clear borrow - mov x1,x0 -.Lsub: - sbcs x8,x23,x14 // tp[j]-np[j] - ldr x23,[x22],#8 - sub x21,x21,#8 // j-- - ldr x14,[x3],#8 - str x8,[x1],#8 // rp[j]=tp[j]-np[j] - cbnz x21,.Lsub - - sbcs x8,x23,x14 - sbcs x19,x19,xzr // did it borrow? - str x8,[x1],#8 // rp[num-1] - - ldr x23,[sp] // tp[0] - add x22,sp,#8 - ldr x8,[x0],#8 // rp[0] - sub x5,x5,#8 // num-- - nop -.Lcond_copy: - sub x5,x5,#8 // num-- - csel x14,x23,x8,lo // did it borrow? - ldr x23,[x22],#8 - ldr x8,[x0],#8 - str xzr,[x22,#-16] // wipe tp - str x14,[x0,#-16] - cbnz x5,.Lcond_copy - - csel x14,x23,x8,lo - str xzr,[x22,#-8] // wipe tp - str x14,[x0,#-8] - - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldr x29,[sp],#64 - ret -.size bn_mul_mont,.-bn_mul_mont -.type __bn_sqr8x_mont,%function -.align 5 -__bn_sqr8x_mont: - cmp x1,x2 - b.ne __bn_mul4x_mont -.Lsqr8x_mont: - stp x29,x30,[sp,#-128]! 
- add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - stp x0,x3,[sp,#96] // offload rp and np - - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - ldp x10,x11,[x1,#8*4] - ldp x12,x13,[x1,#8*6] - - sub x2,sp,x5,lsl#4 - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - mov sp,x2 // alloca - sub x27,x5,#8*8 - b .Lsqr8x_zero_start - -.Lsqr8x_zero: - sub x27,x27,#8*8 - stp xzr,xzr,[x2,#8*0] - stp xzr,xzr,[x2,#8*2] - stp xzr,xzr,[x2,#8*4] - stp xzr,xzr,[x2,#8*6] -.Lsqr8x_zero_start: - stp xzr,xzr,[x2,#8*8] - stp xzr,xzr,[x2,#8*10] - stp xzr,xzr,[x2,#8*12] - stp xzr,xzr,[x2,#8*14] - add x2,x2,#8*16 - cbnz x27,.Lsqr8x_zero - - add x3,x1,x5 - add x1,x1,#8*8 - mov x19,xzr - mov x20,xzr - mov x21,xzr - mov x22,xzr - mov x23,xzr - mov x24,xzr - mov x25,xzr - mov x26,xzr - mov x2,sp - str x4,[x29,#112] // offload n0 - - // Multiply everything but a[i]*a[i] -.align 4 -.Lsqr8x_outer_loop: - // a[1]a[0] (i) - // a[2]a[0] - // a[3]a[0] - // a[4]a[0] - // a[5]a[0] - // a[6]a[0] - // a[7]a[0] - // a[2]a[1] (ii) - // a[3]a[1] - // a[4]a[1] - // a[5]a[1] - // a[6]a[1] - // a[7]a[1] - // a[3]a[2] (iii) - // a[4]a[2] - // a[5]a[2] - // a[6]a[2] - // a[7]a[2] - // a[4]a[3] (iv) - // a[5]a[3] - // a[6]a[3] - // a[7]a[3] - // a[5]a[4] (v) - // a[6]a[4] - // a[7]a[4] - // a[6]a[5] (vi) - // a[7]a[5] - // a[7]a[6] (vii) - - mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) - mul x15,x8,x6 - mul x16,x9,x6 - mul x17,x10,x6 - adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) - mul x14,x11,x6 - adcs x21,x21,x15 - mul x15,x12,x6 - adcs x22,x22,x16 - mul x16,x13,x6 - adcs x23,x23,x17 - umulh x17,x7,x6 // hi(a[1..7]*a[0]) - adcs x24,x24,x14 - umulh x14,x8,x6 - adcs x25,x25,x15 - umulh x15,x9,x6 - adcs x26,x26,x16 - umulh x16,x10,x6 - stp x19,x20,[x2],#8*2 // t[0..1] - adc x19,xzr,xzr // t[8] - adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) - umulh x17,x11,x6 - adcs x22,x22,x14 - umulh x14,x12,x6 - adcs x23,x23,x15 - umulh x15,x13,x6 - adcs x24,x24,x16 - mul x16,x8,x7 // 
lo(a[2..7]*a[1]) (ii) - adcs x25,x25,x17 - mul x17,x9,x7 - adcs x26,x26,x14 - mul x14,x10,x7 - adc x19,x19,x15 - - mul x15,x11,x7 - adds x22,x22,x16 - mul x16,x12,x7 - adcs x23,x23,x17 - mul x17,x13,x7 - adcs x24,x24,x14 - umulh x14,x8,x7 // hi(a[2..7]*a[1]) - adcs x25,x25,x15 - umulh x15,x9,x7 - adcs x26,x26,x16 - umulh x16,x10,x7 - adcs x19,x19,x17 - umulh x17,x11,x7 - stp x21,x22,[x2],#8*2 // t[2..3] - adc x20,xzr,xzr // t[9] - adds x23,x23,x14 - umulh x14,x12,x7 - adcs x24,x24,x15 - umulh x15,x13,x7 - adcs x25,x25,x16 - mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) - adcs x26,x26,x17 - mul x17,x10,x8 - adcs x19,x19,x14 - mul x14,x11,x8 - adc x20,x20,x15 - - mul x15,x12,x8 - adds x24,x24,x16 - mul x16,x13,x8 - adcs x25,x25,x17 - umulh x17,x9,x8 // hi(a[3..7]*a[2]) - adcs x26,x26,x14 - umulh x14,x10,x8 - adcs x19,x19,x15 - umulh x15,x11,x8 - adcs x20,x20,x16 - umulh x16,x12,x8 - stp x23,x24,[x2],#8*2 // t[4..5] - adc x21,xzr,xzr // t[10] - adds x25,x25,x17 - umulh x17,x13,x8 - adcs x26,x26,x14 - mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) - adcs x19,x19,x15 - mul x15,x11,x9 - adcs x20,x20,x16 - mul x16,x12,x9 - adc x21,x21,x17 - - mul x17,x13,x9 - adds x26,x26,x14 - umulh x14,x10,x9 // hi(a[4..7]*a[3]) - adcs x19,x19,x15 - umulh x15,x11,x9 - adcs x20,x20,x16 - umulh x16,x12,x9 - adcs x21,x21,x17 - umulh x17,x13,x9 - stp x25,x26,[x2],#8*2 // t[6..7] - adc x22,xzr,xzr // t[11] - adds x19,x19,x14 - mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) - adcs x20,x20,x15 - mul x15,x12,x10 - adcs x21,x21,x16 - mul x16,x13,x10 - adc x22,x22,x17 - - umulh x17,x11,x10 // hi(a[5..7]*a[4]) - adds x20,x20,x14 - umulh x14,x12,x10 - adcs x21,x21,x15 - umulh x15,x13,x10 - adcs x22,x22,x16 - mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) - adc x23,xzr,xzr // t[12] - adds x21,x21,x17 - mul x17,x13,x11 - adcs x22,x22,x14 - umulh x14,x12,x11 // hi(a[6..7]*a[5]) - adc x23,x23,x15 - - umulh x15,x13,x11 - adds x22,x22,x16 - mul x16,x13,x12 // lo(a[7]*a[6]) (vii) - adcs x23,x23,x17 - umulh x17,x13,x12 // 
hi(a[7]*a[6]) - adc x24,xzr,xzr // t[13] - adds x23,x23,x14 - sub x27,x3,x1 // done yet? - adc x24,x24,x15 - - adds x24,x24,x16 - sub x14,x3,x5 // rewinded ap - adc x25,xzr,xzr // t[14] - add x25,x25,x17 - - cbz x27,.Lsqr8x_outer_break - - mov x4,x6 - ldp x6,x7,[x2,#8*0] - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - adds x19,x19,x6 - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x0,x1 - adcs x26,xzr,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved below - mov x27,#-8*8 - - // a[8]a[0] - // a[9]a[0] - // a[a]a[0] - // a[b]a[0] - // a[c]a[0] - // a[d]a[0] - // a[e]a[0] - // a[f]a[0] - // a[8]a[1] - // a[f]a[1]........................ - // a[8]a[2] - // a[f]a[2]........................ - // a[8]a[3] - // a[f]a[3]........................ - // a[8]a[4] - // a[f]a[4]........................ - // a[8]a[5] - // a[f]a[5]........................ - // a[8]a[6] - // a[f]a[6]........................ - // a[8]a[7] - // a[f]a[7]........................ 
-.Lsqr8x_mul: - mul x14,x6,x4 - adc x28,xzr,xzr // carry bit, modulo-scheduled - mul x15,x7,x4 - add x27,x27,#8 - mul x16,x8,x4 - mul x17,x9,x4 - adds x19,x19,x14 - mul x14,x10,x4 - adcs x20,x20,x15 - mul x15,x11,x4 - adcs x21,x21,x16 - mul x16,x12,x4 - adcs x22,x22,x17 - mul x17,x13,x4 - adcs x23,x23,x14 - umulh x14,x6,x4 - adcs x24,x24,x15 - umulh x15,x7,x4 - adcs x25,x25,x16 - umulh x16,x8,x4 - adcs x26,x26,x17 - umulh x17,x9,x4 - adc x28,x28,xzr - str x19,[x2],#8 - adds x19,x20,x14 - umulh x14,x10,x4 - adcs x20,x21,x15 - umulh x15,x11,x4 - adcs x21,x22,x16 - umulh x16,x12,x4 - adcs x22,x23,x17 - umulh x17,x13,x4 - ldr x4,[x0,x27] - adcs x23,x24,x14 - adcs x24,x25,x15 - adcs x25,x26,x16 - adcs x26,x28,x17 - //adc x28,xzr,xzr // moved above - cbnz x27,.Lsqr8x_mul - // note that carry flag is guaranteed - // to be zero at this point - cmp x1,x3 // done yet? - b.eq .Lsqr8x_break - - ldp x6,x7,[x2,#8*0] - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - adds x19,x19,x6 - ldr x4,[x0,#-8*8] - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x27,#-8*8 - adcs x26,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved above - b .Lsqr8x_mul - -.align 4 -.Lsqr8x_break: - ldp x6,x7,[x0,#8*0] - add x1,x0,#8*8 - ldp x8,x9,[x0,#8*2] - sub x14,x3,x1 // is it last iteration? 
- ldp x10,x11,[x0,#8*4] - sub x15,x2,x14 - ldp x12,x13,[x0,#8*6] - cbz x14,.Lsqr8x_outer_loop - - stp x19,x20,[x2,#8*0] - ldp x19,x20,[x15,#8*0] - stp x21,x22,[x2,#8*2] - ldp x21,x22,[x15,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[x15,#8*4] - stp x25,x26,[x2,#8*6] - mov x2,x15 - ldp x25,x26,[x15,#8*6] - b .Lsqr8x_outer_loop - -.align 4 -.Lsqr8x_outer_break: - // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] - ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] - ldp x15,x16,[sp,#8*1] - ldp x11,x13,[x14,#8*2] - add x1,x14,#8*4 - ldp x17,x14,[sp,#8*3] - - stp x19,x20,[x2,#8*0] - mul x19,x7,x7 - stp x21,x22,[x2,#8*2] - umulh x7,x7,x7 - stp x23,x24,[x2,#8*4] - mul x8,x9,x9 - stp x25,x26,[x2,#8*6] - mov x2,sp - umulh x9,x9,x9 - adds x20,x7,x15,lsl#1 - extr x15,x16,x15,#63 - sub x27,x5,#8*4 - -.Lsqr4x_shift_n_add: - adcs x21,x8,x15 - extr x16,x17,x16,#63 - sub x27,x27,#8*4 - adcs x22,x9,x16 - ldp x15,x16,[x2,#8*5] - mul x10,x11,x11 - ldp x7,x9,[x1],#8*2 - umulh x11,x11,x11 - mul x12,x13,x13 - umulh x13,x13,x13 - extr x17,x14,x17,#63 - stp x19,x20,[x2,#8*0] - adcs x23,x10,x17 - extr x14,x15,x14,#63 - stp x21,x22,[x2,#8*2] - adcs x24,x11,x14 - ldp x17,x14,[x2,#8*7] - extr x15,x16,x15,#63 - adcs x25,x12,x15 - extr x16,x17,x16,#63 - adcs x26,x13,x16 - ldp x15,x16,[x2,#8*9] - mul x6,x7,x7 - ldp x11,x13,[x1],#8*2 - umulh x7,x7,x7 - mul x8,x9,x9 - umulh x9,x9,x9 - stp x23,x24,[x2,#8*4] - extr x17,x14,x17,#63 - stp x25,x26,[x2,#8*6] - add x2,x2,#8*8 - adcs x19,x6,x17 - extr x14,x15,x14,#63 - adcs x20,x7,x14 - ldp x17,x14,[x2,#8*3] - extr x15,x16,x15,#63 - cbnz x27,.Lsqr4x_shift_n_add - ldp x1,x4,[x29,#104] // pull np and n0 - - adcs x21,x8,x15 - extr x16,x17,x16,#63 - adcs x22,x9,x16 - ldp x15,x16,[x2,#8*5] - mul x10,x11,x11 - umulh x11,x11,x11 - stp x19,x20,[x2,#8*0] - mul x12,x13,x13 - umulh x13,x13,x13 - stp x21,x22,[x2,#8*2] - extr x17,x14,x17,#63 - adcs x23,x10,x17 - extr x14,x15,x14,#63 - ldp x19,x20,[sp,#8*0] - adcs x24,x11,x14 - extr x15,x16,x15,#63 - 
ldp x6,x7,[x1,#8*0] - adcs x25,x12,x15 - extr x16,xzr,x16,#63 - ldp x8,x9,[x1,#8*2] - adc x26,x13,x16 - ldp x10,x11,[x1,#8*4] - - // Reduce by 512 bits per iteration - mul x28,x4,x19 // t[0]*n0 - ldp x12,x13,[x1,#8*6] - add x3,x1,x5 - ldp x21,x22,[sp,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[sp,#8*4] - stp x25,x26,[x2,#8*6] - ldp x25,x26,[sp,#8*6] - add x1,x1,#8*8 - mov x30,xzr // initial top-most carry - mov x2,sp - mov x27,#8 - -.Lsqr8x_reduction: - // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) - mul x15,x7,x28 - sub x27,x27,#1 - mul x16,x8,x28 - str x28,[x2],#8 // put aside t[0]*n0 for tail processing - mul x17,x9,x28 - // (*) adds xzr,x19,x14 - subs xzr,x19,#1 // (*) - mul x14,x10,x28 - adcs x19,x20,x15 - mul x15,x11,x28 - adcs x20,x21,x16 - mul x16,x12,x28 - adcs x21,x22,x17 - mul x17,x13,x28 - adcs x22,x23,x14 - umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) - adcs x23,x24,x15 - umulh x15,x7,x28 - adcs x24,x25,x16 - umulh x16,x8,x28 - adcs x25,x26,x17 - umulh x17,x9,x28 - adc x26,xzr,xzr - adds x19,x19,x14 - umulh x14,x10,x28 - adcs x20,x20,x15 - umulh x15,x11,x28 - adcs x21,x21,x16 - umulh x16,x12,x28 - adcs x22,x22,x17 - umulh x17,x13,x28 - mul x28,x4,x19 // next t[0]*n0 - adcs x23,x23,x14 - adcs x24,x24,x15 - adcs x25,x25,x16 - adc x26,x26,x17 - cbnz x27,.Lsqr8x_reduction - - ldp x14,x15,[x2,#8*0] - ldp x16,x17,[x2,#8*2] - mov x0,x2 - sub x27,x3,x1 // done yet? 
- adds x19,x19,x14 - adcs x20,x20,x15 - ldp x14,x15,[x2,#8*4] - adcs x21,x21,x16 - adcs x22,x22,x17 - ldp x16,x17,[x2,#8*6] - adcs x23,x23,x14 - adcs x24,x24,x15 - adcs x25,x25,x16 - adcs x26,x26,x17 - //adc x28,xzr,xzr // moved below - cbz x27,.Lsqr8x8_post_condition - - ldr x4,[x2,#-8*8] - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - ldp x10,x11,[x1,#8*4] - mov x27,#-8*8 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - -.Lsqr8x_tail: - mul x14,x6,x4 - adc x28,xzr,xzr // carry bit, modulo-scheduled - mul x15,x7,x4 - add x27,x27,#8 - mul x16,x8,x4 - mul x17,x9,x4 - adds x19,x19,x14 - mul x14,x10,x4 - adcs x20,x20,x15 - mul x15,x11,x4 - adcs x21,x21,x16 - mul x16,x12,x4 - adcs x22,x22,x17 - mul x17,x13,x4 - adcs x23,x23,x14 - umulh x14,x6,x4 - adcs x24,x24,x15 - umulh x15,x7,x4 - adcs x25,x25,x16 - umulh x16,x8,x4 - adcs x26,x26,x17 - umulh x17,x9,x4 - adc x28,x28,xzr - str x19,[x2],#8 - adds x19,x20,x14 - umulh x14,x10,x4 - adcs x20,x21,x15 - umulh x15,x11,x4 - adcs x21,x22,x16 - umulh x16,x12,x4 - adcs x22,x23,x17 - umulh x17,x13,x4 - ldr x4,[x0,x27] - adcs x23,x24,x14 - adcs x24,x25,x15 - adcs x25,x26,x16 - adcs x26,x28,x17 - //adc x28,xzr,xzr // moved above - cbnz x27,.Lsqr8x_tail - // note that carry flag is guaranteed - // to be zero at this point - ldp x6,x7,[x2,#8*0] - sub x27,x3,x1 // done yet? 
- sub x16,x3,x5 // rewinded np - ldp x8,x9,[x2,#8*2] - ldp x10,x11,[x2,#8*4] - ldp x12,x13,[x2,#8*6] - cbz x27,.Lsqr8x_tail_break - - ldr x4,[x0,#-8*8] - adds x19,x19,x6 - adcs x20,x20,x7 - ldp x6,x7,[x1,#8*0] - adcs x21,x21,x8 - adcs x22,x22,x9 - ldp x8,x9,[x1,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x1,#8*4] - adcs x25,x25,x12 - mov x27,#-8*8 - adcs x26,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - //adc x28,xzr,xzr // moved above - b .Lsqr8x_tail - -.align 4 -.Lsqr8x_tail_break: - ldr x4,[x29,#112] // pull n0 - add x27,x2,#8*8 // end of current t[num] window - - subs xzr,x30,#1 // "move" top-most carry to carry bit - adcs x14,x19,x6 - adcs x15,x20,x7 - ldp x19,x20,[x0,#8*0] - adcs x21,x21,x8 - ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] - adcs x22,x22,x9 - ldp x8,x9,[x16,#8*2] - adcs x23,x23,x10 - adcs x24,x24,x11 - ldp x10,x11,[x16,#8*4] - adcs x25,x25,x12 - adcs x26,x26,x13 - ldp x12,x13,[x16,#8*6] - add x1,x16,#8*8 - adc x30,xzr,xzr // top-most carry - mul x28,x4,x19 - stp x14,x15,[x2,#8*0] - stp x21,x22,[x2,#8*2] - ldp x21,x22,[x0,#8*2] - stp x23,x24,[x2,#8*4] - ldp x23,x24,[x0,#8*4] - cmp x27,x29 // did we hit the bottom? - stp x25,x26,[x2,#8*6] - mov x2,x0 // slide the window - ldp x25,x26,[x0,#8*6] - mov x27,#8 - b.ne .Lsqr8x_reduction - - // Final step. We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. 
- ldr x0,[x29,#96] // pull rp - add x2,x2,#8*8 - subs x14,x19,x6 - sbcs x15,x20,x7 - sub x27,x5,#8*8 - mov x3,x0 // x0 copy - -.Lsqr8x_sub: - sbcs x16,x21,x8 - ldp x6,x7,[x1,#8*0] - sbcs x17,x22,x9 - stp x14,x15,[x0,#8*0] - sbcs x14,x23,x10 - ldp x8,x9,[x1,#8*2] - sbcs x15,x24,x11 - stp x16,x17,[x0,#8*2] - sbcs x16,x25,x12 - ldp x10,x11,[x1,#8*4] - sbcs x17,x26,x13 - ldp x12,x13,[x1,#8*6] - add x1,x1,#8*8 - ldp x19,x20,[x2,#8*0] - sub x27,x27,#8*8 - ldp x21,x22,[x2,#8*2] - ldp x23,x24,[x2,#8*4] - ldp x25,x26,[x2,#8*6] - add x2,x2,#8*8 - stp x14,x15,[x0,#8*4] - sbcs x14,x19,x6 - stp x16,x17,[x0,#8*6] - add x0,x0,#8*8 - sbcs x15,x20,x7 - cbnz x27,.Lsqr8x_sub - - sbcs x16,x21,x8 - mov x2,sp - add x1,sp,x5 - ldp x6,x7,[x3,#8*0] - sbcs x17,x22,x9 - stp x14,x15,[x0,#8*0] - sbcs x14,x23,x10 - ldp x8,x9,[x3,#8*2] - sbcs x15,x24,x11 - stp x16,x17,[x0,#8*2] - sbcs x16,x25,x12 - ldp x19,x20,[x1,#8*0] - sbcs x17,x26,x13 - ldp x21,x22,[x1,#8*2] - sbcs xzr,x30,xzr // did it borrow? - ldr x30,[x29,#8] // pull return address - stp x14,x15,[x0,#8*4] - stp x16,x17,[x0,#8*6] - - sub x27,x5,#8*4 -.Lsqr4x_cond_copy: - sub x27,x27,#8*4 - csel x14,x19,x6,lo - stp xzr,xzr,[x2,#8*0] - csel x15,x20,x7,lo - ldp x6,x7,[x3,#8*4] - ldp x19,x20,[x1,#8*4] - csel x16,x21,x8,lo - stp xzr,xzr,[x2,#8*2] - add x2,x2,#8*4 - csel x17,x22,x9,lo - ldp x8,x9,[x3,#8*6] - ldp x21,x22,[x1,#8*6] - add x1,x1,#8*4 - stp x14,x15,[x3,#8*0] - stp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - stp xzr,xzr,[x1,#8*0] - stp xzr,xzr,[x1,#8*2] - cbnz x27,.Lsqr4x_cond_copy - - csel x14,x19,x6,lo - stp xzr,xzr,[x2,#8*0] - csel x15,x20,x7,lo - stp xzr,xzr,[x2,#8*2] - csel x16,x21,x8,lo - csel x17,x22,x9,lo - stp x14,x15,[x3,#8*0] - stp x16,x17,[x3,#8*2] - - b .Lsqr8x_done - -.align 4 -.Lsqr8x8_post_condition: - adc x28,xzr,xzr - ldr x30,[x29,#8] // pull return address - // x19-7,x28 hold result, x6-7 hold modulus - subs x6,x19,x6 - ldr x1,[x29,#96] // pull rp - sbcs x7,x20,x7 - stp xzr,xzr,[sp,#8*0] - sbcs x8,x21,x8 - stp 
xzr,xzr,[sp,#8*2] - sbcs x9,x22,x9 - stp xzr,xzr,[sp,#8*4] - sbcs x10,x23,x10 - stp xzr,xzr,[sp,#8*6] - sbcs x11,x24,x11 - stp xzr,xzr,[sp,#8*8] - sbcs x12,x25,x12 - stp xzr,xzr,[sp,#8*10] - sbcs x13,x26,x13 - stp xzr,xzr,[sp,#8*12] - sbcs x28,x28,xzr // did it borrow? - stp xzr,xzr,[sp,#8*14] - - // x6-7 hold result-modulus - csel x6,x19,x6,lo - csel x7,x20,x7,lo - csel x8,x21,x8,lo - csel x9,x22,x9,lo - stp x6,x7,[x1,#8*0] - csel x10,x23,x10,lo - csel x11,x24,x11,lo - stp x8,x9,[x1,#8*2] - csel x12,x25,x12,lo - csel x13,x26,x13,lo - stp x10,x11,[x1,#8*4] - stp x12,x13,[x1,#8*6] - -.Lsqr8x_done: - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldr x29,[sp],#128 - ret -.size __bn_sqr8x_mont,.-__bn_sqr8x_mont -.type __bn_mul4x_mont,%function -.align 5 -__bn_mul4x_mont: - stp x29,x30,[sp,#-128]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - sub x26,sp,x5,lsl#3 - lsl x5,x5,#3 - ldr x4,[x4] // *n0 - sub sp,x26,#8*4 // alloca - - add x10,x2,x5 - add x27,x1,x5 - stp x0,x10,[x29,#96] // offload rp and &b[num] - - ldr x24,[x2,#8*0] // b[0] - ldp x6,x7,[x1,#8*0] // a[0..3] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - mov x19,xzr - mov x20,xzr - mov x21,xzr - mov x22,xzr - ldp x14,x15,[x3,#8*0] // n[0..3] - ldp x16,x17,[x3,#8*2] - adds x3,x3,#8*4 // clear carry bit - mov x0,xzr - mov x28,#0 - mov x26,sp - -.Loop_mul4x_1st_reduction: - mul x10,x6,x24 // lo(a[0..3]*b[0]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[0..3]*b[0]) - adcs x20,x20,x11 - mul x25,x19,x4 // t[0]*n0 - adcs x21,x21,x12 - umulh x11,x7,x24 - adcs x22,x22,x13 - umulh x12,x8,x24 - adc x23,xzr,xzr - umulh x13,x9,x24 - ldr x24,[x2,x28] // next b[i] (or b[0]) - adds x20,x20,x10 - // (*) mul x10,x14,x25 // 
lo(n[0..3]*t[0]*n0) - str x25,[x26],#8 // put aside t[0]*n0 for tail processing - adcs x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - // (*) adds xzr,x19,x10 - subs xzr,x19,#1 // (*) - umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) - adcs x19,x20,x11 - umulh x11,x15,x25 - adcs x20,x21,x12 - umulh x12,x16,x25 - adcs x21,x22,x13 - umulh x13,x17,x25 - adcs x22,x23,x0 - adc x0,xzr,xzr - adds x19,x19,x10 - sub x10,x27,x1 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - cbnz x28,.Loop_mul4x_1st_reduction - - cbz x10,.Lmul4x4_post_condition - - ldp x6,x7,[x1,#8*0] // a[4..7] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - ldr x25,[sp] // a[0]*n0 - ldp x14,x15,[x3,#8*0] // n[4..7] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - -.Loop_mul4x_1st_tail: - mul x10,x6,x24 // lo(a[4..7]*b[i]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[4..7]*b[i]) - adcs x20,x20,x11 - umulh x11,x7,x24 - adcs x21,x21,x12 - umulh x12,x8,x24 - adcs x22,x22,x13 - umulh x13,x9,x24 - adc x23,xzr,xzr - ldr x24,[x2,x28] // next b[i] (or b[0]) - adds x20,x20,x10 - mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) - adcs x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - adds x19,x19,x10 - umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) - adcs x20,x20,x11 - umulh x11,x15,x25 - adcs x21,x21,x12 - umulh x12,x16,x25 - adcs x22,x22,x13 - adcs x23,x23,x0 - umulh x13,x17,x25 - adc x0,xzr,xzr - ldr x25,[sp,x28] // next t[0]*n0 - str x19,[x26],#8 // result!!! - adds x19,x20,x10 - sub x10,x27,x1 // done yet? 
- adcs x20,x21,x11 - adcs x21,x22,x12 - adcs x22,x23,x13 - //adc x0,x0,xzr - cbnz x28,.Loop_mul4x_1st_tail - - sub x11,x27,x5 // rewinded x1 - cbz x10,.Lmul4x_proceed - - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - ldp x14,x15,[x3,#8*0] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - b .Loop_mul4x_1st_tail - -.align 5 -.Lmul4x_proceed: - ldr x24,[x2,#8*4]! // *++b - adc x30,x0,xzr - ldp x6,x7,[x11,#8*0] // a[0..3] - sub x3,x3,x5 // rewind np - ldp x8,x9,[x11,#8*2] - add x1,x11,#8*4 - - stp x19,x20,[x26,#8*0] // result!!! - ldp x19,x20,[sp,#8*4] // t[0..3] - stp x21,x22,[x26,#8*2] // result!!! - ldp x21,x22,[sp,#8*6] - - ldp x14,x15,[x3,#8*0] // n[0..3] - mov x26,sp - ldp x16,x17,[x3,#8*2] - adds x3,x3,#8*4 // clear carry bit - mov x0,xzr - -.align 4 -.Loop_mul4x_reduction: - mul x10,x6,x24 // lo(a[0..3]*b[4]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[0..3]*b[4]) - adcs x20,x20,x11 - mul x25,x19,x4 // t[0]*n0 - adcs x21,x21,x12 - umulh x11,x7,x24 - adcs x22,x22,x13 - umulh x12,x8,x24 - adc x23,xzr,xzr - umulh x13,x9,x24 - ldr x24,[x2,x28] // next b[i] - adds x20,x20,x10 - // (*) mul x10,x14,x25 - str x25,[x26],#8 // put aside t[0]*n0 for tail processing - adcs x21,x21,x11 - mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - // (*) adds xzr,x19,x10 - subs xzr,x19,#1 // (*) - umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 - adcs x19,x20,x11 - umulh x11,x15,x25 - adcs x20,x21,x12 - umulh x12,x16,x25 - adcs x21,x22,x13 - umulh x13,x17,x25 - adcs x22,x23,x0 - adc x0,xzr,xzr - adds x19,x19,x10 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - cbnz x28,.Loop_mul4x_reduction - - adc x0,x0,xzr - ldp x10,x11,[x26,#8*4] // t[4..7] - ldp x12,x13,[x26,#8*6] - ldp x6,x7,[x1,#8*0] // a[4..7] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - adds 
x19,x19,x10 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - - ldr x25,[sp] // t[0]*n0 - ldp x14,x15,[x3,#8*0] // n[4..7] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - -.align 4 -.Loop_mul4x_tail: - mul x10,x6,x24 // lo(a[4..7]*b[4]) - adc x0,x0,xzr // modulo-scheduled - mul x11,x7,x24 - add x28,x28,#8 - mul x12,x8,x24 - and x28,x28,#31 - mul x13,x9,x24 - adds x19,x19,x10 - umulh x10,x6,x24 // hi(a[4..7]*b[4]) - adcs x20,x20,x11 - umulh x11,x7,x24 - adcs x21,x21,x12 - umulh x12,x8,x24 - adcs x22,x22,x13 - umulh x13,x9,x24 - adc x23,xzr,xzr - ldr x24,[x2,x28] // next b[i] - adds x20,x20,x10 - mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) - adcs x21,x21,x11 - mul x11,x15,x25 - adcs x22,x22,x12 - mul x12,x16,x25 - adc x23,x23,x13 // can't overflow - mul x13,x17,x25 - adds x19,x19,x10 - umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) - adcs x20,x20,x11 - umulh x11,x15,x25 - adcs x21,x21,x12 - umulh x12,x16,x25 - adcs x22,x22,x13 - umulh x13,x17,x25 - adcs x23,x23,x0 - ldr x25,[sp,x28] // next a[0]*n0 - adc x0,xzr,xzr - str x19,[x26],#8 // result!!! - adds x19,x20,x10 - sub x10,x27,x1 // done yet? - adcs x20,x21,x11 - adcs x21,x22,x12 - adcs x22,x23,x13 - //adc x0,x0,xzr - cbnz x28,.Loop_mul4x_tail - - sub x11,x3,x5 // rewinded np? - adc x0,x0,xzr - cbz x10,.Loop_mul4x_break - - ldp x10,x11,[x26,#8*4] - ldp x12,x13,[x26,#8*6] - ldp x6,x7,[x1,#8*0] - ldp x8,x9,[x1,#8*2] - add x1,x1,#8*4 - adds x19,x19,x10 - adcs x20,x20,x11 - adcs x21,x21,x12 - adcs x22,x22,x13 - //adc x0,x0,xzr - ldp x14,x15,[x3,#8*0] - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - b .Loop_mul4x_tail - -.align 4 -.Loop_mul4x_break: - ldp x12,x13,[x29,#96] // pull rp and &b[num] - adds x19,x19,x30 - add x2,x2,#8*4 // bp++ - adcs x20,x20,xzr - sub x1,x1,x5 // rewind ap - adcs x21,x21,xzr - stp x19,x20,[x26,#8*0] // result!!! - adcs x22,x22,xzr - ldp x19,x20,[sp,#8*4] // t[0..3] - adc x30,x0,xzr - stp x21,x22,[x26,#8*2] // result!!! - cmp x2,x13 // done yet? 
- ldp x21,x22,[sp,#8*6] - ldp x14,x15,[x11,#8*0] // n[0..3] - ldp x16,x17,[x11,#8*2] - add x3,x11,#8*4 - b.eq .Lmul4x_post - - ldr x24,[x2] - ldp x6,x7,[x1,#8*0] // a[0..3] - ldp x8,x9,[x1,#8*2] - adds x1,x1,#8*4 // clear carry bit - mov x0,xzr - mov x26,sp - b .Loop_mul4x_reduction - -.align 4 -.Lmul4x_post: - // Final step. We see if result is larger than modulus, and - // if it is, subtract the modulus. But comparison implies - // subtraction. So we subtract modulus, see if it borrowed, - // and conditionally copy original value. - mov x0,x12 - mov x27,x12 // x0 copy - subs x10,x19,x14 - add x26,sp,#8*8 - sbcs x11,x20,x15 - sub x28,x5,#8*4 - -.Lmul4x_sub: - sbcs x12,x21,x16 - ldp x14,x15,[x3,#8*0] - sub x28,x28,#8*4 - ldp x19,x20,[x26,#8*0] - sbcs x13,x22,x17 - ldp x16,x17,[x3,#8*2] - add x3,x3,#8*4 - ldp x21,x22,[x26,#8*2] - add x26,x26,#8*4 - stp x10,x11,[x0,#8*0] - sbcs x10,x19,x14 - stp x12,x13,[x0,#8*2] - add x0,x0,#8*4 - sbcs x11,x20,x15 - cbnz x28,.Lmul4x_sub - - sbcs x12,x21,x16 - mov x26,sp - add x1,sp,#8*4 - ldp x6,x7,[x27,#8*0] - sbcs x13,x22,x17 - stp x10,x11,[x0,#8*0] - ldp x8,x9,[x27,#8*2] - stp x12,x13,[x0,#8*2] - ldp x19,x20,[x1,#8*0] - ldp x21,x22,[x1,#8*2] - sbcs xzr,x30,xzr // did it borrow? 
- ldr x30,[x29,#8] // pull return address - - sub x28,x5,#8*4 -.Lmul4x_cond_copy: - sub x28,x28,#8*4 - csel x10,x19,x6,lo - stp xzr,xzr,[x26,#8*0] - csel x11,x20,x7,lo - ldp x6,x7,[x27,#8*4] - ldp x19,x20,[x1,#8*4] - csel x12,x21,x8,lo - stp xzr,xzr,[x26,#8*2] - add x26,x26,#8*4 - csel x13,x22,x9,lo - ldp x8,x9,[x27,#8*6] - ldp x21,x22,[x1,#8*6] - add x1,x1,#8*4 - stp x10,x11,[x27,#8*0] - stp x12,x13,[x27,#8*2] - add x27,x27,#8*4 - cbnz x28,.Lmul4x_cond_copy - - csel x10,x19,x6,lo - stp xzr,xzr,[x26,#8*0] - csel x11,x20,x7,lo - stp xzr,xzr,[x26,#8*2] - csel x12,x21,x8,lo - stp xzr,xzr,[x26,#8*3] - csel x13,x22,x9,lo - stp xzr,xzr,[x26,#8*4] - stp x10,x11,[x27,#8*0] - stp x12,x13,[x27,#8*2] - - b .Lmul4x_done - -.align 4 -.Lmul4x4_post_condition: - adc x0,x0,xzr - ldr x1,[x29,#96] // pull rp - // x19-3,x0 hold result, x14-7 hold modulus - subs x6,x19,x14 - ldr x30,[x29,#8] // pull return address - sbcs x7,x20,x15 - stp xzr,xzr,[sp,#8*0] - sbcs x8,x21,x16 - stp xzr,xzr,[sp,#8*2] - sbcs x9,x22,x17 - stp xzr,xzr,[sp,#8*4] - sbcs xzr,x0,xzr // did it borrow? 
- stp xzr,xzr,[sp,#8*6] - - // x6-3 hold result-modulus - csel x6,x19,x6,lo - csel x7,x20,x7,lo - csel x8,x21,x8,lo - csel x9,x22,x9,lo - stp x6,x7,[x1,#8*0] - stp x8,x9,[x1,#8*2] - -.Lmul4x_done: - ldp x19,x20,[x29,#16] - mov sp,x29 - ldp x21,x22,[x29,#32] - mov x0,#1 - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldr x29,[sp],#128 - ret -.size __bn_mul4x_mont,.-__bn_mul4x_mont -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 4 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S deleted file mode 100644 index f876db3f89..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S +++ /dev/null @@ -1,341 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl gcm_init_neon -.hidden gcm_init_neon -.type gcm_init_neon,%function -.align 4 -gcm_init_neon: - // This function is adapted from gcm_init_v8. xC2 is t3. 
- ld1 {v17.2d}, [x1] // load H - movi v19.16b, #0xe1 - shl v19.2d, v19.2d, #57 // 0xc2.0 - ext v3.16b, v17.16b, v17.16b, #8 - ushr v18.2d, v19.2d, #63 - dup v17.4s, v17.s[1] - ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 - ushr v18.2d, v3.2d, #63 - sshr v17.4s, v17.4s, #31 // broadcast carry bit - and v18.16b, v18.16b, v16.16b - shl v3.2d, v3.2d, #1 - ext v18.16b, v18.16b, v18.16b, #8 - and v16.16b, v16.16b, v17.16b - orr v3.16b, v3.16b, v18.16b // H<<<=1 - eor v5.16b, v3.16b, v16.16b // twisted H - st1 {v5.2d}, [x0] // store Htable[0] - ret -.size gcm_init_neon,.-gcm_init_neon - -.globl gcm_gmult_neon -.hidden gcm_gmult_neon -.type gcm_gmult_neon,%function -.align 4 -gcm_gmult_neon: - ld1 {v3.16b}, [x0] // load Xi - ld1 {v5.1d}, [x1], #8 // load twisted H - ld1 {v6.1d}, [x1] - adrp x9, .Lmasks // load constants - add x9, x9, :lo12:.Lmasks - ld1 {v24.2d, v25.2d}, [x9] - rev64 v3.16b, v3.16b // byteswap Xi - ext v3.16b, v3.16b, v3.16b, #8 - eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing - - mov x3, #16 - b .Lgmult_neon -.size gcm_gmult_neon,.-gcm_gmult_neon - -.globl gcm_ghash_neon -.hidden gcm_ghash_neon -.type gcm_ghash_neon,%function -.align 4 -gcm_ghash_neon: - ld1 {v0.16b}, [x0] // load Xi - ld1 {v5.1d}, [x1], #8 // load twisted H - ld1 {v6.1d}, [x1] - adrp x9, .Lmasks // load constants - add x9, x9, :lo12:.Lmasks - ld1 {v24.2d, v25.2d}, [x9] - rev64 v0.16b, v0.16b // byteswap Xi - ext v0.16b, v0.16b, v0.16b, #8 - eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing - -.Loop_neon: - ld1 {v3.16b}, [x2], #16 // load inp - rev64 v3.16b, v3.16b // byteswap inp - ext v3.16b, v3.16b, v3.16b, #8 - eor v3.16b, v3.16b, v0.16b // inp ^= Xi - -.Lgmult_neon: - // Split the input into v3 and v4. (The upper halves are unused, - // so it is okay to leave them alone.) 
- ins v4.d[0], v3.d[1] - ext v16.8b, v5.8b, v5.8b, #1 // A1 - pmull v16.8h, v16.8b, v3.8b // F = A1*B - ext v0.8b, v3.8b, v3.8b, #1 // B1 - pmull v0.8h, v5.8b, v0.8b // E = A*B1 - ext v17.8b, v5.8b, v5.8b, #2 // A2 - pmull v17.8h, v17.8b, v3.8b // H = A2*B - ext v19.8b, v3.8b, v3.8b, #2 // B2 - pmull v19.8h, v5.8b, v19.8b // G = A*B2 - ext v18.8b, v5.8b, v5.8b, #3 // A3 - eor v16.16b, v16.16b, v0.16b // L = E + F - pmull v18.8h, v18.8b, v3.8b // J = A3*B - ext v0.8b, v3.8b, v3.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v0.8h, v5.8b, v0.8b // I = A*B3 - - // Here we diverge from the 32-bit version. It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v3.8b, v3.8b, #4 // B4 - eor v18.16b, v18.16b, v0.16b // N = I + J - pmull v19.8h, v5.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. 
- zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v0.8h, v5.8b, v3.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v0.16b, v0.16b, v16.16b - eor v0.16b, v0.16b, v18.16b - eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing - ext v16.8b, v7.8b, v7.8b, #1 // A1 - pmull v16.8h, v16.8b, v3.8b // F = A1*B - ext v1.8b, v3.8b, v3.8b, #1 // B1 - pmull v1.8h, v7.8b, v1.8b // E = A*B1 - ext v17.8b, v7.8b, v7.8b, #2 // A2 - pmull v17.8h, v17.8b, v3.8b // H = A2*B - ext v19.8b, v3.8b, v3.8b, #2 // B2 - pmull v19.8h, v7.8b, v19.8b // G = A*B2 - ext v18.8b, v7.8b, v7.8b, #3 // A3 - eor v16.16b, v16.16b, v1.16b // L = E + F - pmull v18.8h, v18.8b, v3.8b // J = A3*B - ext v1.8b, v3.8b, v3.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v1.8h, v7.8b, v1.8b // I = A*B3 - - // Here we diverge from the 32-bit version. 
It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v3.8b, v3.8b, #4 // B4 - eor v18.16b, v18.16b, v1.16b // N = I + J - pmull v19.8h, v7.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. - zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v1.8h, v7.8b, v3.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v1.16b, v1.16b, v16.16b - eor v1.16b, v1.16b, v18.16b - ext v16.8b, v6.8b, v6.8b, #1 // A1 - pmull v16.8h, v16.8b, v4.8b // F = A1*B - ext v2.8b, v4.8b, v4.8b, #1 // B1 - pmull v2.8h, v6.8b, v2.8b // E = A*B1 - ext v17.8b, v6.8b, v6.8b, #2 // A2 - pmull 
v17.8h, v17.8b, v4.8b // H = A2*B - ext v19.8b, v4.8b, v4.8b, #2 // B2 - pmull v19.8h, v6.8b, v19.8b // G = A*B2 - ext v18.8b, v6.8b, v6.8b, #3 // A3 - eor v16.16b, v16.16b, v2.16b // L = E + F - pmull v18.8h, v18.8b, v4.8b // J = A3*B - ext v2.8b, v4.8b, v4.8b, #3 // B3 - eor v17.16b, v17.16b, v19.16b // M = G + H - pmull v2.8h, v6.8b, v2.8b // I = A*B3 - - // Here we diverge from the 32-bit version. It computes the following - // (instructions reordered for clarity): - // - // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) - // vand $t0#hi, $t0#hi, $k48 - // veor $t0#lo, $t0#lo, $t0#hi - // - // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) - // vand $t1#hi, $t1#hi, $k32 - // veor $t1#lo, $t1#lo, $t1#hi - // - // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) - // vand $t2#hi, $t2#hi, $k16 - // veor $t2#lo, $t2#lo, $t2#hi - // - // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) - // vmov.i64 $t3#hi, #0 - // - // $kN is a mask with the bottom N bits set. AArch64 cannot compute on - // upper halves of SIMD registers, so we must split each half into - // separate registers. To compensate, we pair computations up and - // parallelize. - - ext v19.8b, v4.8b, v4.8b, #4 // B4 - eor v18.16b, v18.16b, v2.16b // N = I + J - pmull v19.8h, v6.8b, v19.8b // K = A*B4 - - // This can probably be scheduled more efficiently. For now, we just - // pair up independent instructions. 
- zip1 v20.2d, v16.2d, v17.2d - zip1 v22.2d, v18.2d, v19.2d - zip2 v21.2d, v16.2d, v17.2d - zip2 v23.2d, v18.2d, v19.2d - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - and v21.16b, v21.16b, v24.16b - and v23.16b, v23.16b, v25.16b - eor v20.16b, v20.16b, v21.16b - eor v22.16b, v22.16b, v23.16b - zip1 v16.2d, v20.2d, v21.2d - zip1 v18.2d, v22.2d, v23.2d - zip2 v17.2d, v20.2d, v21.2d - zip2 v19.2d, v22.2d, v23.2d - - ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 - ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 - pmull v2.8h, v6.8b, v4.8b // D = A*B - ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 - ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 - eor v16.16b, v16.16b, v17.16b - eor v18.16b, v18.16b, v19.16b - eor v2.16b, v2.16b, v16.16b - eor v2.16b, v2.16b, v18.16b - ext v16.16b, v0.16b, v2.16b, #8 - eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing - eor v1.16b, v1.16b, v2.16b - eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi - ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result - // This is a no-op due to the ins instruction below. - // ins v2.d[0], v1.d[1] - - // equivalent of reduction_avx from ghash-x86_64.pl - shl v17.2d, v0.2d, #57 // 1st phase - shl v18.2d, v0.2d, #62 - eor v18.16b, v18.16b, v17.16b // - shl v17.2d, v0.2d, #63 - eor v18.16b, v18.16b, v17.16b // - // Note Xm contains {Xl.d[1], Xh.d[0]}. 
- eor v18.16b, v18.16b, v1.16b - ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] - ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] - - ushr v18.2d, v0.2d, #1 // 2nd phase - eor v2.16b, v2.16b,v0.16b - eor v0.16b, v0.16b,v18.16b // - ushr v18.2d, v18.2d, #6 - ushr v0.2d, v0.2d, #1 // - eor v0.16b, v0.16b, v2.16b // - eor v0.16b, v0.16b, v18.16b // - - subs x3, x3, #16 - bne .Loop_neon - - rev64 v0.16b, v0.16b // byteswap Xi and write - ext v0.16b, v0.16b, v0.16b, #8 - st1 {v0.16b}, [x0] - - ret -.size gcm_ghash_neon,.-gcm_ghash_neon - -.section .rodata -.align 4 -.Lmasks: -.quad 0x0000ffffffffffff // k48 -.quad 0x00000000ffffffff // k32 -.quad 0x000000000000ffff // k16 -.quad 0x0000000000000000 // k0 -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S deleted file mode 100644 index 37d97317aa..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S +++ /dev/null @@ -1,249 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text -.arch armv8-a+crypto -.globl gcm_init_v8 -.hidden gcm_init_v8 -.type gcm_init_v8,%function -.align 4 -gcm_init_v8: - ld1 {v17.2d},[x1] //load input H - movi v19.16b,#0xe1 - shl v19.2d,v19.2d,#57 //0xc2.0 - ext v3.16b,v17.16b,v17.16b,#8 - ushr v18.2d,v19.2d,#63 - dup v17.4s,v17.s[1] - ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 - ushr v18.2d,v3.2d,#63 - sshr v17.4s,v17.4s,#31 //broadcast carry bit - and v18.16b,v18.16b,v16.16b - shl v3.2d,v3.2d,#1 - ext v18.16b,v18.16b,v18.16b,#8 - and v16.16b,v16.16b,v17.16b - orr v3.16b,v3.16b,v18.16b //H<<<=1 - eor v20.16b,v3.16b,v16.16b //twisted H - st1 {v20.2d},[x0],#16 //store Htable[0] - - //calculate H^2 - ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing - pmull v0.1q,v20.1d,v20.1d - eor v16.16b,v16.16b,v20.16b - pmull2 v2.1q,v20.2d,v20.2d - pmull v1.1q,v16.1d,v16.1d - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v22.16b,v0.16b,v18.16b - - ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing - eor v17.16b,v17.16b,v22.16b - ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed - st1 {v21.2d,v22.2d},[x0] //store Htable[1..2] - - ret -.size gcm_init_v8,.-gcm_init_v8 -.globl gcm_gmult_v8 -.hidden gcm_gmult_v8 -.type gcm_gmult_v8,%function -.align 4 -gcm_gmult_v8: - ld1 {v17.2d},[x0] //load Xi - movi v19.16b,#0xe1 - ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... 
- shl v19.2d,v19.2d,#57 -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ext v3.16b,v17.16b,v17.16b,#8 - - pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo - eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi - pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v0.16b,v0.16b,v18.16b - -#ifndef __ARMEB__ - rev64 v0.16b,v0.16b -#endif - ext v0.16b,v0.16b,v0.16b,#8 - st1 {v0.2d},[x0] //write out Xi - - ret -.size gcm_gmult_v8,.-gcm_gmult_v8 -.globl gcm_ghash_v8 -.hidden gcm_ghash_v8 -.type gcm_ghash_v8,%function -.align 4 -gcm_ghash_v8: - ld1 {v0.2d},[x0] //load [rotated] Xi - //"[rotated]" means that - //loaded value would have - //to be rotated in order to - //make it appear as in - //algorithm specification - subs x3,x3,#32 //see if x3 is 32 or larger - mov x12,#16 //x12 is used as post- - //increment for input pointer; - //as loop is modulo-scheduled - //x12 is zeroed just in time - //to preclude overstepping - //inp[len], which means that - //last block[s] are actually - //loaded twice, but last - //copy is not processed - ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 - movi v19.16b,#0xe1 - ld1 {v22.2d},[x1] - csel x12,xzr,x12,eq //is it time to zero x12? 
- ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi - ld1 {v16.2d},[x2],#16 //load [rotated] I[0] - shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant -#ifndef __ARMEB__ - rev64 v16.16b,v16.16b - rev64 v0.16b,v0.16b -#endif - ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] - b.lo .Lodd_tail_v8 //x3 was less than 32 - ld1 {v17.2d},[x2],x12 //load [rotated] I[1] -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ext v7.16b,v17.16b,v17.16b,#8 - eor v3.16b,v3.16b,v0.16b //I[i]^=Xi - pmull v4.1q,v20.1d,v7.1d //H·Ii+1 - eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing - pmull2 v6.1q,v20.2d,v7.2d - b .Loop_mod2x_v8 - -.align 4 -.Loop_mod2x_v8: - ext v18.16b,v3.16b,v3.16b,#8 - subs x3,x3,#32 //is there more data? - pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo - csel x12,xzr,x12,lo //is it time to zero x12? - - pmull v5.1q,v21.1d,v17.1d - eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi - eor v0.16b,v0.16b,v4.16b //accumulate - pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) - ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] - - eor v2.16b,v2.16b,v6.16b - csel x12,xzr,x12,eq //is it time to zero x12? 
- eor v1.16b,v1.16b,v5.16b - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] -#ifndef __ARMEB__ - rev64 v16.16b,v16.16b -#endif - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - -#ifndef __ARMEB__ - rev64 v17.16b,v17.16b -#endif - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - ext v7.16b,v17.16b,v17.16b,#8 - ext v3.16b,v16.16b,v16.16b,#8 - eor v0.16b,v1.16b,v18.16b - pmull v4.1q,v20.1d,v7.1d //H·Ii+1 - eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v3.16b,v3.16b,v18.16b - eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing - eor v3.16b,v3.16b,v0.16b - pmull2 v6.1q,v20.2d,v7.2d - b.hs .Loop_mod2x_v8 //there was at least 32 more bytes - - eor v2.16b,v2.16b,v18.16b - ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b - adds x3,x3,#32 //re-construct x3 - eor v0.16b,v0.16b,v2.16b //re-construct v0.16b - b.eq .Ldone_v8 //is x3 zero? 
-.Lodd_tail_v8: - ext v18.16b,v0.16b,v0.16b,#8 - eor v3.16b,v3.16b,v0.16b //inp^=Xi - eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi - - pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo - eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing - pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi - pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) - - ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing - eor v18.16b,v0.16b,v2.16b - eor v1.16b,v1.16b,v17.16b - eor v1.16b,v1.16b,v18.16b - pmull v18.1q,v0.1d,v19.1d //1st phase of reduction - - ins v2.d[0],v1.d[1] - ins v1.d[1],v0.d[0] - eor v0.16b,v1.16b,v18.16b - - ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction - pmull v0.1q,v0.1d,v19.1d - eor v18.16b,v18.16b,v2.16b - eor v0.16b,v0.16b,v18.16b - -.Ldone_v8: -#ifndef __ARMEB__ - rev64 v0.16b,v0.16b -#endif - ext v0.16b,v0.16b,v0.16b,#8 - st1 {v0.2d},[x0] //write out Xi - - ret -.size gcm_ghash_v8,.-gcm_ghash_v8 -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha1-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha1-armv8.S deleted file mode 100644 index f681b9983f..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha1-armv8.S +++ /dev/null @@ -1,1235 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text - - -.globl sha1_block_data_order -.hidden sha1_block_data_order -.type sha1_block_data_order,%function -.align 6 -sha1_block_data_order: -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x16,:pg_hi21_nc:OPENSSL_armcap_P -#else - adrp x16,OPENSSL_armcap_P -#endif - ldr w16,[x16,:lo12:OPENSSL_armcap_P] - tst w16,#ARMV8_SHA1 - b.ne .Lv8_entry - - stp x29,x30,[sp,#-96]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - ldp w20,w21,[x0] - ldp w22,w23,[x0,#8] - ldr w24,[x0,#16] - -.Loop: - ldr x3,[x1],#64 - movz w28,#0x7999 - sub x2,x2,#1 - movk w28,#0x5a82,lsl#16 -#ifdef __ARMEB__ - ror x3,x3,#32 -#else - rev32 x3,x3 -#endif - add w24,w24,w28 // warm it up - add w24,w24,w3 - lsr x4,x3,#32 - ldr x5,[x1,#-56] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w4 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x5,x5,#32 -#else - rev32 x5,x5 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w5 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x6,x5,#32 - ldr x7,[x1,#-48] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w6 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x7,x7,#32 -#else - rev32 x7,x7 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add 
w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w7 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x8,x7,#32 - ldr x9,[x1,#-40] - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w8 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x9,x9,#32 -#else - rev32 x9,x9 -#endif - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w9 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - lsr x10,x9,#32 - ldr x11,[x1,#-32] - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w10 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x11,x11,#32 -#else - rev32 x11,x11 -#endif - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w11 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - lsr x12,x11,#32 - ldr x13,[x1,#-24] - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w12 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x13,x13,#32 -#else - rev32 x13,x13 -#endif - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w13 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - lsr x14,x13,#32 - ldr x15,[x1,#-16] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror 
w21,w21,#2 - add w23,w23,w14 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x15,x15,#32 -#else - rev32 x15,x15 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w15 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x16,x15,#32 - ldr x17,[x1,#-8] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w16 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x17,x17,#32 -#else - rev32 x17,x17 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w17 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x19,x17,#32 - eor w3,w3,w5 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w3,w3,w11 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w3,w3,w16 - ror w22,w22,#2 - add w24,w24,w19 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - eor w4,w4,w12 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - eor w4,w4,w17 - ror w21,w21,#2 - add w23,w23,w3 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - eor w5,w5,w13 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - eor w5,w5,w19 - ror w20,w20,#2 - add w22,w22,w4 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - eor w6,w6,w14 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - eor w6,w6,w3 - ror 
w24,w24,#2 - add w21,w21,w5 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - eor w7,w7,w15 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - eor w7,w7,w4 - ror w23,w23,#2 - add w20,w20,w6 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w7,w7,#31 - movz w28,#0xeba1 - movk w28,#0x6ed9,lsl#16 - eor w8,w8,w10 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w8,w8,w16 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w8,w8,w5 - ror w22,w22,#2 - add w24,w24,w7 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w9,w9,w6 - add w23,w23,w8 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w10,w10,w7 - add w22,w22,w9 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w11,w11,w8 - add w21,w21,w10 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w12,w12,w9 - add w20,w20,w11 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w13,w13,w10 - 
add w24,w24,w12 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w13,w13,#31 - eor w14,w14,w16 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w14,w14,w11 - add w23,w23,w13 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w15,w15,w12 - add w22,w22,w14 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w16,w16,w13 - add w21,w21,w15 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w17,w17,w14 - add w20,w20,w16 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w19,w19,w15 - add w24,w24,w17 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w3,w3,w16 - add w23,w23,w19 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w4,w4,w17 - add w22,w22,w3 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror 
w4,w4,#31 - eor w5,w5,w7 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w5,w5,w19 - add w21,w21,w4 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w6,w6,w3 - add w20,w20,w5 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w7,w7,w4 - add w24,w24,w6 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w8,w8,w5 - add w23,w23,w7 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w9,w9,w6 - add w22,w22,w8 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w10,w10,w7 - add w21,w21,w9 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w11,w11,w8 - add w20,w20,w10 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w11,w11,#31 - movz w28,#0xbcdc - movk w28,#0x8f1b,lsl#16 - eor w12,w12,w14 - eor w25,w24,w22 - ror w27,w21,#27 
- add w24,w24,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w12,w12,w9 - add w24,w24,w11 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w13,w13,w15 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w13,w13,w5 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w13,w13,w10 - add w23,w23,w12 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w14,w14,w16 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w14,w14,w6 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w14,w14,w11 - add w22,w22,w13 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w15,w15,w17 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w15,w15,w7 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w15,w15,w12 - add w21,w21,w14 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w15,w15,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w16,w16,w19 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w16,w16,w8 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w16,w16,w13 - add w20,w20,w15 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w16,w16,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w17,w17,w3 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w17,w17,w9 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w17,w17,w14 - add w24,w24,w16 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w17,w17,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w19,w19,w4 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w19,w19,w10 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor 
w19,w19,w15 - add w23,w23,w17 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w19,w19,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w3,w3,w5 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w3,w3,w11 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w3,w3,w16 - add w22,w22,w19 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w3,w3,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w4,w4,w6 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w4,w4,w12 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w4,w4,w17 - add w21,w21,w3 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w4,w4,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w5,w5,w7 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w5,w5,w13 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w5,w5,w19 - add w20,w20,w4 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w5,w5,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w6,w6,w8 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w6,w6,w14 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w6,w6,w3 - add w24,w24,w5 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w6,w6,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w7,w7,w9 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w7,w7,w15 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w7,w7,w4 - add w23,w23,w6 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w7,w7,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w8,w8,w10 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w8,w8,w16 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w8,w8,w5 - add w22,w22,w7 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w8,w8,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w9,w9,w11 - ror w27,w23,#27 - and 
w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w9,w9,w17 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w9,w9,w6 - add w21,w21,w8 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w9,w9,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w10,w10,w12 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w10,w10,w19 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w10,w10,w7 - add w20,w20,w9 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w10,w10,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w11,w11,w13 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w11,w11,w3 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w11,w11,w8 - add w24,w24,w10 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w11,w11,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w12,w12,w14 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w12,w12,w4 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w12,w12,w9 - add w23,w23,w11 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w13,w13,w15 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w13,w13,w5 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w13,w13,w10 - add w22,w22,w12 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w14,w14,w16 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w14,w14,w6 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w14,w14,w11 - add w21,w21,w13 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w15,w15,w17 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w15,w15,w7 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - 
eor w15,w15,w12 - add w20,w20,w14 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w15,w15,#31 - movz w28,#0xc1d6 - movk w28,#0xca62,lsl#16 - orr w25,w22,w23 - and w26,w22,w23 - eor w16,w16,w19 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w16,w16,w8 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w16,w16,w13 - add w24,w24,w15 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w17,w17,w14 - add w23,w23,w16 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w19,w19,w15 - add w22,w22,w17 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w3,w3,w16 - add w21,w21,w19 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w4,w4,w17 - add w20,w20,w3 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w5,w5,w19 - add w24,w24,w4 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor 
w6,w6,w3 - add w23,w23,w5 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w7,w7,w4 - add w22,w22,w6 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w8,w8,w5 - add w21,w21,w7 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w9,w9,w6 - add w20,w20,w8 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w10,w10,w7 - add w24,w24,w9 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w11,w11,w8 - add w23,w23,w10 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w12,w12,w9 - add w22,w22,w11 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w13,w13,w10 - add w21,w21,w12 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror 
w13,w13,#31 - eor w14,w14,w16 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w14,w14,w11 - add w20,w20,w13 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w15,w15,w12 - add w24,w24,w14 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w16,w16,w13 - add w23,w23,w15 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w17,w17,w14 - add w22,w22,w16 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w19,w19,w15 - add w21,w21,w17 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w19,w19,#31 - ldp w4,w5,[x0] - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w19 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ldp w6,w7,[x0,#8] - eor w25,w24,w22 - ror w27,w21,#27 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - ldr w8,[x0,#16] - add w20,w20,w25 // e+=F(b,c,d) - add w21,w21,w5 - add w22,w22,w6 - add w20,w20,w4 - add w23,w23,w7 - add w24,w24,w8 - stp w20,w21,[x0] - stp w22,w23,[x0,#8] - str w24,[x0,#16] - cbnz x2,.Loop - - ldp x19,x20,[sp,#16] - ldp 
x21,x22,[sp,#32] - ldp x23,x24,[sp,#48] - ldp x25,x26,[sp,#64] - ldp x27,x28,[sp,#80] - ldr x29,[sp],#96 - ret -.size sha1_block_data_order,.-sha1_block_data_order -.type sha1_block_armv8,%function -.align 6 -sha1_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - adrp x4,.Lconst - add x4,x4,:lo12:.Lconst - eor v1.16b,v1.16b,v1.16b - ld1 {v0.4s},[x0],#16 - ld1 {v1.s}[0],[x0] - sub x0,x0,#16 - ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] - -.Loop_hw: - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - sub x2,x2,#1 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - - add v20.4s,v16.4s,v4.4s - rev32 v6.16b,v6.16b - orr v22.16b,v0.16b,v0.16b // offload - - add v21.4s,v16.4s,v5.4s - rev32 v7.16b,v7.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b -.inst 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 - add v20.4s,v16.4s,v6.4s -.inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 1 -.inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v16.4s,v7.4s -.inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 2 -.inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v16.4s,v4.4s -.inst 0x5e281885 //sha1su1 v5.16b,v4.16b -.inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 3 -.inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s -.inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 4 -.inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v6.4s -.inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 5 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v7.4s -.inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 6 -.inst 0x5e141040 //sha1p 
v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v4.4s -.inst 0x5e281885 //sha1su1 v5.16b,v4.16b -.inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 7 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s -.inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 8 -.inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s -.inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 9 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v7.4s -.inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 10 -.inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v4.4s -.inst 0x5e281885 //sha1su1 v5.16b,v4.16b -.inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 11 -.inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v5.4s -.inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 12 -.inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s -.inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 13 -.inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s -.inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b -.inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 14 -.inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v4.4s -.inst 0x5e281885 //sha1su1 v5.16b,v4.16b -.inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 15 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v5.4s -.inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b -.inst 0x5e053087 
//sha1su0 v7.16b,v4.16b,v5.16b -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 16 -.inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v6.4s -.inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 17 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s - -.inst 0x5e280803 //sha1h v3.16b,v0.16b // 18 -.inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - -.inst 0x5e280802 //sha1h v2.16b,v0.16b // 19 -.inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - - add v1.4s,v1.4s,v2.4s - add v0.4s,v0.4s,v22.4s - - cbnz x2,.Loop_hw - - st1 {v0.4s},[x0],#16 - st1 {v1.s}[0],[x0] - - ldr x29,[sp],#16 - ret -.size sha1_block_armv8,.-sha1_block_armv8 -.section .rodata -.align 6 -.Lconst: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha256-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha256-armv8.S deleted file mode 100644 index 6e09f69a94..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha256-armv8.S +++ /dev/null @@ -1,1213 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. -// -// Licensed under the OpenSSL license (the "License"). You may not use -// this file except in compliance with the License. You can obtain a copy -// in the file LICENSE in the source distribution or at -// https://www.openssl.org/source/license.html - -// ==================================================================== -// Written by Andy Polyakov for the OpenSSL -// project. The module is, however, dual licensed under OpenSSL and -// CRYPTOGAMS licenses depending on where you obtain it. For further -// details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. -// ==================================================================== -// -// SHA256/512 for ARMv8. -// -// Performance in cycles per processed byte and improvement coefficient -// over code generated with "default" compiler: -// -// SHA256-hw SHA256(*) SHA512 -// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) -// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) -// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) -// Denver 2.01 10.5 (+26%) 6.70 (+8%) -// X-Gene 20.0 (+100%) 12.8 (+300%(***)) -// Mongoose 2.36 13.0 (+50%) 8.36 (+33%) -// -// (*) Software SHA256 results are of lesser relevance, presented -// mostly for informational purposes. -// (**) The result is a trade-off: it's possible to improve it by -// 10% (or by 1 cycle per round), but at the cost of 20% loss -// on Cortex-A53 (or by 4 cycles per round). 
-// (***) Super-impressive coefficients over gcc-generated code are -// indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster -// and the gap is only 40-90%. - -#ifndef __KERNEL__ -# include -#endif - -.text - - -.globl sha256_block_data_order -.hidden sha256_block_data_order -.type sha256_block_data_order,%function -.align 6 -sha256_block_data_order: -#ifndef __KERNEL__ -#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 - adrp x16,:pg_hi21_nc:OPENSSL_armcap_P -#else - adrp x16,OPENSSL_armcap_P -#endif - ldr w16,[x16,:lo12:OPENSSL_armcap_P] - tst w16,#ARMV8_SHA256 - b.ne .Lv8_entry -#endif - stp x29,x30,[sp,#-128]! - add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*4 - - ldp w20,w21,[x0] // load context - ldp w22,w23,[x0,#2*4] - ldp w24,w25,[x0,#4*4] - add x2,x1,x2,lsl#6 // end of input - ldp w26,w27,[x0,#6*4] - adrp x30,.LK256 - add x30,x30,:lo12:.LK256 - stp x0,x2,[x29,#96] - -.Loop: - ldp w3,w4,[x1],#2*4 - ldr w19,[x30],#4 // *K++ - eor w28,w21,w22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev w3,w3 // 0 -#endif - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w6,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w3 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w4,w4 // 1 -#endif - ldp w5,w6,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w7,w23,w23,ror#14 - and 
w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w4 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w5,w5 // 2 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor w8,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w5 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w6,w6 // 3 -#endif - ldp w7,w8,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w9,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w6 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w7,w7 // 4 -#endif - add w24,w24,w17 // h+=Sigma0(a) - ror w16,w20,#6 - 
add w23,w23,w19 // h+=K[i] - eor w10,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w7 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w10,ror#11 // Sigma1(e) - ror w10,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w10,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w8,w8 // 5 -#endif - ldp w9,w10,[x1],#2*4 - add w23,w23,w17 // h+=Sigma0(a) - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w11,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w8 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w11,ror#11 // Sigma1(e) - ror w11,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w11,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w9,w9 // 6 -#endif - add w22,w22,w17 // h+=Sigma0(a) - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w12,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w9 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w12,ror#11 // Sigma1(e) - ror w12,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w12,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev 
w10,w10 // 7 -#endif - ldp w11,w12,[x1],#2*4 - add w21,w21,w17 // h+=Sigma0(a) - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - eor w13,w25,w25,ror#14 - and w17,w26,w25 - bic w28,w27,w25 - add w20,w20,w10 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w13,ror#11 // Sigma1(e) - ror w13,w21,#2 - add w20,w20,w17 // h+=Ch(e,f,g) - eor w17,w21,w21,ror#9 - add w20,w20,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w24,w24,w20 // d+=h - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w13,w17,ror#13 // Sigma0(a) - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w20,w20,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w11,w11 // 8 -#endif - add w20,w20,w17 // h+=Sigma0(a) - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w14,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w11 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w14,ror#11 // Sigma1(e) - ror w14,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w14,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w12,w12 // 9 -#endif - ldp w13,w14,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w15,w23,w23,ror#14 - and w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w12 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w15,ror#11 // Sigma1(e) - ror w15,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w15,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - 
ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w13,w13 // 10 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor w0,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w13 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w0,ror#11 // Sigma1(e) - ror w0,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w0,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w14,w14 // 11 -#endif - ldp w15,w0,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w6,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w14 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w15,w15 // 12 -#endif - add w24,w24,w17 // h+=Sigma0(a) - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - eor w7,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w15 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - 
eor w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w0,w0 // 13 -#endif - ldp w1,w2,[x1] - add w23,w23,w17 // h+=Sigma0(a) - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w8,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w0 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w1,w1 // 14 -#endif - ldr w6,[sp,#12] - add w22,w22,w17 // h+=Sigma0(a) - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w9,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w1 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w2,w2 // 15 -#endif - ldr w7,[sp,#0] - add w21,w21,w17 // h+=Sigma0(a) - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next 
round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 -.Loop_16_xx: - ldr w8,[sp,#4] - str w11,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w10,w5,#7 - and w17,w25,w24 - ror w9,w2,#17 - bic w19,w26,w24 - ror w11,w20,#2 - add w27,w27,w3 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w10,w10,w5,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w11,w11,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w9,w9,w2,ror#19 - eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w11,w20,ror#22 // Sigma0(a) - eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) - add w4,w4,w13 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w4,w4,w10 - add w27,w27,w17 // h+=Sigma0(a) - add w4,w4,w9 - ldr w9,[sp,#8] - str w12,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w11,w6,#7 - and w17,w24,w23 - ror w10,w3,#17 - bic w28,w25,w23 - ror w12,w27,#2 - add w26,w26,w4 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w11,w11,w6,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w12,w12,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w10,w10,w3,ror#19 - eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w12,w27,ror#22 // 
Sigma0(a) - eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) - add w5,w5,w14 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w5,w5,w11 - add w26,w26,w17 // h+=Sigma0(a) - add w5,w5,w10 - ldr w10,[sp,#12] - str w13,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w12,w7,#7 - and w17,w23,w22 - ror w11,w4,#17 - bic w19,w24,w22 - ror w13,w26,#2 - add w25,w25,w5 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w12,w12,w7,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w13,w13,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w11,w11,w4,ror#19 - eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w13,w26,ror#22 // Sigma0(a) - eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) - add w6,w6,w15 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w6,w6,w12 - add w25,w25,w17 // h+=Sigma0(a) - add w6,w6,w11 - ldr w11,[sp,#0] - str w14,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w13,w8,#7 - and w17,w22,w21 - ror w12,w5,#17 - bic w28,w23,w21 - ror w14,w25,#2 - add w24,w24,w6 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w13,w13,w8,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w14,w14,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w12,w12,w5,ror#19 - eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w14,w25,ror#22 // Sigma0(a) - eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) - add w7,w7,w0 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w7,w7,w13 - add w24,w24,w17 // h+=Sigma0(a) - add w7,w7,w12 - ldr w12,[sp,#4] - str w15,[sp,#0] - ror w16,w20,#6 
- add w23,w23,w19 // h+=K[i] - ror w14,w9,#7 - and w17,w21,w20 - ror w13,w6,#17 - bic w19,w22,w20 - ror w15,w24,#2 - add w23,w23,w7 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w14,w14,w9,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w15,w15,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w13,w13,w6,ror#19 - eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w15,w24,ror#22 // Sigma0(a) - eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) - add w8,w8,w1 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w8,w8,w14 - add w23,w23,w17 // h+=Sigma0(a) - add w8,w8,w13 - ldr w13,[sp,#8] - str w0,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w15,w10,#7 - and w17,w20,w27 - ror w14,w7,#17 - bic w28,w21,w27 - ror w0,w23,#2 - add w22,w22,w8 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w15,w15,w10,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w0,w0,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w14,w14,w7,ror#19 - eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w0,w23,ror#22 // Sigma0(a) - eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) - add w9,w9,w2 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w9,w9,w15 - add w22,w22,w17 // h+=Sigma0(a) - add w9,w9,w14 - ldr w14,[sp,#12] - str w1,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w0,w11,#7 - and w17,w27,w26 - ror w15,w8,#17 - bic w19,w20,w26 - ror w1,w22,#2 - add w21,w21,w9 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w0,w0,w11,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // 
Sigma1(e) - eor w1,w1,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w15,w15,w8,ror#19 - eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w1,w22,ror#22 // Sigma0(a) - eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) - add w10,w10,w3 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w10,w10,w0 - add w21,w21,w17 // h+=Sigma0(a) - add w10,w10,w15 - ldr w15,[sp,#0] - str w2,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w1,w12,#7 - and w17,w26,w25 - ror w0,w9,#17 - bic w28,w27,w25 - ror w2,w21,#2 - add w20,w20,w10 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w1,w1,w12,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w2,w2,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w0,w0,w9,ror#19 - eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w2,w21,ror#22 // Sigma0(a) - eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) - add w11,w11,w4 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w11,w11,w1 - add w20,w20,w17 // h+=Sigma0(a) - add w11,w11,w0 - ldr w0,[sp,#4] - str w3,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w2,w13,#7 - and w17,w25,w24 - ror w1,w10,#17 - bic w19,w26,w24 - ror w3,w20,#2 - add w27,w27,w11 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w2,w2,w13,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w3,w3,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w1,w1,w10,ror#19 - eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w3,w20,ror#22 // Sigma0(a) - eor w1,w1,w10,lsr#10 // 
sigma1(X[i+14]) - add w12,w12,w5 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w12,w12,w2 - add w27,w27,w17 // h+=Sigma0(a) - add w12,w12,w1 - ldr w1,[sp,#8] - str w4,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w3,w14,#7 - and w17,w24,w23 - ror w2,w11,#17 - bic w28,w25,w23 - ror w4,w27,#2 - add w26,w26,w12 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w3,w3,w14,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w4,w4,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w2,w2,w11,ror#19 - eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w4,w27,ror#22 // Sigma0(a) - eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) - add w13,w13,w6 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w13,w13,w3 - add w26,w26,w17 // h+=Sigma0(a) - add w13,w13,w2 - ldr w2,[sp,#12] - str w5,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w4,w15,#7 - and w17,w23,w22 - ror w3,w12,#17 - bic w19,w24,w22 - ror w5,w26,#2 - add w25,w25,w13 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w4,w4,w15,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w5,w5,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w3,w3,w12,ror#19 - eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w5,w26,ror#22 // Sigma0(a) - eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) - add w14,w14,w7 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w14,w14,w4 - add w25,w25,w17 // h+=Sigma0(a) - add w14,w14,w3 - ldr w3,[sp,#0] - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w5,w0,#7 - 
and w17,w22,w21 - ror w4,w13,#17 - bic w28,w23,w21 - ror w6,w25,#2 - add w24,w24,w14 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w5,w5,w0,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w6,w6,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w4,w4,w13,ror#19 - eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w25,ror#22 // Sigma0(a) - eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) - add w15,w15,w8 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w15,w15,w5 - add w24,w24,w17 // h+=Sigma0(a) - add w15,w15,w4 - ldr w4,[sp,#4] - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - ror w6,w1,#7 - and w17,w21,w20 - ror w5,w14,#17 - bic w19,w22,w20 - ror w7,w24,#2 - add w23,w23,w15 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w6,w6,w1,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w7,w7,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w5,w5,w14,ror#19 - eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w24,ror#22 // Sigma0(a) - eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) - add w0,w0,w9 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w0,w0,w6 - add w23,w23,w17 // h+=Sigma0(a) - add w0,w0,w5 - ldr w5,[sp,#8] - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w7,w2,#7 - and w17,w20,w27 - ror w6,w15,#17 - bic w28,w21,w27 - ror w8,w23,#2 - add w22,w22,w0 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w7,w7,w2,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w8,w8,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and 
w19,w19,w28 // (b^c)&=(a^b) - eor w6,w6,w15,ror#19 - eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w23,ror#22 // Sigma0(a) - eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) - add w1,w1,w10 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w1,w1,w7 - add w22,w22,w17 // h+=Sigma0(a) - add w1,w1,w6 - ldr w6,[sp,#12] - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w8,w3,#7 - and w17,w27,w26 - ror w7,w0,#17 - bic w19,w20,w26 - ror w9,w22,#2 - add w21,w21,w1 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w8,w8,w3,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // Sigma1(e) - eor w9,w9,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w7,w7,w0,ror#19 - eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w22,ror#22 // Sigma0(a) - eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) - add w2,w2,w11 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w2,w2,w8 - add w21,w21,w17 // h+=Sigma0(a) - add w2,w2,w7 - ldr w7,[sp,#0] - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr 
w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 - cbnz w19,.Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#260 // rewind - - ldp w3,w4,[x0] - ldp w5,w6,[x0,#2*4] - add x1,x1,#14*4 // advance input pointer - ldp w7,w8,[x0,#4*4] - add w20,w20,w3 - ldp w9,w10,[x0,#6*4] - add w21,w21,w4 - add w22,w22,w5 - add w23,w23,w6 - stp w20,w21,[x0] - add w24,w24,w7 - add w25,w25,w8 - stp w22,w23,[x0,#2*4] - add w26,w26,w9 - add w27,w27,w10 - cmp x1,x2 - stp w24,w25,[x0,#4*4] - stp w26,w27,[x0,#6*4] - b.ne .Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*4 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret -.size sha256_block_data_order,.-sha256_block_data_order - -.section .rodata -.align 6 -.type .LK256,%object -.LK256: -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.long 0 //terminator -.size .LK256,.-.LK256 -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -.text 
-#ifndef __KERNEL__ -.type sha256_block_armv8,%function -.align 6 -sha256_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - ld1 {v0.4s,v1.4s},[x0] - adrp x3,.LK256 - add x3,x3,:lo12:.LK256 - -.Loop_hw: - ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 - sub x2,x2,#1 - ld1 {v16.4s},[x3],#16 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - rev32 v6.16b,v6.16b - rev32 v7.16b,v7.16b - orr v18.16b,v0.16b,v0.16b // offload - orr v19.16b,v1.16b,v1.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s -.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.inst 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 
0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s -.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.inst 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s -.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s -.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s -.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s -.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s -.inst 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s -.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 
//sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - ld1 {v17.4s},[x3] - add v16.4s,v16.4s,v6.4s - sub x3,x3,#64*4-16 // rewind - orr v2.16b,v0.16b,v0.16b -.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s -.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - add v17.4s,v17.4s,v7.4s - orr v2.16b,v0.16b,v0.16b -.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s -.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - add v0.4s,v0.4s,v18.4s - add v1.4s,v1.4s,v19.4s - - cbnz x2,.Loop_hw - - st1 {v0.4s,v1.4s},[x0] - - ldr x29,[sp],#16 - ret -.size sha256_block_armv8,.-sha256_block_armv8 -#endif -#ifndef __KERNEL__ -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha512-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha512-armv8.S deleted file mode 100644 index 7b9b22a02a..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/sha512-armv8.S +++ /dev/null @@ -1,1085 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. -// -// Licensed under the OpenSSL license (the "License"). You may not use -// this file except in compliance with the License. You can obtain a copy -// in the file LICENSE in the source distribution or at -// https://www.openssl.org/source/license.html - -// ==================================================================== -// Written by Andy Polyakov for the OpenSSL -// project. 
The module is, however, dual licensed under OpenSSL and -// CRYPTOGAMS licenses depending on where you obtain it. For further -// details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. -// ==================================================================== -// -// SHA256/512 for ARMv8. -// -// Performance in cycles per processed byte and improvement coefficient -// over code generated with "default" compiler: -// -// SHA256-hw SHA256(*) SHA512 -// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) -// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) -// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) -// Denver 2.01 10.5 (+26%) 6.70 (+8%) -// X-Gene 20.0 (+100%) 12.8 (+300%(***)) -// Mongoose 2.36 13.0 (+50%) 8.36 (+33%) -// -// (*) Software SHA256 results are of lesser relevance, presented -// mostly for informational purposes. -// (**) The result is a trade-off: it's possible to improve it by -// 10% (or by 1 cycle per round), but at the cost of 20% loss -// on Cortex-A53 (or by 4 cycles per round). -// (***) Super-impressive coefficients over gcc-generated code are -// indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster -// and the gap is only 40-90%. - -#ifndef __KERNEL__ -# include -#endif - -.text - - -.globl sha512_block_data_order -.hidden sha512_block_data_order -.type sha512_block_data_order,%function -.align 6 -sha512_block_data_order: - stp x29,x30,[sp,#-128]! 
- add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*8 - - ldp x20,x21,[x0] // load context - ldp x22,x23,[x0,#2*8] - ldp x24,x25,[x0,#4*8] - add x2,x1,x2,lsl#7 // end of input - ldp x26,x27,[x0,#6*8] - adrp x30,.LK512 - add x30,x30,:lo12:.LK512 - stp x0,x2,[x29,#96] - -.Loop: - ldp x3,x4,[x1],#2*8 - ldr x19,[x30],#8 // *K++ - eor x28,x21,x22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev x3,x3 // 0 -#endif - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x6,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x3 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x4,x4 // 1 -#endif - ldp x5,x6,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x7,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x4 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x5,x5 // 2 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x8,x22,x22,ror#23 - and x17,x23,x22 - bic 
x19,x24,x22 - add x25,x25,x5 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x6,x6 // 3 -#endif - ldp x7,x8,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x9,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x6 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x7,x7 // 4 -#endif - add x24,x24,x17 // h+=Sigma0(a) - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x10,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x7 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x10,ror#18 // Sigma1(e) - ror x10,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x10,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x8,x8 // 5 -#endif - ldp x9,x10,[x1],#2*8 - add x23,x23,x17 // h+=Sigma0(a) - ror 
x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x11,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x8 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x11,ror#18 // Sigma1(e) - ror x11,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x11,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x9,x9 // 6 -#endif - add x22,x22,x17 // h+=Sigma0(a) - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x12,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x9 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x12,ror#18 // Sigma1(e) - ror x12,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x12,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x10,x10 // 7 -#endif - ldp x11,x12,[x1],#2*8 - add x21,x21,x17 // h+=Sigma0(a) - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - eor x13,x25,x25,ror#23 - and x17,x26,x25 - bic x28,x27,x25 - add x20,x20,x10 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x13,ror#18 // Sigma1(e) - ror x13,x21,#28 - add x20,x20,x17 // h+=Ch(e,f,g) - eor x17,x21,x21,ror#5 - add x20,x20,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x24,x24,x20 // d+=h - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x13,x17,ror#34 // Sigma0(a) - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x20,x20,x17 // h+=Sigma0(a) 
-#ifndef __ARMEB__ - rev x11,x11 // 8 -#endif - add x20,x20,x17 // h+=Sigma0(a) - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x14,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x11 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x14,ror#18 // Sigma1(e) - ror x14,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x14,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x12,x12 // 9 -#endif - ldp x13,x14,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x15,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x12 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x15,ror#18 // Sigma1(e) - ror x15,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x15,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x13,x13 // 10 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x0,x22,x22,ror#23 - and x17,x23,x22 - bic x19,x24,x22 - add x25,x25,x13 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x0,ror#18 // Sigma1(e) - ror x0,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x0,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) 
- ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x14,x14 // 11 -#endif - ldp x15,x0,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x6,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x14 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x15,x15 // 12 -#endif - add x24,x24,x17 // h+=Sigma0(a) - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x7,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x15 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x0,x0 // 13 -#endif - ldp x1,x2,[x1] - add x23,x23,x17 // h+=Sigma0(a) - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x8,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x0 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // 
(b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x1,x1 // 14 -#endif - ldr x6,[sp,#24] - add x22,x22,x17 // h+=Sigma0(a) - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x9,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x1 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x2,x2 // 15 -#endif - ldr x7,[sp,#0] - add x21,x21,x17 // h+=Sigma0(a) - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 -.Loop_16_xx: - ldr x8,[sp,#8] - str x11,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x10,x5,#1 - and x17,x25,x24 - ror 
x9,x2,#19 - bic x19,x26,x24 - ror x11,x20,#28 - add x27,x27,x3 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x10,x10,x5,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x11,x11,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x9,x9,x2,ror#61 - eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x11,x20,ror#39 // Sigma0(a) - eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) - add x4,x4,x13 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x4,x4,x10 - add x27,x27,x17 // h+=Sigma0(a) - add x4,x4,x9 - ldr x9,[sp,#16] - str x12,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x11,x6,#1 - and x17,x24,x23 - ror x10,x3,#19 - bic x28,x25,x23 - ror x12,x27,#28 - add x26,x26,x4 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x11,x11,x6,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x12,x12,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x10,x10,x3,ror#61 - eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x12,x27,ror#39 // Sigma0(a) - eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) - add x5,x5,x14 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x5,x5,x11 - add x26,x26,x17 // h+=Sigma0(a) - add x5,x5,x10 - ldr x10,[sp,#24] - str x13,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x12,x7,#1 - and x17,x23,x22 - ror x11,x4,#19 - bic x19,x24,x22 - ror x13,x26,#28 - add x25,x25,x5 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x12,x12,x7,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x13,x13,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - 
and x28,x28,x19 // (b^c)&=(a^b) - eor x11,x11,x4,ror#61 - eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x13,x26,ror#39 // Sigma0(a) - eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) - add x6,x6,x15 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x6,x6,x12 - add x25,x25,x17 // h+=Sigma0(a) - add x6,x6,x11 - ldr x11,[sp,#0] - str x14,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x13,x8,#1 - and x17,x22,x21 - ror x12,x5,#19 - bic x28,x23,x21 - ror x14,x25,#28 - add x24,x24,x6 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x13,x13,x8,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x14,x14,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x12,x12,x5,ror#61 - eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x14,x25,ror#39 // Sigma0(a) - eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) - add x7,x7,x0 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x7,x7,x13 - add x24,x24,x17 // h+=Sigma0(a) - add x7,x7,x12 - ldr x12,[sp,#8] - str x15,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x14,x9,#1 - and x17,x21,x20 - ror x13,x6,#19 - bic x19,x22,x20 - ror x15,x24,#28 - add x23,x23,x7 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x14,x14,x9,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x15,x15,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x13,x13,x6,ror#61 - eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x15,x24,ror#39 // Sigma0(a) - eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) - add x8,x8,x1 - add x27,x27,x23 // d+=h - add 
x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x8,x8,x14 - add x23,x23,x17 // h+=Sigma0(a) - add x8,x8,x13 - ldr x13,[sp,#16] - str x0,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x15,x10,#1 - and x17,x20,x27 - ror x14,x7,#19 - bic x28,x21,x27 - ror x0,x23,#28 - add x22,x22,x8 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x15,x15,x10,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x0,x0,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x14,x14,x7,ror#61 - eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x0,x23,ror#39 // Sigma0(a) - eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) - add x9,x9,x2 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x9,x9,x15 - add x22,x22,x17 // h+=Sigma0(a) - add x9,x9,x14 - ldr x14,[sp,#24] - str x1,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x0,x11,#1 - and x17,x27,x26 - ror x15,x8,#19 - bic x19,x20,x26 - ror x1,x22,#28 - add x21,x21,x9 // h+=X[i] - eor x16,x16,x26,ror#18 - eor x0,x0,x11,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x1,x1,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x15,x15,x8,ror#61 - eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x1,x22,ror#39 // Sigma0(a) - eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) - add x10,x10,x3 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x10,x10,x0 - add x21,x21,x17 // h+=Sigma0(a) - add x10,x10,x15 - ldr x15,[sp,#0] - str x2,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x1,x12,#1 - and x17,x26,x25 - ror x0,x9,#19 - bic x28,x27,x25 - ror 
x2,x21,#28 - add x20,x20,x10 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x1,x1,x12,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x2,x2,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x0,x0,x9,ror#61 - eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x2,x21,ror#39 // Sigma0(a) - eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) - add x11,x11,x4 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x11,x11,x1 - add x20,x20,x17 // h+=Sigma0(a) - add x11,x11,x0 - ldr x0,[sp,#8] - str x3,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x2,x13,#1 - and x17,x25,x24 - ror x1,x10,#19 - bic x19,x26,x24 - ror x3,x20,#28 - add x27,x27,x11 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x2,x2,x13,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x3,x3,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x1,x1,x10,ror#61 - eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x3,x20,ror#39 // Sigma0(a) - eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) - add x12,x12,x5 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x12,x12,x2 - add x27,x27,x17 // h+=Sigma0(a) - add x12,x12,x1 - ldr x1,[sp,#16] - str x4,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x3,x14,#1 - and x17,x24,x23 - ror x2,x11,#19 - bic x28,x25,x23 - ror x4,x27,#28 - add x26,x26,x12 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x3,x3,x14,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x4,x4,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor 
x2,x2,x11,ror#61 - eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x4,x27,ror#39 // Sigma0(a) - eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) - add x13,x13,x6 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x13,x13,x3 - add x26,x26,x17 // h+=Sigma0(a) - add x13,x13,x2 - ldr x2,[sp,#24] - str x5,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x4,x15,#1 - and x17,x23,x22 - ror x3,x12,#19 - bic x19,x24,x22 - ror x5,x26,#28 - add x25,x25,x13 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x4,x4,x15,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x5,x5,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x3,x3,x12,ror#61 - eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x5,x26,ror#39 // Sigma0(a) - eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) - add x14,x14,x7 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x14,x14,x4 - add x25,x25,x17 // h+=Sigma0(a) - add x14,x14,x3 - ldr x3,[sp,#0] - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x5,x0,#1 - and x17,x22,x21 - ror x4,x13,#19 - bic x28,x23,x21 - ror x6,x25,#28 - add x24,x24,x14 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x5,x5,x0,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x6,x6,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x4,x4,x13,ror#61 - eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x25,ror#39 // Sigma0(a) - eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) - add x15,x15,x8 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, 
x28 in next round - add x15,x15,x5 - add x24,x24,x17 // h+=Sigma0(a) - add x15,x15,x4 - ldr x4,[sp,#8] - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x6,x1,#1 - and x17,x21,x20 - ror x5,x14,#19 - bic x19,x22,x20 - ror x7,x24,#28 - add x23,x23,x15 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x6,x6,x1,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x7,x7,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x5,x5,x14,ror#61 - eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x24,ror#39 // Sigma0(a) - eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) - add x0,x0,x9 - add x27,x27,x23 // d+=h - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x0,x0,x6 - add x23,x23,x17 // h+=Sigma0(a) - add x0,x0,x5 - ldr x5,[sp,#16] - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x7,x2,#1 - and x17,x20,x27 - ror x6,x15,#19 - bic x28,x21,x27 - ror x8,x23,#28 - add x22,x22,x0 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x7,x7,x2,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x8,x8,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x6,x6,x15,ror#61 - eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x23,ror#39 // Sigma0(a) - eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) - add x1,x1,x10 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x1,x1,x7 - add x22,x22,x17 // h+=Sigma0(a) - add x1,x1,x6 - ldr x6,[sp,#24] - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x8,x3,#1 - and x17,x27,x26 - ror x7,x0,#19 - bic x19,x20,x26 - ror x9,x22,#28 - add x21,x21,x1 // h+=X[i] - eor x16,x16,x26,ror#18 - eor 
x8,x8,x3,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x9,x9,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x7,x7,x0,ror#61 - eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x22,ror#39 // Sigma0(a) - eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) - add x2,x2,x11 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x2,x2,x8 - add x21,x21,x17 // h+=Sigma0(a) - add x2,x2,x7 - ldr x7,[sp,#0] - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 - cbnz x19,.Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#648 // rewind - - ldp x3,x4,[x0] - ldp x5,x6,[x0,#2*8] - add x1,x1,#14*8 // advance input pointer - ldp x7,x8,[x0,#4*8] - add x20,x20,x3 - ldp x9,x10,[x0,#6*8] - add x21,x21,x4 - add x22,x22,x5 - add x23,x23,x6 - stp x20,x21,[x0] - add x24,x24,x7 - add x25,x25,x8 - stp x22,x23,[x0,#2*8] - add x26,x26,x9 - add x27,x27,x10 - cmp x1,x2 - stp x24,x25,[x0,#4*8] - stp x26,x27,[x0,#6*8] - b.ne .Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*8 - ldp x21,x22,[x29,#32] - 
ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret -.size sha512_block_data_order,.-sha512_block_data_order - -.section .rodata -.align 6 -.type .LK512,%object -.LK512: -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 
0x5fcb6fab3ad6faec,0x6c44198c4a475817 -.quad 0 // terminator -.size .LK512,.-.LK512 -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#ifndef __KERNEL__ -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S deleted file mode 100644 index f57b7b5174..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S +++ /dev/null @@ -1,1216 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.section .rodata - -.type _vpaes_consts,%object -.align 7 // totally strategic alignment -_vpaes_consts: -.Lk_mc_forward: // mc_forward -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 -.Lk_mc_backward: // mc_backward -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F -.Lk_sr: // sr -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -// -// "Hot" constants -// 
-.Lk_inv: // inv, inva -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 -.Lk_ipt: // input transform (lo, hi) -.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 -.Lk_sbo: // sbou, sbot -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA -.Lk_sb1: // sb1u, sb1t -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -.Lk_sb2: // sb2u, sb2t -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD - -// -// Decryption stuff -// -.Lk_dipt: // decryption input transform -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 -.Lk_dsbo: // decryption sbox final output -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C -.Lk_dsb9: // decryption sbox output *9*u, *9*t -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -.Lk_dsbd: // decryption sbox output *D*u, *D*t -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -.Lk_dsbb: // decryption sbox output *B*u, *B*t -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -.Lk_dsbe: // decryption sbox output *E*u, *E*t -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 - -// -// Key schedule constants -// -.Lk_dksd: // decryption key schedule: invskew x*D -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -.Lk_dksb: // decryption key schedule: invskew x*B -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -.Lk_dkse: // decryption key schedule: invskew x*E + 0x63 -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -.Lk_dks9: // decryption key schedule: invskew x*9 -.quad 
0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - -.Lk_rcon: // rcon -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -.Lk_opt: // output transform -.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 -.Lk_deskew: // deskew tables: inverts the sbox's "skew" -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.align 2 -.size _vpaes_consts,.-_vpaes_consts -.align 6 - -.text -## -## _aes_preheat -## -## Fills register %r10 -> .aes_consts (so you can -fPIC) -## and %xmm9-%xmm15 as specified below. -## -.type _vpaes_encrypt_preheat,%function -.align 4 -_vpaes_encrypt_preheat: - adrp x10, .Lk_inv - add x10, x10, :lo12:.Lk_inv - movi v17.16b, #0x0f - ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv - ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2 - ret -.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat - -## -## _aes_encrypt_core -## -## AES-encrypt %xmm0. 
-## -## Inputs: -## %xmm0 = input -## %xmm9-%xmm15 as in _vpaes_preheat -## (%rdx) = scheduled keys -## -## Output in %xmm0 -## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax -## Preserves %xmm6 - %xmm8 so you get some local vectors -## -## -.type _vpaes_encrypt_core,%function -.align 4 -_vpaes_encrypt_core: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - adrp x11, .Lk_mc_forward+16 - add x11, x11, :lo12:.Lk_mc_forward+16 - // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key - and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 - // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 - eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - b .Lenc_entry - -.align 4 -.Lenc_loop: - // middle of middle round - add x10, x11, #0x40 - tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] - tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] - tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B - eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - eor v0.16b, v0.16b, v3.16b // vpxor 
%xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - sub w8, w8, #1 // nr-- - -.Lenc_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 - cbnz w8, .Lenc_loop - - // middle of last round - add x10, x11, #0x80 - // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo - // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] - tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 - ret -.size _vpaes_encrypt_core,.-_vpaes_encrypt_core - -.globl vpaes_encrypt -.hidden vpaes_encrypt -.type vpaes_encrypt,%function -.align 4 -vpaes_encrypt: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - - ld1 {v7.16b}, [x0] - bl _vpaes_encrypt_preheat - bl _vpaes_encrypt_core - st1 {v0.16b}, [x1] - - ldp x29,x30,[sp],#16 - ret -.size vpaes_encrypt,.-vpaes_encrypt - -.type _vpaes_encrypt_2x,%function -.align 4 -_vpaes_encrypt_2x: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - adrp x11, .Lk_mc_forward+16 - add x11, x11, :lo12:.Lk_mc_forward+16 - // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key - and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - and v9.16b, v15.16b, v17.16b - ushr v8.16b, v15.16b, #4 - tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 - tbl v9.16b, {v20.16b}, v9.16b - // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 - tbl v10.16b, {v21.16b}, v8.16b - eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 - eor v8.16b, v9.16b, v16.16b - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - eor v8.16b, v8.16b, v10.16b - b .Lenc_2x_entry - -.align 4 -.Lenc_2x_loop: - // middle of middle round - add x10, x11, #0x40 - tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - tbl v12.16b, {v25.16b}, v10.16b - ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] - tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - tbl v8.16b, {v24.16b}, v11.16b - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - tbl v13.16b, {v27.16b}, v10.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - eor v8.16b, v8.16b, v12.16b - tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - tbl v10.16b, {v26.16b}, v11.16b - ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] - tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, 
%xmm3 # 0 = B - tbl v11.16b, {v8.16b}, v1.16b - eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - eor v10.16b, v10.16b, v13.16b - tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - tbl v8.16b, {v8.16b}, v4.16b - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - eor v11.16b, v11.16b, v10.16b - tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - tbl v12.16b, {v11.16b},v1.16b - eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - eor v8.16b, v8.16b, v11.16b - and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - eor v8.16b, v8.16b, v12.16b - sub w8, w8, #1 // nr-- - -.Lenc_2x_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - and v9.16b, v8.16b, v17.16b - ushr v8.16b, v8.16b, #4 - tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - tbl v13.16b, {v19.16b},v9.16b - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - eor v9.16b, v9.16b, v8.16b - tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v11.16b, {v18.16b},v8.16b - tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - tbl v12.16b, {v18.16b},v9.16b - eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v11.16b, v11.16b, v13.16b - eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - eor v12.16b, v12.16b, v13.16b - tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v10.16b, {v18.16b},v11.16b - tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - tbl v11.16b, {v18.16b},v12.16b - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v10.16b, v10.16b, v9.16b - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - eor v11.16b, v11.16b, 
v8.16b - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 - cbnz w8, .Lenc_2x_loop - - // middle of last round - add x10, x11, #0x80 - // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo - // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - tbl v12.16b, {v22.16b}, v10.16b - ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] - tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - tbl v8.16b, {v23.16b}, v11.16b - eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A - eor v8.16b, v8.16b, v12.16b - tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 - tbl v1.16b, {v8.16b},v1.16b - ret -.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x - -.type _vpaes_decrypt_preheat,%function -.align 4 -_vpaes_decrypt_preheat: - adrp x10, .Lk_inv - add x10, x10, :lo12:.Lk_inv - movi v17.16b, #0x0f - adrp x11, .Lk_dipt - add x11, x11, :lo12:.Lk_dipt - ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv - ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd - ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe - ret -.size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat - -## -## Decryption core -## -## Same API as encryption core. 
-## -.type _vpaes_decrypt_core,%function -.align 4 -_vpaes_decrypt_core: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - - // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo - lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 - eor x11, x11, #0x30 // xor $0x30, %r11 - adrp x10, .Lk_sr - add x10, x10, :lo12:.Lk_sr - and x11, x11, #0x30 // and $0x30, %r11 - add x11, x11, x10 - adrp x10, .Lk_mc_forward+48 - add x10, x10, :lo12:.Lk_mc_forward+48 - - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key - and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5 - // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - b .Ldec_entry - -.align 4 -.Ldec_loop: -// -// Inverse mix columns -// - // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 - // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - - tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - tbl 
v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet - - tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - sub w8, w8, #1 // sub $1,%rax # nr-- - -.Ldec_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 - cbnz w8, .Ldec_loop - - // middle of last round - // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - ld1 {v2.2d}, [x11] // 
vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 - tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t - eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k - eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A - tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 - ret -.size _vpaes_decrypt_core,.-_vpaes_decrypt_core - -.globl vpaes_decrypt -.hidden vpaes_decrypt -.type vpaes_decrypt,%function -.align 4 -vpaes_decrypt: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - ld1 {v7.16b}, [x0] - bl _vpaes_decrypt_preheat - bl _vpaes_decrypt_core - st1 {v0.16b}, [x1] - - ldp x29,x30,[sp],#16 - ret -.size vpaes_decrypt,.-vpaes_decrypt - -// v14-v15 input, v0-v1 output -.type _vpaes_decrypt_2x,%function -.align 4 -_vpaes_decrypt_2x: - mov x9, x2 - ldr w8, [x2,#240] // pull rounds - - // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo - lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 - eor x11, x11, #0x30 // xor $0x30, %r11 - adrp x10, .Lk_sr - add x10, x10, :lo12:.Lk_sr - and x11, x11, #0x30 // and $0x30, %r11 - add x11, x11, x10 - adrp x10, .Lk_mc_forward+48 - add x10, x10, :lo12:.Lk_mc_forward+48 - - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key - and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - and v9.16b, v15.16b, v17.16b - ushr v8.16b, v15.16b, #4 - tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - tbl v10.16b, {v20.16b},v9.16b - ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5 - // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - tbl v8.16b, {v21.16b},v8.16b - eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 - eor v10.16b, v10.16b, v16.16b - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - eor v8.16b, v8.16b, v10.16b - b .Ldec_2x_entry - -.align 4 -.Ldec_2x_loop: -// -// Inverse mix columns -// - // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - // 
vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - tbl v12.16b, {v24.16b}, v10.16b - tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - tbl v9.16b, {v25.16b}, v11.16b - eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 - eor v8.16b, v12.16b, v16.16b - // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - - tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - tbl v12.16b, {v26.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - tbl v9.16b, {v27.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - tbl v12.16b, {v28.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - tbl v9.16b, {v29.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet - - tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - tbl v12.16b, {v30.16b}, v10.16b - tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch - tbl v8.16b, {v8.16b},v5.16b - tbl v1.16b, {v31.16b}, v3.16b // vpshufb 
%xmm3, %xmm1, %xmm1 # 0 = sbet - tbl v9.16b, {v31.16b}, v11.16b - eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - eor v8.16b, v8.16b, v12.16b - ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 - eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - eor v8.16b, v8.16b, v9.16b - sub w8, w8, #1 // sub $1,%rax # nr-- - -.Ldec_2x_entry: - // top of round - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - and v9.16b, v8.16b, v17.16b - ushr v8.16b, v8.16b, #4 - tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - tbl v10.16b, {v19.16b},v9.16b - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - eor v9.16b, v9.16b, v8.16b - tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - tbl v11.16b, {v18.16b},v8.16b - tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - tbl v12.16b, {v18.16b},v9.16b - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - eor v11.16b, v11.16b, v10.16b - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - eor v12.16b, v12.16b, v10.16b - tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - tbl v10.16b, {v18.16b},v11.16b - tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - tbl v11.16b, {v18.16b},v12.16b - eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io - eor v10.16b, v10.16b, v9.16b - eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - eor v11.16b, v11.16b, v8.16b - ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 - cbnz w8, .Ldec_2x_loop - - // middle of last round - // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - tbl v12.16b, {v22.16b}, v10.16b - // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = 
sb1t - tbl v9.16b, {v23.16b}, v11.16b - ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 - eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k - eor v12.16b, v12.16b, v16.16b - eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A - eor v8.16b, v9.16b, v12.16b - tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 - tbl v1.16b, {v8.16b},v2.16b - ret -.size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x -######################################################## -## ## -## AES key schedule ## -## ## -######################################################## -.type _vpaes_key_preheat,%function -.align 4 -_vpaes_key_preheat: - adrp x10, .Lk_inv - add x10, x10, :lo12:.Lk_inv - movi v16.16b, #0x5b // .Lk_s63 - adrp x11, .Lk_sb1 - add x11, x11, :lo12:.Lk_sb1 - movi v17.16b, #0x0f // .Lk_s0F - ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt - adrp x10, .Lk_dksd - add x10, x10, :lo12:.Lk_dksd - ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1 - adrp x11, .Lk_mc_forward - add x11, x11, :lo12:.Lk_mc_forward - ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb - ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9 - ld1 {v8.2d}, [x10] // .Lk_rcon - ld1 {v9.2d}, [x11] // .Lk_mc_forward[0] - ret -.size _vpaes_key_preheat,.-_vpaes_key_preheat - -.type _vpaes_schedule_core,%function -.align 4 -_vpaes_schedule_core: - stp x29, x30, [sp,#-16]! 
- add x29,sp,#0 - - bl _vpaes_key_preheat // load the tables - - ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) - - // input transform - mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 - bl _vpaes_schedule_transform - mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 - - adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10 - add x10, x10, :lo12:.Lk_sr - - add x8, x8, x10 - cbnz w3, .Lschedule_am_decrypting - - // encrypting, output zeroth round key after transform - st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) - b .Lschedule_go - -.Lschedule_am_decrypting: - // decrypting, output zeroth round key after shiftrows - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) - eor x8, x8, #0x30 // xor $0x30, %r8 - -.Lschedule_go: - cmp w1, #192 // cmp $192, %esi - b.hi .Lschedule_256 - b.eq .Lschedule_192 - // 128: fall though - -## -## .schedule_128 -## -## 128-bit specific part of key schedule. -## -## This schedule is really simple, because all its parts -## are accomplished by the subroutines. -## -.Lschedule_128: - mov x0, #10 // mov $10, %esi - -.Loop_schedule_128: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_round - cbz x0, .Lschedule_mangle_last - bl _vpaes_schedule_mangle // write output - b .Loop_schedule_128 - -## -## .aes_schedule_192 -## -## 192-bit specific part of key schedule. -## -## The main body of this schedule is the same as the 128-bit -## schedule, but with more smearing. The long, high side is -## stored in %xmm7 as before, and the short, low side is in -## the high bits of %xmm6. -## -## This schedule is somewhat nastier, however, because each -## round produces 192 bits of key material, or 1.5 round keys. -## Therefore, on each cycle we do 2 rounds and produce 3 round -## keys. 
-## -.align 4 -.Lschedule_192: - sub x0, x0, #8 - ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) - bl _vpaes_schedule_transform // input transform - mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part - eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 - ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros - mov x0, #4 // mov $4, %esi - -.Loop_schedule_192: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_round - ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 - bl _vpaes_schedule_mangle // save key n - bl _vpaes_schedule_192_smear - bl _vpaes_schedule_mangle // save key n+1 - bl _vpaes_schedule_round - cbz x0, .Lschedule_mangle_last - bl _vpaes_schedule_mangle // save key n+2 - bl _vpaes_schedule_192_smear - b .Loop_schedule_192 - -## -## .aes_schedule_256 -## -## 256-bit specific part of key schedule. -## -## The structure here is very similar to the 128-bit -## schedule, but with an additional "low side" in -## %xmm6. The low side's rounds are the same as the -## high side's, except no rcon and no rotation. -## -.align 4 -.Lschedule_256: - ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) - bl _vpaes_schedule_transform // input transform - mov x0, #7 // mov $7, %esi - -.Loop_schedule_256: - sub x0, x0, #1 // dec %esi - bl _vpaes_schedule_mangle // output low result - mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 - - // high round - bl _vpaes_schedule_round - cbz x0, .Lschedule_mangle_last - bl _vpaes_schedule_mangle - - // low round. 
swap xmm7 and xmm6 - dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 - movi v4.16b, #0 - mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 - mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 - bl _vpaes_schedule_low_round - mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 - - b .Loop_schedule_256 - -## -## .aes_schedule_mangle_last -## -## Mangler for last round of key schedule -## Mangles %xmm0 -## when encrypting, outputs out(%xmm0) ^ 63 -## when decrypting, outputs unskew(%xmm0) -## -## Always called right before return... jumps to cleanup and exits -## -.align 4 -.Lschedule_mangle_last: - // schedule last round key from xmm0 - adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew - add x11, x11, :lo12:.Lk_deskew - - cbnz w3, .Lschedule_mangle_last_dec - - // encrypting - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 - adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform - add x11, x11, :lo12:.Lk_opt - add x2, x2, #32 // add $32, %rdx - tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute - -.Lschedule_mangle_last_dec: - ld1 {v20.2d,v21.2d}, [x11] // reload constants - sub x2, x2, #16 // add $-16, %rdx - eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0 - bl _vpaes_schedule_transform // output transform - st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key - - // cleanup - eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 - eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 - eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 - eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 - eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 - eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 - eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 - eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 - ldp x29, x30, [sp],#16 - ret -.size _vpaes_schedule_core,.-_vpaes_schedule_core - -## -## .aes_schedule_192_smear -## -## Smear the short, low side in the 192-bit key 
schedule. -## -## Inputs: -## %xmm7: high side, b a x y -## %xmm6: low side, d c 0 0 -## %xmm13: 0 -## -## Outputs: -## %xmm6: b+c+d b+c 0 0 -## %xmm0: b+c+d b+c b a -## -.type _vpaes_schedule_192_smear,%function -.align 4 -_vpaes_schedule_192_smear: - movi v1.16b, #0 - dup v0.4s, v7.s[3] - ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 - ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a - eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 - eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 - eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a - mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 - ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros - ret -.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear - -## -## .aes_schedule_round -## -## Runs one main round of the key schedule on %xmm0, %xmm7 -## -## Specifically, runs subbytes on the high dword of %xmm0 -## then rotates it by one byte and xors into the low dword of -## %xmm7. -## -## Adds rcon from low byte of %xmm8, then rotates %xmm8 for -## next rcon. -## -## Smears the dwords of %xmm7 by xoring the low into the -## second low, result into third, result into highest. -## -## Returns results in %xmm7 = %xmm0. -## Clobbers %xmm1-%xmm4, %r11. -## -.type _vpaes_schedule_round,%function -.align 4 -_vpaes_schedule_round: - // extract rcon from xmm8 - movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 - ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 - ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 - eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 - - // rotate - dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 - ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 - - // fall through... - - // low round: same as high round, but no rotation and no rcon. 
-_vpaes_schedule_low_round: - // smear xmm7 - ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 - eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 - ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 - - // subbytes - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i - eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 - tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j - tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7 - tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak - eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak - eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io - eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo - tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou - tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t - eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output - - // add in smeared stuff - eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 - eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 - ret -.size _vpaes_schedule_round,.-_vpaes_schedule_round - -## -## .aes_schedule_transform -## -## Linear-transform %xmm0 according to tables at (%r11) -## -## Requires that %xmm9 = 0x0F0F... 
as in preheat -## Output in %xmm0 -## Clobbers %xmm1, %xmm2 -## -.type _vpaes_schedule_transform,%function -.align 4 -_vpaes_schedule_transform: - and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 - ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 - // vmovdqa (%r11), %xmm2 # lo - tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 - // vmovdqa 16(%r11), %xmm1 # hi - tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 - eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 - ret -.size _vpaes_schedule_transform,.-_vpaes_schedule_transform - -## -## .aes_schedule_mangle -## -## Mangle xmm0 from (basis-transformed) standard version -## to our version. -## -## On encrypt, -## xor with 0x63 -## multiply by circulant 0,1,1,1 -## apply shiftrows transform -## -## On decrypt, -## xor with 0x63 -## multiply by "inverse mixcolumns" circulant E,B,D,9 -## deskew -## apply shiftrows transform -## -## -## Writes out to (%rdx), and increments or decrements it -## Keeps track of round number mod 4 in %r8 -## Preserves xmm0 -## Clobbers xmm1-xmm5 -## -.type _vpaes_schedule_mangle,%function -.align 4 -_vpaes_schedule_mangle: - mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later - // vmovdqa .Lk_mc_forward(%rip),%xmm5 - cbnz w3, .Lschedule_mangle_dec - - // encrypting - eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4 - add x2, x2, #16 // add $16, %rdx - tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 - tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 - tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 - eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 - - b .Lschedule_mangle_both -.align 4 -.Lschedule_mangle_dec: - // inverse mix columns - // lea .Lk_dksd(%rip),%r11 - ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi - and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, 
%xmm4 # 4 = lo - - // vmovdqa 0x00(%r11), %xmm2 - tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - // vmovdqa 0x10(%r11), %xmm3 - tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - - // vmovdqa 0x20(%r11), %xmm2 - tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - // vmovdqa 0x30(%r11), %xmm3 - tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - - // vmovdqa 0x40(%r11), %xmm2 - tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - // vmovdqa 0x50(%r11), %xmm3 - tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 - - // vmovdqa 0x60(%r11), %xmm2 - tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 - tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 - // vmovdqa 0x70(%r11), %xmm4 - tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 - ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 - eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 - eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 - - sub x2, x2, #16 // add $-16, %rdx - -.Lschedule_mangle_both: - tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - add x8, x8, #64-16 // add $-16, %r8 - and x8, x8, #~(1<<6) // and $0x30, %r8 - st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) - ret -.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle - -.globl vpaes_set_encrypt_key -.hidden vpaes_set_encrypt_key -.type vpaes_set_encrypt_key,%function -.align 4 -vpaes_set_encrypt_key: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! 
// ABI spec says so - - lsr w9, w1, #5 // shr $5,%eax - add w9, w9, #5 // $5,%eax - str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - - mov w3, #0 // mov $0,%ecx - mov x8, #0x30 // mov $0x30,%r8d - bl _vpaes_schedule_core - eor x0, x0, x0 - - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret -.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key - -.globl vpaes_set_decrypt_key -.hidden vpaes_set_decrypt_key -.type vpaes_set_decrypt_key,%function -.align 4 -vpaes_set_decrypt_key: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - - lsr w9, w1, #5 // shr $5,%eax - add w9, w9, #5 // $5,%eax - str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - lsl w9, w9, #4 // shl $4,%eax - add x2, x2, #16 // lea 16(%rdx,%rax),%rdx - add x2, x2, x9 - - mov w3, #1 // mov $1,%ecx - lsr w8, w1, #1 // shr $1,%r8d - and x8, x8, #32 // and $32,%r8d - eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 - bl _vpaes_schedule_core - - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret -.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key -.globl vpaes_cbc_encrypt -.hidden vpaes_cbc_encrypt -.type vpaes_cbc_encrypt,%function -.align 4 -vpaes_cbc_encrypt: - cbz x2, .Lcbc_abort - cmp w5, #0 // check direction - b.eq vpaes_cbc_decrypt - - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - mov x17, x2 // reassign - mov x2, x3 // reassign - - ld1 {v0.16b}, [x4] // load ivec - bl _vpaes_encrypt_preheat - b .Lcbc_enc_loop - -.align 4 -.Lcbc_enc_loop: - ld1 {v7.16b}, [x0],#16 // load input - eor v7.16b, v7.16b, v0.16b // xor with ivec - bl _vpaes_encrypt_core - st1 {v0.16b}, [x1],#16 // save output - subs x17, x17, #16 - b.hi .Lcbc_enc_loop - - st1 {v0.16b}, [x4] // write ivec - - ldp x29,x30,[sp],#16 -.Lcbc_abort: - ret -.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt - -.type vpaes_cbc_decrypt,%function -.align 4 -vpaes_cbc_decrypt: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - stp d10,d11,[sp,#-16]! 
- stp d12,d13,[sp,#-16]! - stp d14,d15,[sp,#-16]! - - mov x17, x2 // reassign - mov x2, x3 // reassign - ld1 {v6.16b}, [x4] // load ivec - bl _vpaes_decrypt_preheat - tst x17, #16 - b.eq .Lcbc_dec_loop2x - - ld1 {v7.16b}, [x0], #16 // load input - bl _vpaes_decrypt_core - eor v0.16b, v0.16b, v6.16b // xor with ivec - orr v6.16b, v7.16b, v7.16b // next ivec value - st1 {v0.16b}, [x1], #16 - subs x17, x17, #16 - b.ls .Lcbc_dec_done - -.align 4 -.Lcbc_dec_loop2x: - ld1 {v14.16b,v15.16b}, [x0], #32 - bl _vpaes_decrypt_2x - eor v0.16b, v0.16b, v6.16b // xor with ivec - eor v1.16b, v1.16b, v14.16b - orr v6.16b, v15.16b, v15.16b - st1 {v0.16b,v1.16b}, [x1], #32 - subs x17, x17, #32 - b.hi .Lcbc_dec_loop2x - -.Lcbc_dec_done: - st1 {v6.16b}, [x4] - - ldp d14,d15,[sp],#16 - ldp d12,d13,[sp],#16 - ldp d10,d11,[sp],#16 - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret -.size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt -.globl vpaes_ctr32_encrypt_blocks -.hidden vpaes_ctr32_encrypt_blocks -.type vpaes_ctr32_encrypt_blocks,%function -.align 4 -vpaes_ctr32_encrypt_blocks: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - stp d8,d9,[sp,#-16]! // ABI spec says so - stp d10,d11,[sp,#-16]! - stp d12,d13,[sp,#-16]! - stp d14,d15,[sp,#-16]! - - cbz x2, .Lctr32_done - - // Note, unlike the other functions, x2 here is measured in blocks, - // not bytes. - mov x17, x2 - mov x2, x3 - - // Load the IV and counter portion. - ldr w6, [x4, #12] - ld1 {v7.16b}, [x4] - - bl _vpaes_encrypt_preheat - tst x17, #1 - rev w6, w6 // The counter is big-endian. - b.eq .Lctr32_prep_loop - - // Handle one block so the remaining block count is even for - // _vpaes_encrypt_2x. - ld1 {v6.16b}, [x0], #16 // .Load input ahead of time - bl _vpaes_encrypt_core - eor v0.16b, v0.16b, v6.16b // XOR input and result - st1 {v0.16b}, [x1], #16 - subs x17, x17, #1 - // Update the counter. 
- add w6, w6, #1 - rev w7, w6 - mov v7.s[3], w7 - b.ls .Lctr32_done - -.Lctr32_prep_loop: - // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x - // uses v14 and v15. - mov v15.16b, v7.16b - mov v14.16b, v7.16b - add w6, w6, #1 - rev w7, w6 - mov v15.s[3], w7 - -.Lctr32_loop: - ld1 {v6.16b,v7.16b}, [x0], #32 // .Load input ahead of time - bl _vpaes_encrypt_2x - eor v0.16b, v0.16b, v6.16b // XOR input and result - eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) - st1 {v0.16b,v1.16b}, [x1], #32 - subs x17, x17, #2 - // Update the counter. - add w7, w6, #1 - add w6, w6, #2 - rev w7, w7 - mov v14.s[3], w7 - rev w7, w6 - mov v15.s[3], w7 - b.hi .Lctr32_loop - -.Lctr32_done: - ldp d14,d15,[sp],#16 - ldp d12,d13,[sp],#16 - ldp d10,d11,[sp],#16 - ldp d8,d9,[sp],#16 - ldp x29,x30,[sp],#16 - ret -.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/test/trampoline-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/test/trampoline-armv8.S deleted file mode 100644 index 9a21cc2c6e..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/test/trampoline-armv8.S +++ /dev/null @@ -1,688 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -// abi_test_trampoline loads callee-saved registers from |state|, calls |func| -// with |argv|, then saves the callee-saved registers into |state|. It returns -// the result of |func|. The |unwind| argument is unused. 
-// uint64_t abi_test_trampoline(void (*func)(...), CallerState *state, -// const uint64_t *argv, size_t argc, -// uint64_t unwind); -.type abi_test_trampoline, %function -.globl abi_test_trampoline -.hidden abi_test_trampoline -.align 4 -abi_test_trampoline: -.Labi_test_trampoline_begin: - // Stack layout (low to high addresses) - // x29,x30 (16 bytes) - // d8-d15 (64 bytes) - // x19-x28 (80 bytes) - // x1 (8 bytes) - // padding (8 bytes) - stp x29, x30, [sp, #-176]! - mov x29, sp - - // Saved callee-saved registers and |state|. - stp d8, d9, [sp, #16] - stp d10, d11, [sp, #32] - stp d12, d13, [sp, #48] - stp d14, d15, [sp, #64] - stp x19, x20, [sp, #80] - stp x21, x22, [sp, #96] - stp x23, x24, [sp, #112] - stp x25, x26, [sp, #128] - stp x27, x28, [sp, #144] - str x1, [sp, #160] - - // Load registers from |state|, with the exception of x29. x29 is the - // frame pointer and also callee-saved, but AAPCS64 allows platforms to - // mandate that x29 always point to a frame. iOS64 does so, which means - // we cannot fill x29 with entropy without violating ABI rules - // ourselves. x29 is tested separately below. - ldp d8, d9, [x1], #16 - ldp d10, d11, [x1], #16 - ldp d12, d13, [x1], #16 - ldp d14, d15, [x1], #16 - ldp x19, x20, [x1], #16 - ldp x21, x22, [x1], #16 - ldp x23, x24, [x1], #16 - ldp x25, x26, [x1], #16 - ldp x27, x28, [x1], #16 - - // Move parameters into temporary registers. - mov x9, x0 - mov x10, x2 - mov x11, x3 - - // Load parameters into registers. 
- cbz x11, .Largs_done - ldr x0, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x1, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x2, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x3, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x4, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x5, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x6, [x10], #8 - subs x11, x11, #1 - b.eq .Largs_done - ldr x7, [x10], #8 - -.Largs_done: - blr x9 - - // Reload |state| and store registers. - ldr x1, [sp, #160] - stp d8, d9, [x1], #16 - stp d10, d11, [x1], #16 - stp d12, d13, [x1], #16 - stp d14, d15, [x1], #16 - stp x19, x20, [x1], #16 - stp x21, x22, [x1], #16 - stp x23, x24, [x1], #16 - stp x25, x26, [x1], #16 - stp x27, x28, [x1], #16 - - // |func| is required to preserve x29, the frame pointer. We cannot load - // random values into x29 (see comment above), so compare it against the - // expected value and zero the field of |state| if corrupted. - mov x9, sp - cmp x29, x9 - b.eq .Lx29_ok - str xzr, [x1] - -.Lx29_ok: - // Restore callee-saved registers. 
- ldp d8, d9, [sp, #16] - ldp d10, d11, [sp, #32] - ldp d12, d13, [sp, #48] - ldp d14, d15, [sp, #64] - ldp x19, x20, [sp, #80] - ldp x21, x22, [sp, #96] - ldp x23, x24, [sp, #112] - ldp x25, x26, [sp, #128] - ldp x27, x28, [sp, #144] - - ldp x29, x30, [sp], #176 - ret -.size abi_test_trampoline,.-abi_test_trampoline -.type abi_test_clobber_x0, %function -.globl abi_test_clobber_x0 -.hidden abi_test_clobber_x0 -.align 4 -abi_test_clobber_x0: - mov x0, xzr - ret -.size abi_test_clobber_x0,.-abi_test_clobber_x0 -.type abi_test_clobber_x1, %function -.globl abi_test_clobber_x1 -.hidden abi_test_clobber_x1 -.align 4 -abi_test_clobber_x1: - mov x1, xzr - ret -.size abi_test_clobber_x1,.-abi_test_clobber_x1 -.type abi_test_clobber_x2, %function -.globl abi_test_clobber_x2 -.hidden abi_test_clobber_x2 -.align 4 -abi_test_clobber_x2: - mov x2, xzr - ret -.size abi_test_clobber_x2,.-abi_test_clobber_x2 -.type abi_test_clobber_x3, %function -.globl abi_test_clobber_x3 -.hidden abi_test_clobber_x3 -.align 4 -abi_test_clobber_x3: - mov x3, xzr - ret -.size abi_test_clobber_x3,.-abi_test_clobber_x3 -.type abi_test_clobber_x4, %function -.globl abi_test_clobber_x4 -.hidden abi_test_clobber_x4 -.align 4 -abi_test_clobber_x4: - mov x4, xzr - ret -.size abi_test_clobber_x4,.-abi_test_clobber_x4 -.type abi_test_clobber_x5, %function -.globl abi_test_clobber_x5 -.hidden abi_test_clobber_x5 -.align 4 -abi_test_clobber_x5: - mov x5, xzr - ret -.size abi_test_clobber_x5,.-abi_test_clobber_x5 -.type abi_test_clobber_x6, %function -.globl abi_test_clobber_x6 -.hidden abi_test_clobber_x6 -.align 4 -abi_test_clobber_x6: - mov x6, xzr - ret -.size abi_test_clobber_x6,.-abi_test_clobber_x6 -.type abi_test_clobber_x7, %function -.globl abi_test_clobber_x7 -.hidden abi_test_clobber_x7 -.align 4 -abi_test_clobber_x7: - mov x7, xzr - ret -.size abi_test_clobber_x7,.-abi_test_clobber_x7 -.type abi_test_clobber_x8, %function -.globl abi_test_clobber_x8 -.hidden abi_test_clobber_x8 -.align 4 
-abi_test_clobber_x8: - mov x8, xzr - ret -.size abi_test_clobber_x8,.-abi_test_clobber_x8 -.type abi_test_clobber_x9, %function -.globl abi_test_clobber_x9 -.hidden abi_test_clobber_x9 -.align 4 -abi_test_clobber_x9: - mov x9, xzr - ret -.size abi_test_clobber_x9,.-abi_test_clobber_x9 -.type abi_test_clobber_x10, %function -.globl abi_test_clobber_x10 -.hidden abi_test_clobber_x10 -.align 4 -abi_test_clobber_x10: - mov x10, xzr - ret -.size abi_test_clobber_x10,.-abi_test_clobber_x10 -.type abi_test_clobber_x11, %function -.globl abi_test_clobber_x11 -.hidden abi_test_clobber_x11 -.align 4 -abi_test_clobber_x11: - mov x11, xzr - ret -.size abi_test_clobber_x11,.-abi_test_clobber_x11 -.type abi_test_clobber_x12, %function -.globl abi_test_clobber_x12 -.hidden abi_test_clobber_x12 -.align 4 -abi_test_clobber_x12: - mov x12, xzr - ret -.size abi_test_clobber_x12,.-abi_test_clobber_x12 -.type abi_test_clobber_x13, %function -.globl abi_test_clobber_x13 -.hidden abi_test_clobber_x13 -.align 4 -abi_test_clobber_x13: - mov x13, xzr - ret -.size abi_test_clobber_x13,.-abi_test_clobber_x13 -.type abi_test_clobber_x14, %function -.globl abi_test_clobber_x14 -.hidden abi_test_clobber_x14 -.align 4 -abi_test_clobber_x14: - mov x14, xzr - ret -.size abi_test_clobber_x14,.-abi_test_clobber_x14 -.type abi_test_clobber_x15, %function -.globl abi_test_clobber_x15 -.hidden abi_test_clobber_x15 -.align 4 -abi_test_clobber_x15: - mov x15, xzr - ret -.size abi_test_clobber_x15,.-abi_test_clobber_x15 -.type abi_test_clobber_x16, %function -.globl abi_test_clobber_x16 -.hidden abi_test_clobber_x16 -.align 4 -abi_test_clobber_x16: - mov x16, xzr - ret -.size abi_test_clobber_x16,.-abi_test_clobber_x16 -.type abi_test_clobber_x17, %function -.globl abi_test_clobber_x17 -.hidden abi_test_clobber_x17 -.align 4 -abi_test_clobber_x17: - mov x17, xzr - ret -.size abi_test_clobber_x17,.-abi_test_clobber_x17 -.type abi_test_clobber_x19, %function -.globl abi_test_clobber_x19 -.hidden 
abi_test_clobber_x19 -.align 4 -abi_test_clobber_x19: - mov x19, xzr - ret -.size abi_test_clobber_x19,.-abi_test_clobber_x19 -.type abi_test_clobber_x20, %function -.globl abi_test_clobber_x20 -.hidden abi_test_clobber_x20 -.align 4 -abi_test_clobber_x20: - mov x20, xzr - ret -.size abi_test_clobber_x20,.-abi_test_clobber_x20 -.type abi_test_clobber_x21, %function -.globl abi_test_clobber_x21 -.hidden abi_test_clobber_x21 -.align 4 -abi_test_clobber_x21: - mov x21, xzr - ret -.size abi_test_clobber_x21,.-abi_test_clobber_x21 -.type abi_test_clobber_x22, %function -.globl abi_test_clobber_x22 -.hidden abi_test_clobber_x22 -.align 4 -abi_test_clobber_x22: - mov x22, xzr - ret -.size abi_test_clobber_x22,.-abi_test_clobber_x22 -.type abi_test_clobber_x23, %function -.globl abi_test_clobber_x23 -.hidden abi_test_clobber_x23 -.align 4 -abi_test_clobber_x23: - mov x23, xzr - ret -.size abi_test_clobber_x23,.-abi_test_clobber_x23 -.type abi_test_clobber_x24, %function -.globl abi_test_clobber_x24 -.hidden abi_test_clobber_x24 -.align 4 -abi_test_clobber_x24: - mov x24, xzr - ret -.size abi_test_clobber_x24,.-abi_test_clobber_x24 -.type abi_test_clobber_x25, %function -.globl abi_test_clobber_x25 -.hidden abi_test_clobber_x25 -.align 4 -abi_test_clobber_x25: - mov x25, xzr - ret -.size abi_test_clobber_x25,.-abi_test_clobber_x25 -.type abi_test_clobber_x26, %function -.globl abi_test_clobber_x26 -.hidden abi_test_clobber_x26 -.align 4 -abi_test_clobber_x26: - mov x26, xzr - ret -.size abi_test_clobber_x26,.-abi_test_clobber_x26 -.type abi_test_clobber_x27, %function -.globl abi_test_clobber_x27 -.hidden abi_test_clobber_x27 -.align 4 -abi_test_clobber_x27: - mov x27, xzr - ret -.size abi_test_clobber_x27,.-abi_test_clobber_x27 -.type abi_test_clobber_x28, %function -.globl abi_test_clobber_x28 -.hidden abi_test_clobber_x28 -.align 4 -abi_test_clobber_x28: - mov x28, xzr - ret -.size abi_test_clobber_x28,.-abi_test_clobber_x28 -.type abi_test_clobber_x29, %function -.globl 
abi_test_clobber_x29 -.hidden abi_test_clobber_x29 -.align 4 -abi_test_clobber_x29: - mov x29, xzr - ret -.size abi_test_clobber_x29,.-abi_test_clobber_x29 -.type abi_test_clobber_d0, %function -.globl abi_test_clobber_d0 -.hidden abi_test_clobber_d0 -.align 4 -abi_test_clobber_d0: - fmov d0, xzr - ret -.size abi_test_clobber_d0,.-abi_test_clobber_d0 -.type abi_test_clobber_d1, %function -.globl abi_test_clobber_d1 -.hidden abi_test_clobber_d1 -.align 4 -abi_test_clobber_d1: - fmov d1, xzr - ret -.size abi_test_clobber_d1,.-abi_test_clobber_d1 -.type abi_test_clobber_d2, %function -.globl abi_test_clobber_d2 -.hidden abi_test_clobber_d2 -.align 4 -abi_test_clobber_d2: - fmov d2, xzr - ret -.size abi_test_clobber_d2,.-abi_test_clobber_d2 -.type abi_test_clobber_d3, %function -.globl abi_test_clobber_d3 -.hidden abi_test_clobber_d3 -.align 4 -abi_test_clobber_d3: - fmov d3, xzr - ret -.size abi_test_clobber_d3,.-abi_test_clobber_d3 -.type abi_test_clobber_d4, %function -.globl abi_test_clobber_d4 -.hidden abi_test_clobber_d4 -.align 4 -abi_test_clobber_d4: - fmov d4, xzr - ret -.size abi_test_clobber_d4,.-abi_test_clobber_d4 -.type abi_test_clobber_d5, %function -.globl abi_test_clobber_d5 -.hidden abi_test_clobber_d5 -.align 4 -abi_test_clobber_d5: - fmov d5, xzr - ret -.size abi_test_clobber_d5,.-abi_test_clobber_d5 -.type abi_test_clobber_d6, %function -.globl abi_test_clobber_d6 -.hidden abi_test_clobber_d6 -.align 4 -abi_test_clobber_d6: - fmov d6, xzr - ret -.size abi_test_clobber_d6,.-abi_test_clobber_d6 -.type abi_test_clobber_d7, %function -.globl abi_test_clobber_d7 -.hidden abi_test_clobber_d7 -.align 4 -abi_test_clobber_d7: - fmov d7, xzr - ret -.size abi_test_clobber_d7,.-abi_test_clobber_d7 -.type abi_test_clobber_d8, %function -.globl abi_test_clobber_d8 -.hidden abi_test_clobber_d8 -.align 4 -abi_test_clobber_d8: - fmov d8, xzr - ret -.size abi_test_clobber_d8,.-abi_test_clobber_d8 -.type abi_test_clobber_d9, %function -.globl abi_test_clobber_d9 
-.hidden abi_test_clobber_d9 -.align 4 -abi_test_clobber_d9: - fmov d9, xzr - ret -.size abi_test_clobber_d9,.-abi_test_clobber_d9 -.type abi_test_clobber_d10, %function -.globl abi_test_clobber_d10 -.hidden abi_test_clobber_d10 -.align 4 -abi_test_clobber_d10: - fmov d10, xzr - ret -.size abi_test_clobber_d10,.-abi_test_clobber_d10 -.type abi_test_clobber_d11, %function -.globl abi_test_clobber_d11 -.hidden abi_test_clobber_d11 -.align 4 -abi_test_clobber_d11: - fmov d11, xzr - ret -.size abi_test_clobber_d11,.-abi_test_clobber_d11 -.type abi_test_clobber_d12, %function -.globl abi_test_clobber_d12 -.hidden abi_test_clobber_d12 -.align 4 -abi_test_clobber_d12: - fmov d12, xzr - ret -.size abi_test_clobber_d12,.-abi_test_clobber_d12 -.type abi_test_clobber_d13, %function -.globl abi_test_clobber_d13 -.hidden abi_test_clobber_d13 -.align 4 -abi_test_clobber_d13: - fmov d13, xzr - ret -.size abi_test_clobber_d13,.-abi_test_clobber_d13 -.type abi_test_clobber_d14, %function -.globl abi_test_clobber_d14 -.hidden abi_test_clobber_d14 -.align 4 -abi_test_clobber_d14: - fmov d14, xzr - ret -.size abi_test_clobber_d14,.-abi_test_clobber_d14 -.type abi_test_clobber_d15, %function -.globl abi_test_clobber_d15 -.hidden abi_test_clobber_d15 -.align 4 -abi_test_clobber_d15: - fmov d15, xzr - ret -.size abi_test_clobber_d15,.-abi_test_clobber_d15 -.type abi_test_clobber_d16, %function -.globl abi_test_clobber_d16 -.hidden abi_test_clobber_d16 -.align 4 -abi_test_clobber_d16: - fmov d16, xzr - ret -.size abi_test_clobber_d16,.-abi_test_clobber_d16 -.type abi_test_clobber_d17, %function -.globl abi_test_clobber_d17 -.hidden abi_test_clobber_d17 -.align 4 -abi_test_clobber_d17: - fmov d17, xzr - ret -.size abi_test_clobber_d17,.-abi_test_clobber_d17 -.type abi_test_clobber_d18, %function -.globl abi_test_clobber_d18 -.hidden abi_test_clobber_d18 -.align 4 -abi_test_clobber_d18: - fmov d18, xzr - ret -.size abi_test_clobber_d18,.-abi_test_clobber_d18 -.type abi_test_clobber_d19, 
%function -.globl abi_test_clobber_d19 -.hidden abi_test_clobber_d19 -.align 4 -abi_test_clobber_d19: - fmov d19, xzr - ret -.size abi_test_clobber_d19,.-abi_test_clobber_d19 -.type abi_test_clobber_d20, %function -.globl abi_test_clobber_d20 -.hidden abi_test_clobber_d20 -.align 4 -abi_test_clobber_d20: - fmov d20, xzr - ret -.size abi_test_clobber_d20,.-abi_test_clobber_d20 -.type abi_test_clobber_d21, %function -.globl abi_test_clobber_d21 -.hidden abi_test_clobber_d21 -.align 4 -abi_test_clobber_d21: - fmov d21, xzr - ret -.size abi_test_clobber_d21,.-abi_test_clobber_d21 -.type abi_test_clobber_d22, %function -.globl abi_test_clobber_d22 -.hidden abi_test_clobber_d22 -.align 4 -abi_test_clobber_d22: - fmov d22, xzr - ret -.size abi_test_clobber_d22,.-abi_test_clobber_d22 -.type abi_test_clobber_d23, %function -.globl abi_test_clobber_d23 -.hidden abi_test_clobber_d23 -.align 4 -abi_test_clobber_d23: - fmov d23, xzr - ret -.size abi_test_clobber_d23,.-abi_test_clobber_d23 -.type abi_test_clobber_d24, %function -.globl abi_test_clobber_d24 -.hidden abi_test_clobber_d24 -.align 4 -abi_test_clobber_d24: - fmov d24, xzr - ret -.size abi_test_clobber_d24,.-abi_test_clobber_d24 -.type abi_test_clobber_d25, %function -.globl abi_test_clobber_d25 -.hidden abi_test_clobber_d25 -.align 4 -abi_test_clobber_d25: - fmov d25, xzr - ret -.size abi_test_clobber_d25,.-abi_test_clobber_d25 -.type abi_test_clobber_d26, %function -.globl abi_test_clobber_d26 -.hidden abi_test_clobber_d26 -.align 4 -abi_test_clobber_d26: - fmov d26, xzr - ret -.size abi_test_clobber_d26,.-abi_test_clobber_d26 -.type abi_test_clobber_d27, %function -.globl abi_test_clobber_d27 -.hidden abi_test_clobber_d27 -.align 4 -abi_test_clobber_d27: - fmov d27, xzr - ret -.size abi_test_clobber_d27,.-abi_test_clobber_d27 -.type abi_test_clobber_d28, %function -.globl abi_test_clobber_d28 -.hidden abi_test_clobber_d28 -.align 4 -abi_test_clobber_d28: - fmov d28, xzr - ret -.size 
abi_test_clobber_d28,.-abi_test_clobber_d28 -.type abi_test_clobber_d29, %function -.globl abi_test_clobber_d29 -.hidden abi_test_clobber_d29 -.align 4 -abi_test_clobber_d29: - fmov d29, xzr - ret -.size abi_test_clobber_d29,.-abi_test_clobber_d29 -.type abi_test_clobber_d30, %function -.globl abi_test_clobber_d30 -.hidden abi_test_clobber_d30 -.align 4 -abi_test_clobber_d30: - fmov d30, xzr - ret -.size abi_test_clobber_d30,.-abi_test_clobber_d30 -.type abi_test_clobber_d31, %function -.globl abi_test_clobber_d31 -.hidden abi_test_clobber_d31 -.align 4 -abi_test_clobber_d31: - fmov d31, xzr - ret -.size abi_test_clobber_d31,.-abi_test_clobber_d31 -.type abi_test_clobber_v8_upper, %function -.globl abi_test_clobber_v8_upper -.hidden abi_test_clobber_v8_upper -.align 4 -abi_test_clobber_v8_upper: - fmov v8.d[1], xzr - ret -.size abi_test_clobber_v8_upper,.-abi_test_clobber_v8_upper -.type abi_test_clobber_v9_upper, %function -.globl abi_test_clobber_v9_upper -.hidden abi_test_clobber_v9_upper -.align 4 -abi_test_clobber_v9_upper: - fmov v9.d[1], xzr - ret -.size abi_test_clobber_v9_upper,.-abi_test_clobber_v9_upper -.type abi_test_clobber_v10_upper, %function -.globl abi_test_clobber_v10_upper -.hidden abi_test_clobber_v10_upper -.align 4 -abi_test_clobber_v10_upper: - fmov v10.d[1], xzr - ret -.size abi_test_clobber_v10_upper,.-abi_test_clobber_v10_upper -.type abi_test_clobber_v11_upper, %function -.globl abi_test_clobber_v11_upper -.hidden abi_test_clobber_v11_upper -.align 4 -abi_test_clobber_v11_upper: - fmov v11.d[1], xzr - ret -.size abi_test_clobber_v11_upper,.-abi_test_clobber_v11_upper -.type abi_test_clobber_v12_upper, %function -.globl abi_test_clobber_v12_upper -.hidden abi_test_clobber_v12_upper -.align 4 -abi_test_clobber_v12_upper: - fmov v12.d[1], xzr - ret -.size abi_test_clobber_v12_upper,.-abi_test_clobber_v12_upper -.type abi_test_clobber_v13_upper, %function -.globl abi_test_clobber_v13_upper -.hidden abi_test_clobber_v13_upper -.align 4 
-abi_test_clobber_v13_upper: - fmov v13.d[1], xzr - ret -.size abi_test_clobber_v13_upper,.-abi_test_clobber_v13_upper -.type abi_test_clobber_v14_upper, %function -.globl abi_test_clobber_v14_upper -.hidden abi_test_clobber_v14_upper -.align 4 -abi_test_clobber_v14_upper: - fmov v14.d[1], xzr - ret -.size abi_test_clobber_v14_upper,.-abi_test_clobber_v14_upper -.type abi_test_clobber_v15_upper, %function -.globl abi_test_clobber_v15_upper -.hidden abi_test_clobber_v15_upper -.align 4 -abi_test_clobber_v15_upper: - fmov v15.d[1], xzr - ret -.size abi_test_clobber_v15_upper,.-abi_test_clobber_v15_upper -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-aarch64/crypto/third_party/sike/asm/fp-armv8.S b/packager/third_party/boringssl/linux-aarch64/crypto/third_party/sike/asm/fp-armv8.S deleted file mode 100644 index 63e8d1c381..0000000000 --- a/packager/third_party/boringssl/linux-aarch64/crypto/third_party/sike/asm/fp-armv8.S +++ /dev/null @@ -1,999 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__aarch64__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.section .rodata - -# p434 x 2 -.Lp434x2: -.quad 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF -.quad 0xFB82ECF5C5FFFFFF, 0xF78CB8F062B15D47 -.quad 0xD9F8BFAD038A40AC, 0x0004683E4E2EE688 - -# p434 + 1 -.Lp434p1: -.quad 0xFDC1767AE3000000, 0x7BC65C783158AEA3 -.quad 0x6CFC5FD681C52056, 0x0002341F27177344 - -.text -.globl sike_mpmul -.hidden sike_mpmul -.align 4 -sike_mpmul: - stp x29, x30, [sp,#-96]! 
- add x29, sp, #0 - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - - ldp x3, x4, [x0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x10, x11, [x1,#0] - ldp x12, x13, [x1,#16] - ldp x14, x15, [x1,#32] - ldr x16, [x1,#48] - - // x3-x7 <- AH + AL, x7 <- carry - adds x3, x3, x7 - adcs x4, x4, x8 - adcs x5, x5, x9 - adcs x6, x6, xzr - adc x7, xzr, xzr - - // x10-x13 <- BH + BL, x8 <- carry - adds x10, x10, x14 - adcs x11, x11, x15 - adcs x12, x12, x16 - adcs x13, x13, xzr - adc x8, xzr, xzr - - // x9 <- combined carry - and x9, x7, x8 - // x7-x8 <- mask - sub x7, xzr, x7 - sub x8, xzr, x8 - - // x15-x19 <- masked (BH + BL) - and x14, x10, x7 - and x15, x11, x7 - and x16, x12, x7 - and x17, x13, x7 - - // x20-x23 <- masked (AH + AL) - and x20, x3, x8 - and x21, x4, x8 - and x22, x5, x8 - and x23, x6, x8 - - // x15-x19, x7 <- masked (AH+AL) + masked (BH+BL), step 1 - adds x14, x14, x20 - adcs x15, x15, x21 - adcs x16, x16, x22 - adcs x17, x17, x23 - adc x7, x9, xzr - - // x8-x9,x19,x20-x24 <- (AH+AL) x (BH+BL), low part - stp x3, x4, [x2,#0] - // A0-A1 <- AH + AL, T0 <- mask - adds x3, x3, x5 - adcs x4, x4, x6 - adc x25, xzr, xzr - - // C6, T1 <- BH + BL, C7 <- mask - adds x23, x10, x12 - adcs x26, x11, x13 - adc x24, xzr, xzr - - // C0-C1 <- masked (BH + BL) - sub x19, xzr, x25 - sub x20, xzr, x24 - and x8, x23, x19 - and x9, x26, x19 - - // C4-C5 <- masked (AH + AL), T0 <- combined carry - and x21, x3, x20 - and x22, x4, x20 - mul x19, x3, x23 - mul x20, x3, x26 - and x25, x25, x24 - - // C0-C1, T0 <- (AH+AL) x (BH+BL), part 1 - adds x8, x21, x8 - umulh x21, x3, x26 - adcs x9, x22, x9 - umulh x22, x3, x23 - adc x25, x25, xzr - - // C2-C5 <- (AH+AL) x (BH+BL), low part - mul x3, x4, x23 - umulh x23, x4, x23 - adds x20, x20, x22 - adc x21, x21, xzr - - mul x24, x4, x26 - umulh x26, x4, x26 - adds x20, x20, x3 - adcs x21, x21, x23 - adc x22, xzr, xzr - - adds x21, x21, 
x24 - adc x22, x22, x26 - - ldp x3, x4, [x2,#0] - - // C2-C5, T0 <- (AH+AL) x (BH+BL), final part - adds x21, x8, x21 - umulh x24, x3, x10 - umulh x26, x3, x11 - adcs x22, x9, x22 - mul x8, x3, x10 - mul x9, x3, x11 - adc x25, x25, xzr - - // C0-C1, T1, C7 <- AL x BL - mul x3, x4, x10 - umulh x10, x4, x10 - adds x9, x9, x24 - adc x26, x26, xzr - - mul x23, x4, x11 - umulh x11, x4, x11 - adds x9, x9, x3 - adcs x26, x26, x10 - adc x24, xzr, xzr - - adds x26, x26, x23 - adc x24, x24, x11 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - mul x3, x5, x12 - umulh x10, x5, x12 - subs x19, x19, x8 - sbcs x20, x20, x9 - sbcs x21, x21, x26 - mul x4, x5, x13 - umulh x23, x5, x13 - sbcs x22, x22, x24 - sbc x25, x25, xzr - - // A0, A1, C6, B0 <- AH x BH - mul x5, x6, x12 - umulh x12, x6, x12 - adds x4, x4, x10 - adc x23, x23, xzr - - mul x11, x6, x13 - umulh x13, x6, x13 - adds x4, x4, x5 - adcs x23, x23, x12 - adc x10, xzr, xzr - - adds x23, x23, x11 - adc x10, x10, x13 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x19, x19, x3 - sbcs x20, x20, x4 - sbcs x21, x21, x23 - sbcs x22, x22, x10 - sbc x25, x25, xzr - - adds x19, x19, x26 - adcs x20, x20, x24 - adcs x21, x21, x3 - adcs x22, x22, x4 - adcs x23, x25, x23 - adc x24, x10, xzr - - - // x15-x19, x7 <- (AH+AL) x (BH+BL), final step - adds x14, x14, x21 - adcs x15, x15, x22 - adcs x16, x16, x23 - adcs x17, x17, x24 - adc x7, x7, xzr - - // Load AL - ldp x3, x4, [x0] - ldp x5, x6, [x0,#16] - // Load BL - ldp x10, x11, [x1,#0] - ldp x12, x13, [x1,#16] - - // Temporarily store x8 in x2 - stp x8, x9, [x2,#0] - // x21-x28 <- AL x BL - // A0-A1 <- AH + AL, T0 <- mask - adds x3, x3, x5 - adcs x4, x4, x6 - adc x8, xzr, xzr - - // C6, T1 <- BH + BL, C7 <- mask - adds x27, x10, x12 - adcs x9, x11, x13 - adc x28, xzr, xzr - - // C0-C1 <- masked (BH + BL) - sub x23, xzr, x8 - sub x24, xzr, x28 - and x21, x27, x23 - and x22, x9, x23 - - // C4-C5 <- masked (AH + AL), T0 <- combined carry - and x25, x3, x24 - and x26, x4, 
x24 - mul x23, x3, x27 - mul x24, x3, x9 - and x8, x8, x28 - - // C0-C1, T0 <- (AH+AL) x (BH+BL), part 1 - adds x21, x25, x21 - umulh x25, x3, x9 - adcs x22, x26, x22 - umulh x26, x3, x27 - adc x8, x8, xzr - - // C2-C5 <- (AH+AL) x (BH+BL), low part - mul x3, x4, x27 - umulh x27, x4, x27 - adds x24, x24, x26 - adc x25, x25, xzr - - mul x28, x4, x9 - umulh x9, x4, x9 - adds x24, x24, x3 - adcs x25, x25, x27 - adc x26, xzr, xzr - - adds x25, x25, x28 - adc x26, x26, x9 - - ldp x3, x4, [x0,#0] - - // C2-C5, T0 <- (AH+AL) x (BH+BL), final part - adds x25, x21, x25 - umulh x28, x3, x10 - umulh x9, x3, x11 - adcs x26, x22, x26 - mul x21, x3, x10 - mul x22, x3, x11 - adc x8, x8, xzr - - // C0-C1, T1, C7 <- AL x BL - mul x3, x4, x10 - umulh x10, x4, x10 - adds x22, x22, x28 - adc x9, x9, xzr - - mul x27, x4, x11 - umulh x11, x4, x11 - adds x22, x22, x3 - adcs x9, x9, x10 - adc x28, xzr, xzr - - adds x9, x9, x27 - adc x28, x28, x11 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - mul x3, x5, x12 - umulh x10, x5, x12 - subs x23, x23, x21 - sbcs x24, x24, x22 - sbcs x25, x25, x9 - mul x4, x5, x13 - umulh x27, x5, x13 - sbcs x26, x26, x28 - sbc x8, x8, xzr - - // A0, A1, C6, B0 <- AH x BH - mul x5, x6, x12 - umulh x12, x6, x12 - adds x4, x4, x10 - adc x27, x27, xzr - - mul x11, x6, x13 - umulh x13, x6, x13 - adds x4, x4, x5 - adcs x27, x27, x12 - adc x10, xzr, xzr - - adds x27, x27, x11 - adc x10, x10, x13 - - - // C2-C5, T0 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x23, x23, x3 - sbcs x24, x24, x4 - sbcs x25, x25, x27 - sbcs x26, x26, x10 - sbc x8, x8, xzr - - adds x23, x23, x9 - adcs x24, x24, x28 - adcs x25, x25, x3 - adcs x26, x26, x4 - adcs x27, x8, x27 - adc x28, x10, xzr - - // Restore x8 - ldp x8, x9, [x2,#0] - - // x8-x10,x20,x15-x17,x19 <- maskd (AH+AL) x (BH+BL) - ALxBL - subs x8, x8, x21 - sbcs x9, x9, x22 - sbcs x19, x19, x23 - sbcs x20, x20, x24 - sbcs x14, x14, x25 - sbcs x15, x15, x26 - sbcs x16, x16, x27 - sbcs x17, x17, x28 - sbc x7, x7, xzr - - // Store 
ALxBL, low - stp x21, x22, [x2] - stp x23, x24, [x2,#16] - - // Load AH - ldp x3, x4, [x0,#32] - ldr x5, [x0,#48] - // Load BH - ldp x10, x11, [x1,#32] - ldr x12, [x1,#48] - - adds x8, x8, x25 - adcs x9, x9, x26 - adcs x19, x19, x27 - adcs x20, x20, x28 - adc x1, xzr, xzr - - add x0, x0, #32 - // Temporarily store x8,x9 in x2 - stp x8,x9, [x2,#32] - // x21-x28 <- AH x BH - - // A0 * B0 - mul x21, x3, x10 // C0 - umulh x24, x3, x10 - - // A0 * B1 - mul x22, x3, x11 - umulh x23, x3, x11 - - // A1 * B0 - mul x8, x4, x10 - umulh x9, x4, x10 - adds x22, x22, x24 - adc x23, x23, xzr - - // A0 * B2 - mul x27, x3, x12 - umulh x28, x3, x12 - adds x22, x22, x8 // C1 - adcs x23, x23, x9 - adc x24, xzr, xzr - - // A2 * B0 - mul x8, x5, x10 - umulh x25, x5, x10 - adds x23, x23, x27 - adcs x24, x24, x25 - adc x25, xzr, xzr - - // A1 * B1 - mul x27, x4, x11 - umulh x9, x4, x11 - adds x23, x23, x8 - adcs x24, x24, x28 - adc x25, x25, xzr - - // A1 * B2 - mul x8, x4, x12 - umulh x28, x4, x12 - adds x23, x23, x27 // C2 - adcs x24, x24, x9 - adc x25, x25, xzr - - // A2 * B1 - mul x27, x5, x11 - umulh x9, x5, x11 - adds x24, x24, x8 - adcs x25, x25, x28 - adc x26, xzr, xzr - - // A2 * B2 - mul x8, x5, x12 - umulh x28, x5, x12 - adds x24, x24, x27 // C3 - adcs x25, x25, x9 - adc x26, x26, xzr - - adds x25, x25, x8 // C4 - adc x26, x26, x28 // C5 - - // Restore x8,x9 - ldp x8,x9, [x2,#32] - - neg x1, x1 - - // x8-x9,x19,x20,x14-x17 <- (AH+AL) x (BH+BL) - ALxBL - AHxBH - subs x8, x8, x21 - sbcs x9, x9, x22 - sbcs x19, x19, x23 - sbcs x20, x20, x24 - sbcs x14, x14, x25 - sbcs x15, x15, x26 - sbcs x16, x16, xzr - sbcs x17, x17, xzr - sbc x7, x7, xzr - - // Store (AH+AL) x (BH+BL) - ALxBL - AHxBH, low - stp x8, x9, [x2,#32] - stp x19, x20, [x2,#48] - - adds x1, x1, #1 - adcs x14, x14, x21 - adcs x15, x15, x22 - adcs x16, x16, x23 - adcs x17, x17, x24 - adcs x25, x7, x25 - adc x26, x26, xzr - - stp x14, x15, [x2,#64] - stp x16, x17, [x2,#80] - stp x25, x26, [x2,#96] - - ldp x19, x20, 
[x29,#16] - ldp x21, x22, [x29,#32] - ldp x23, x24, [x29,#48] - ldp x25, x26, [x29,#64] - ldp x27, x28, [x29,#80] - ldp x29, x30, [sp],#96 - ret -.globl sike_fprdc -.hidden sike_fprdc -.align 4 -sike_fprdc: - stp x29, x30, [sp, #-96]! - add x29, sp, xzr - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - - ldp x2, x3, [x0,#0] // a[0-1] - - // Load the prime constant - adrp x26, .Lp434p1 - add x26, x26, :lo12:.Lp434p1 - ldp x23, x24, [x26, #0x0] - ldp x25, x26, [x26,#0x10] - - // a[0-1] * p434+1 - mul x4, x2, x23 // C0 - umulh x7, x2, x23 - - mul x5, x2, x24 - umulh x6, x2, x24 - - mul x10, x3, x23 - umulh x11, x3, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x2, x25 - umulh x28, x2, x25 - adds x5, x5, x10 // C1 - adcs x6, x6, x11 - adc x7, xzr, xzr - - mul x10, x3, x24 - umulh x11, x3, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x2, x26 - umulh x28, x2, x26 - adds x6, x6, x10 // C2 - adcs x7, x7, x11 - adc x8, x8, xzr - - mul x10, x3, x25 - umulh x11, x3, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x3, x26 - umulh x28, x3, x26 - adds x7, x7, x10 // C3 - adcs x8, x8, x11 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - - ldp x10, x11, [x0, #0x18] - ldp x12, x13, [x0, #0x28] - ldp x14, x15, [x0, #0x38] - ldp x16, x17, [x0, #0x48] - ldp x19, x20, [x0, #0x58] - ldr x21, [x0, #0x68] - - adds x10, x10, x4 - adcs x11, x11, x5 - adcs x12, x12, x6 - adcs x13, x13, x7 - adcs x14, x14, x8 - adcs x15, x15, x9 - adcs x22, x16, xzr - adcs x17, x17, xzr - adcs x19, x19, xzr - adcs x20, x20, xzr - adc x21, x21, xzr - - ldr x2, [x0,#0x10] // a[2] - // a[2-3] * p434+1 - mul x4, x2, x23 // C0 - umulh x7, x2, x23 - - mul x5, x2, x24 - umulh x6, x2, x24 - - mul x0, x10, x23 - umulh x3, x10, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x2, x25 - umulh x28, x2, x25 - adds x5, x5, x0 // C1 - adcs x6, x6, x3 - adc x7, 
xzr, xzr - - mul x0, x10, x24 - umulh x3, x10, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x2, x26 - umulh x28, x2, x26 - adds x6, x6, x0 // C2 - adcs x7, x7, x3 - adc x8, x8, xzr - - mul x0, x10, x25 - umulh x3, x10, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x10, x26 - umulh x28, x10, x26 - adds x7, x7, x0 // C3 - adcs x8, x8, x3 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - - adds x12, x12, x4 - adcs x13, x13, x5 - adcs x14, x14, x6 - adcs x15, x15, x7 - adcs x16, x22, x8 - adcs x17, x17, x9 - adcs x22, x19, xzr - adcs x20, x20, xzr - adc x21, x21, xzr - - mul x4, x11, x23 // C0 - umulh x7, x11, x23 - - mul x5, x11, x24 - umulh x6, x11, x24 - - mul x10, x12, x23 - umulh x3, x12, x23 - adds x5, x5, x7 - adc x6, x6, xzr - - mul x27, x11, x25 - umulh x28, x11, x25 - adds x5, x5, x10 // C1 - adcs x6, x6, x3 - adc x7, xzr, xzr - - mul x10, x12, x24 - umulh x3, x12, x24 - adds x6, x6, x27 - adcs x7, x7, x28 - adc x8, xzr, xzr - - mul x27, x11, x26 - umulh x28, x11, x26 - adds x6, x6, x10 // C2 - adcs x7, x7, x3 - adc x8, x8, xzr - - mul x10, x12, x25 - umulh x3, x12, x25 - adds x7, x7, x27 - adcs x8, x8, x28 - adc x9, xzr, xzr - - mul x27, x12, x26 - umulh x28, x12, x26 - adds x7, x7, x10 // C3 - adcs x8, x8, x3 - adc x9, x9, xzr - adds x8, x8, x27 // C4 - adc x9, x9, x28 // C5 - - - adds x14, x14, x4 - adcs x15, x15, x5 - adcs x16, x16, x6 - adcs x17, x17, x7 - adcs x19, x22, x8 - adcs x20, x20, x9 - adc x22, x21, xzr - - stp x14, x15, [x1, #0x0] // C0, C1 - - mul x4, x13, x23 // C0 - umulh x10, x13, x23 - - mul x5, x13, x24 - umulh x27, x13, x24 - adds x5, x5, x10 // C1 - adc x10, xzr, xzr - - mul x6, x13, x25 - umulh x28, x13, x25 - adds x27, x10, x27 - adcs x6, x6, x27 // C2 - adc x10, xzr, xzr - - mul x7, x13, x26 - umulh x8, x13, x26 - adds x28, x10, x28 - adcs x7, x7, x28 // C3 - adc x8, x8, xzr // C4 - - adds x16, x16, x4 - adcs x17, x17, x5 - adcs x19, x19, x6 - adcs x20, 
x20, x7 - adc x21, x22, x8 - - str x16, [x1, #0x10] - stp x17, x19, [x1, #0x18] - stp x20, x21, [x1, #0x28] - - ldp x19, x20, [x29,#16] - ldp x21, x22, [x29,#32] - ldp x23, x24, [x29,#48] - ldp x25, x26, [x29,#64] - ldp x27, x28, [x29,#80] - ldp x29, x30, [sp],#96 - ret -.globl sike_fpadd -.hidden sike_fpadd -.align 4 -sike_fpadd: - stp x29,x30, [sp,#-16]! - add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - // Add a + b - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x13 - adcs x6, x6, x14 - adcs x7, x7, x15 - adcs x8, x8, x16 - adc x9, x9, x17 - - // Subtract 2xp434 - adrp x17, .Lp434x2 - add x17, x17, :lo12:.Lp434x2 - ldp x11, x12, [x17, #0] - ldp x13, x14, [x17, #16] - ldp x15, x16, [x17, #32] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x12 - sbcs x6, x6, x13 - sbcs x7, x7, x14 - sbcs x8, x8, x15 - sbcs x9, x9, x16 - sbc x0, xzr, xzr // x0 can be reused now - - // Add 2xp434 anded with the mask in x0 - and x11, x11, x0 - and x12, x12, x0 - and x13, x13, x0 - and x14, x14, x0 - and x15, x15, x0 - and x16, x16, x0 - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x12 - adcs x6, x6, x13 - adcs x7, x7, x14 - adcs x8, x8, x15 - adc x9, x9, x16 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl sike_fpsub -.hidden sike_fpsub -.align 4 -sike_fpsub: - stp x29, x30, [sp,#-16]! 
- add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - // Subtract a - b - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - sbcs x9, x9, x17 - sbc x0, xzr, xzr - - // Add 2xp434 anded with the mask in x0 - adrp x17, .Lp434x2 - add x17, x17, :lo12:.Lp434x2 - - // First half - ldp x11, x12, [x17, #0] - ldp x13, x14, [x17, #16] - ldp x15, x16, [x17, #32] - - // Add 2xp434 anded with the mask in x0 - and x11, x11, x0 - and x12, x12, x0 - and x13, x13, x0 - and x14, x14, x0 - and x15, x15, x0 - and x16, x16, x0 - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x12 - adcs x6, x6, x13 - adcs x7, x7, x14 - adcs x8, x8, x15 - adc x9, x9, x16 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl sike_mpadd_asm -.hidden sike_mpadd_asm -.align 4 -sike_mpadd_asm: - stp x29, x30, [sp,#-16]! - add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x7, x8, [x0,#32] - ldr x9, [x0,#48] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - ldr x17, [x1,#48] - - adds x3, x3, x11 - adcs x4, x4, x12 - adcs x5, x5, x13 - adcs x6, x6, x14 - adcs x7, x7, x15 - adcs x8, x8, x16 - adc x9, x9, x17 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - str x9, [x2,#48] - - ldp x29, x30, [sp],#16 - ret -.globl sike_mpsubx2_asm -.hidden sike_mpsubx2_asm -.align 4 -sike_mpsubx2_asm: - stp x29, x30, [sp,#-16]! 
- add x29, sp, #0 - - ldp x3, x4, [x0,#0] - ldp x5, x6, [x0,#16] - ldp x11, x12, [x1,#0] - ldp x13, x14, [x1,#16] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - ldp x7, x8, [x0,#32] - ldp x9, x10, [x0,#48] - ldp x11, x12, [x1,#32] - ldp x13, x14, [x1,#48] - sbcs x7, x7, x11 - sbcs x8, x8, x12 - sbcs x9, x9, x13 - sbcs x10, x10, x14 - - stp x3, x4, [x2,#0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - stp x9, x10, [x2,#48] - - ldp x3, x4, [x0,#64] - ldp x5, x6, [x0,#80] - ldp x11, x12, [x1,#64] - ldp x13, x14, [x1,#80] - sbcs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - ldp x7, x8, [x0,#96] - ldp x11, x12, [x1,#96] - sbcs x7, x7, x11 - sbcs x8, x8, x12 - sbc x0, xzr, xzr - - stp x3, x4, [x2,#64] - stp x5, x6, [x2,#80] - stp x7, x8, [x2,#96] - - ldp x29, x30, [sp],#16 - ret -.globl sike_mpdblsubx2_asm -.hidden sike_mpdblsubx2_asm -.align 4 -sike_mpdblsubx2_asm: - stp x29, x30, [sp, #-16]! - add x29, sp, #0 - - ldp x3, x4, [x2, #0] - ldp x5, x6, [x2,#16] - ldp x7, x8, [x2,#32] - - ldp x11, x12, [x0, #0] - ldp x13, x14, [x0,#16] - ldp x15, x16, [x0,#32] - - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - - // x9 stores carry - adc x9, xzr, xzr - - ldp x11, x12, [x1, #0] - ldp x13, x14, [x1,#16] - ldp x15, x16, [x1,#32] - subs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, x9, xzr - - stp x3, x4, [x2, #0] - stp x5, x6, [x2,#16] - stp x7, x8, [x2,#32] - - ldp x3, x4, [x2,#48] - ldp x5, x6, [x2,#64] - ldp x7, x8, [x2,#80] - - ldp x11, x12, [x0,#48] - ldp x13, x14, [x0,#64] - ldp x15, x16, [x0,#80] - - // x9 = 2 - x9 - neg x9, x9 - add x9, x9, #2 - - subs x3, x3, x9 - sbcs x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, xzr, xzr - - ldp x11, x12, [x1,#48] - ldp x13, x14, [x1,#64] - ldp x15, x16, [x1,#80] - subs 
x3, x3, x11 - sbcs x4, x4, x12 - sbcs x5, x5, x13 - sbcs x6, x6, x14 - sbcs x7, x7, x15 - sbcs x8, x8, x16 - adc x9, x9, xzr - - stp x3, x4, [x2,#48] - stp x5, x6, [x2,#64] - stp x7, x8, [x2,#80] - - ldp x3, x4, [x2,#96] - ldp x11, x12, [x0,#96] - ldp x13, x14, [x1,#96] - - // x9 = 2 - x9 - neg x9, x9 - add x9, x9, #2 - - subs x3, x3, x9 - sbcs x3, x3, x11 - sbcs x4, x4, x12 - subs x3, x3, x13 - sbc x4, x4, x14 - stp x3, x4, [x2,#96] - - ldp x29, x30, [sp],#16 - ret -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/chacha/chacha-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/chacha/chacha-armv4.S deleted file mode 100644 index 363aeee5f5..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/chacha/chacha-armv4.S +++ /dev/null @@ -1,1493 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
-.arch armv7-a - -.text -#if defined(__thumb2__) || defined(__clang__) -.syntax unified -#endif -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - -#if defined(__thumb2__) || defined(__clang__) -#define ldrhsb ldrbhs -#endif - -.align 5 -.Lsigma: -.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral -.Lone: -.long 1,0,0,0 -#if __ARM_MAX_ARCH__>=7 -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-.LChaCha20_ctr32 -#else -.word -1 -#endif - -.globl ChaCha20_ctr32 -.hidden ChaCha20_ctr32 -.type ChaCha20_ctr32,%function -.align 5 -ChaCha20_ctr32: -.LChaCha20_ctr32: - ldr r12,[sp,#0] @ pull pointer to counter and nonce - stmdb sp!,{r0,r1,r2,r4-r11,lr} -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r14,pc,#16 @ ChaCha20_ctr32 -#else - adr r14,.LChaCha20_ctr32 -#endif - cmp r2,#0 @ len==0? -#ifdef __thumb2__ - itt eq -#endif - addeq sp,sp,#4*3 - beq .Lno_data -#if __ARM_MAX_ARCH__>=7 - cmp r2,#192 @ test len - bls .Lshort - ldr r4,[r14,#-32] - ldr r4,[r14,r4] -# ifdef __APPLE__ - ldr r4,[r4] -# endif - tst r4,#ARMV7_NEON - bne .LChaCha20_neon -.Lshort: -#endif - ldmia r12,{r4,r5,r6,r7} @ load counter and nonce - sub sp,sp,#4*(16) @ off-load area - sub r14,r14,#64 @ .Lsigma - stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce - ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key - ldmia r14,{r0,r1,r2,r3} @ load sigma - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key - stmdb sp!,{r0,r1,r2,r3} @ copy sigma - str r10,[sp,#4*(16+10)] @ off-load "rx" - str r11,[sp,#4*(16+11)] @ off-load "rx" - b .Loop_outer_enter - -.align 4 -.Loop_outer: - ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material - str r11,[sp,#4*(32+2)] @ save len - str r12, [sp,#4*(32+1)] @ save inp - str r14, [sp,#4*(32+0)] @ save out -.Loop_outer_enter: - ldr r11, [sp,#4*(15)] - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - ldr r10, [sp,#4*(13)] - ldr r14,[sp,#4*(14)] - str r11, [sp,#4*(16+15)] - mov r11,#10 - b .Loop - -.align 4 -.Loop: - subs r11,r11,#1 - add r0,r0,r4 - mov 
r12,r12,ror#16 - add r1,r1,r5 - mov r10,r10,ror#16 - eor r12,r12,r0,ror#16 - eor r10,r10,r1,ror#16 - add r8,r8,r12 - mov r4,r4,ror#20 - add r9,r9,r10 - mov r5,r5,ror#20 - eor r4,r4,r8,ror#20 - eor r5,r5,r9,ror#20 - add r0,r0,r4 - mov r12,r12,ror#24 - add r1,r1,r5 - mov r10,r10,ror#24 - eor r12,r12,r0,ror#24 - eor r10,r10,r1,ror#24 - add r8,r8,r12 - mov r4,r4,ror#25 - add r9,r9,r10 - mov r5,r5,ror#25 - str r10,[sp,#4*(16+13)] - ldr r10,[sp,#4*(16+15)] - eor r4,r4,r8,ror#25 - eor r5,r5,r9,ror#25 - str r8,[sp,#4*(16+8)] - ldr r8,[sp,#4*(16+10)] - add r2,r2,r6 - mov r14,r14,ror#16 - str r9,[sp,#4*(16+9)] - ldr r9,[sp,#4*(16+11)] - add r3,r3,r7 - mov r10,r10,ror#16 - eor r14,r14,r2,ror#16 - eor r10,r10,r3,ror#16 - add r8,r8,r14 - mov r6,r6,ror#20 - add r9,r9,r10 - mov r7,r7,ror#20 - eor r6,r6,r8,ror#20 - eor r7,r7,r9,ror#20 - add r2,r2,r6 - mov r14,r14,ror#24 - add r3,r3,r7 - mov r10,r10,ror#24 - eor r14,r14,r2,ror#24 - eor r10,r10,r3,ror#24 - add r8,r8,r14 - mov r6,r6,ror#25 - add r9,r9,r10 - mov r7,r7,ror#25 - eor r6,r6,r8,ror#25 - eor r7,r7,r9,ror#25 - add r0,r0,r5 - mov r10,r10,ror#16 - add r1,r1,r6 - mov r12,r12,ror#16 - eor r10,r10,r0,ror#16 - eor r12,r12,r1,ror#16 - add r8,r8,r10 - mov r5,r5,ror#20 - add r9,r9,r12 - mov r6,r6,ror#20 - eor r5,r5,r8,ror#20 - eor r6,r6,r9,ror#20 - add r0,r0,r5 - mov r10,r10,ror#24 - add r1,r1,r6 - mov r12,r12,ror#24 - eor r10,r10,r0,ror#24 - eor r12,r12,r1,ror#24 - add r8,r8,r10 - mov r5,r5,ror#25 - str r10,[sp,#4*(16+15)] - ldr r10,[sp,#4*(16+13)] - add r9,r9,r12 - mov r6,r6,ror#25 - eor r5,r5,r8,ror#25 - eor r6,r6,r9,ror#25 - str r8,[sp,#4*(16+10)] - ldr r8,[sp,#4*(16+8)] - add r2,r2,r7 - mov r10,r10,ror#16 - str r9,[sp,#4*(16+11)] - ldr r9,[sp,#4*(16+9)] - add r3,r3,r4 - mov r14,r14,ror#16 - eor r10,r10,r2,ror#16 - eor r14,r14,r3,ror#16 - add r8,r8,r10 - mov r7,r7,ror#20 - add r9,r9,r14 - mov r4,r4,ror#20 - eor r7,r7,r8,ror#20 - eor r4,r4,r9,ror#20 - add r2,r2,r7 - mov r10,r10,ror#24 - add r3,r3,r4 - mov r14,r14,ror#24 - eor 
r10,r10,r2,ror#24 - eor r14,r14,r3,ror#24 - add r8,r8,r10 - mov r7,r7,ror#25 - add r9,r9,r14 - mov r4,r4,ror#25 - eor r7,r7,r8,ror#25 - eor r4,r4,r9,ror#25 - bne .Loop - - ldr r11,[sp,#4*(32+2)] @ load len - - str r8, [sp,#4*(16+8)] @ modulo-scheduled store - str r9, [sp,#4*(16+9)] - str r12,[sp,#4*(16+12)] - str r10, [sp,#4*(16+13)] - str r14,[sp,#4*(16+14)] - - @ at this point we have first half of 512-bit result in - @ rx and second half at sp+4*(16+8) - - cmp r11,#64 @ done yet? -#ifdef __thumb2__ - itete lo -#endif - addlo r12,sp,#4*(0) @ shortcut or ... - ldrhs r12,[sp,#4*(32+1)] @ ... load inp - addlo r14,sp,#4*(0) @ shortcut or ... - ldrhs r14,[sp,#4*(32+0)] @ ... load out - - ldr r8,[sp,#4*(0)] @ load key material - ldr r9,[sp,#4*(1)] - -#if __ARM_ARCH__>=6 || !defined(__ARMEB__) -# if __ARM_ARCH__<7 - orr r10,r12,r14 - tst r10,#3 @ are input and output aligned? - ldr r10,[sp,#4*(2)] - bne .Lunaligned - cmp r11,#64 @ restore flags -# else - ldr r10,[sp,#4*(2)] -# endif - ldr r11,[sp,#4*(3)] - - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - - add r2,r2,r10 - add r3,r3,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r0,r0,r8 @ xor with input - eorhs r1,r1,r9 - add r8,sp,#4*(4) - str r0,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r2,r2,r10 - eorhs r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r1,[r14,#-12] - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - add r6,r6,r10 - add r7,r7,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && 
defined(__ARMEB__) - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r4,r4,r8 - eorhs r5,r5,r9 - add r8,sp,#4*(8) - str r4,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r6,r6,r10 - eorhs r7,r7,r11 - str r5,[r14,#-12] - ldmia r8,{r8,r9,r10,r11} @ load key material - str r6,[r14,#-8] - add r0,sp,#4*(16+8) - str r7,[r14,#-4] - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] -# ifdef __thumb2__ - itt hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it - strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it - add r2,r2,r10 - add r3,r3,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r0,r0,r8 - eorhs r1,r1,r9 - add r8,sp,#4*(12) - str r0,[r14],#16 @ store output -# ifdef __thumb2__ - itt hs -# endif - eorhs r2,r2,r10 - eorhs r3,r3,r11 - str r1,[r14,#-12] - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 -# ifdef __thumb2__ - itt hi -# endif - addhi r8,r8,#1 @ next counter value - strhi r8,[sp,#4*(12)] @ save next counter value -# ifdef __thumb2__ - itt hs -# endif - ldrhs r8,[r12],#16 @ load input - ldrhs r9,[r12,#-12] - add r6,r6,r10 - add r7,r7,r11 -# ifdef __thumb2__ - itt hs -# endif - ldrhs r10,[r12,#-8] - ldrhs r11,[r12,#-4] -# if __ARM_ARCH__>=6 && defined(__ARMEB__) - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif -# ifdef __thumb2__ - itt hs -# endif - eorhs r4,r4,r8 - eorhs r5,r5,r9 -# ifdef __thumb2__ - it ne -# endif - ldrne r8,[sp,#4*(32+2)] @ re-load len -# ifdef __thumb2__ - itt hs -# endif - eorhs r6,r6,r10 - eorhs r7,r7,r11 - str 
r4,[r14],#16 @ store output - str r5,[r14,#-12] -# ifdef __thumb2__ - it hs -# endif - subhs r11,r8,#64 @ len-=64 - str r6,[r14,#-8] - str r7,[r14,#-4] - bhi .Loop_outer - - beq .Ldone -# if __ARM_ARCH__<7 - b .Ltail - -.align 4 -.Lunaligned:@ unaligned endian-neutral path - cmp r11,#64 @ restore flags -# endif -#endif -#if __ARM_ARCH__<7 - ldr r11,[sp,#4*(3)] - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 - add r2,r2,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r3,r3,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r0,r8,r0 @ xor with input (or zero) - eor r1,r9,r1 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r2,r10,r2 - strb r0,[r14],#16 @ store output - eor r3,r11,r3 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r1,[r14,#-12] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-8] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r3,[r14,#-4] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-15] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r1,[r14,#-11] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-7] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r3,[r14,#-3] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-14] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r1,[r14,#-10] - strb r2,[r14,#-6] - eor r0,r8,r0,lsr#8 - strb r3,[r14,#-2] - eor r1,r9,r1,lsr#8 - strb r0,[r14,#-13] - eor r2,r10,r2,lsr#8 - strb r1,[r14,#-9] - eor r3,r11,r3,lsr#8 - strb r2,[r14,#-5] - strb r3,[r14,#-1] - add r8,sp,#4*(4+0) - 
ldmia r8,{r8,r9,r10,r11} @ load key material - add r0,sp,#4*(16+8) - add r4,r4,r8 @ accumulate key material - add r5,r5,r9 - add r6,r6,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r7,r7,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r4,r8,r4 @ xor with input (or zero) - eor r5,r9,r5 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r6,r10,r6 - strb r4,[r14],#16 @ store output - eor r7,r11,r7 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r5,[r14,#-12] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-8] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r7,[r14,#-4] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-15] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r5,[r14,#-11] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-7] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r7,[r14,#-3] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-14] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r5,[r14,#-10] - strb r6,[r14,#-6] - eor r4,r8,r4,lsr#8 - strb r7,[r14,#-2] - eor r5,r9,r5,lsr#8 - strb r4,[r14,#-13] - eor r6,r10,r6,lsr#8 - strb r5,[r14,#-9] - eor r7,r11,r7,lsr#8 - strb r6,[r14,#-5] - strb r7,[r14,#-1] - add r8,sp,#4*(4+4) - ldmia r8,{r8,r9,r10,r11} @ load key material - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half -# ifdef __thumb2__ - itt hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" - strhi r11,[sp,#4*(16+11)] @ copy "rx" - add r0,r0,r8 @ accumulate key material - add r1,r1,r9 - add r2,r2,r10 -# 
ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r3,r3,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r0,r8,r0 @ xor with input (or zero) - eor r1,r9,r1 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r2,r10,r2 - strb r0,[r14],#16 @ store output - eor r3,r11,r3 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r1,[r14,#-12] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-8] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r3,[r14,#-4] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-15] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r1,[r14,#-11] - eor r0,r8,r0,lsr#8 - strb r2,[r14,#-7] - eor r1,r9,r1,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r3,[r14,#-3] - eor r2,r10,r2,lsr#8 - strb r0,[r14,#-14] - eor r3,r11,r3,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r1,[r14,#-10] - strb r2,[r14,#-6] - eor r0,r8,r0,lsr#8 - strb r3,[r14,#-2] - eor r1,r9,r1,lsr#8 - strb r0,[r14,#-13] - eor r2,r10,r2,lsr#8 - strb r1,[r14,#-9] - eor r3,r11,r3,lsr#8 - strb r2,[r14,#-5] - strb r3,[r14,#-1] - add r8,sp,#4*(4+8) - ldmia r8,{r8,r9,r10,r11} @ load key material - add r4,r4,r8 @ accumulate key material -# ifdef __thumb2__ - itt hi -# endif - addhi r8,r8,#1 @ next counter value - strhi r8,[sp,#4*(12)] @ save next counter value - add r5,r5,r9 - add r6,r6,r10 -# ifdef __thumb2__ - itete lo -# endif - eorlo r8,r8,r8 @ zero or ... - ldrhsb r8,[r12],#16 @ ... 
load input - eorlo r9,r9,r9 - ldrhsb r9,[r12,#-12] - - add r7,r7,r11 -# ifdef __thumb2__ - itete lo -# endif - eorlo r10,r10,r10 - ldrhsb r10,[r12,#-8] - eorlo r11,r11,r11 - ldrhsb r11,[r12,#-4] - - eor r4,r8,r4 @ xor with input (or zero) - eor r5,r9,r5 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-15] @ load more input - ldrhsb r9,[r12,#-11] - eor r6,r10,r6 - strb r4,[r14],#16 @ store output - eor r7,r11,r7 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-7] - ldrhsb r11,[r12,#-3] - strb r5,[r14,#-12] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-8] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-14] @ load more input - ldrhsb r9,[r12,#-10] - strb r7,[r14,#-4] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-15] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-6] - ldrhsb r11,[r12,#-2] - strb r5,[r14,#-11] - eor r4,r8,r4,lsr#8 - strb r6,[r14,#-7] - eor r5,r9,r5,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r8,[r12,#-13] @ load more input - ldrhsb r9,[r12,#-9] - strb r7,[r14,#-3] - eor r6,r10,r6,lsr#8 - strb r4,[r14,#-14] - eor r7,r11,r7,lsr#8 -# ifdef __thumb2__ - itt hs -# endif - ldrhsb r10,[r12,#-5] - ldrhsb r11,[r12,#-1] - strb r5,[r14,#-10] - strb r6,[r14,#-6] - eor r4,r8,r4,lsr#8 - strb r7,[r14,#-2] - eor r5,r9,r5,lsr#8 - strb r4,[r14,#-13] - eor r6,r10,r6,lsr#8 - strb r5,[r14,#-9] - eor r7,r11,r7,lsr#8 - strb r6,[r14,#-5] - strb r7,[r14,#-1] -# ifdef __thumb2__ - it ne -# endif - ldrne r8,[sp,#4*(32+2)] @ re-load len -# ifdef __thumb2__ - it hs -# endif - subhs r11,r8,#64 @ len-=64 - bhi .Loop_outer - - beq .Ldone -#endif - -.Ltail: - ldr r12,[sp,#4*(32+1)] @ load inp - add r9,sp,#4*(0) - ldr r14,[sp,#4*(32+0)] @ load out - -.Loop_tail: - ldrb r10,[r9],#1 @ read buffer on stack - ldrb r11,[r12],#1 @ read input - subs r8,r8,#1 - eor r11,r11,r10 - strb r11,[r14],#1 @ store output - bne .Loop_tail - -.Ldone: - add sp,sp,#4*(32+3) -.Lno_data: - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -.size 
ChaCha20_ctr32,.-ChaCha20_ctr32 -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.type ChaCha20_neon,%function -.align 5 -ChaCha20_neon: - ldr r12,[sp,#0] @ pull pointer to counter and nonce - stmdb sp!,{r0,r1,r2,r4-r11,lr} -.LChaCha20_neon: - adr r14,.Lsigma - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so - stmdb sp!,{r0,r1,r2,r3} - - vld1.32 {q1,q2},[r3] @ load key - ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key - - sub sp,sp,#4*(16+16) - vld1.32 {q3},[r12] @ load counter and nonce - add r12,sp,#4*8 - ldmia r14,{r0,r1,r2,r3} @ load sigma - vld1.32 {q0},[r14]! @ load sigma - vld1.32 {q12},[r14] @ one - vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce - vst1.32 {q0,q1},[sp] @ copy sigma|1/2key - - str r10,[sp,#4*(16+10)] @ off-load "rx" - str r11,[sp,#4*(16+11)] @ off-load "rx" - vshl.i32 d26,d24,#1 @ two - vstr d24,[sp,#4*(16+0)] - vshl.i32 d28,d24,#2 @ four - vstr d26,[sp,#4*(16+2)] - vmov q4,q0 - vstr d28,[sp,#4*(16+4)] - vmov q8,q0 - vmov q5,q1 - vmov q9,q1 - b .Loop_neon_enter - -.align 4 -.Loop_neon_outer: - ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material - cmp r11,#64*2 @ if len<=64*2 - bls .Lbreak_neon @ switch to integer-only - vmov q4,q0 - str r11,[sp,#4*(32+2)] @ save len - vmov q8,q0 - str r12, [sp,#4*(32+1)] @ save inp - vmov q5,q1 - str r14, [sp,#4*(32+0)] @ save out - vmov q9,q1 -.Loop_neon_enter: - ldr r11, [sp,#4*(15)] - vadd.i32 q7,q3,q12 @ counter+1 - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - vmov q6,q2 - ldr r10, [sp,#4*(13)] - vmov q10,q2 - ldr r14,[sp,#4*(14)] - vadd.i32 q11,q7,q12 @ counter+2 - str r11, [sp,#4*(16+15)] - mov r11,#10 - add r12,r12,#3 @ counter+3 - b .Loop_neon - -.align 4 -.Loop_neon: - subs r11,r11,#1 - vadd.i32 q0,q0,q1 - add r0,r0,r4 - vadd.i32 q4,q4,q5 - mov r12,r12,ror#16 - vadd.i32 q8,q8,q9 - add r1,r1,r5 - veor q3,q3,q0 - mov r10,r10,ror#16 - veor q7,q7,q4 - eor r12,r12,r0,ror#16 - veor q11,q11,q8 - eor r10,r10,r1,ror#16 - vrev32.16 q3,q3 - add r8,r8,r12 - vrev32.16 q7,q7 - mov 
r4,r4,ror#20 - vrev32.16 q11,q11 - add r9,r9,r10 - vadd.i32 q2,q2,q3 - mov r5,r5,ror#20 - vadd.i32 q6,q6,q7 - eor r4,r4,r8,ror#20 - vadd.i32 q10,q10,q11 - eor r5,r5,r9,ror#20 - veor q12,q1,q2 - add r0,r0,r4 - veor q13,q5,q6 - mov r12,r12,ror#24 - veor q14,q9,q10 - add r1,r1,r5 - vshr.u32 q1,q12,#20 - mov r10,r10,ror#24 - vshr.u32 q5,q13,#20 - eor r12,r12,r0,ror#24 - vshr.u32 q9,q14,#20 - eor r10,r10,r1,ror#24 - vsli.32 q1,q12,#12 - add r8,r8,r12 - vsli.32 q5,q13,#12 - mov r4,r4,ror#25 - vsli.32 q9,q14,#12 - add r9,r9,r10 - vadd.i32 q0,q0,q1 - mov r5,r5,ror#25 - vadd.i32 q4,q4,q5 - str r10,[sp,#4*(16+13)] - vadd.i32 q8,q8,q9 - ldr r10,[sp,#4*(16+15)] - veor q12,q3,q0 - eor r4,r4,r8,ror#25 - veor q13,q7,q4 - eor r5,r5,r9,ror#25 - veor q14,q11,q8 - str r8,[sp,#4*(16+8)] - vshr.u32 q3,q12,#24 - ldr r8,[sp,#4*(16+10)] - vshr.u32 q7,q13,#24 - add r2,r2,r6 - vshr.u32 q11,q14,#24 - mov r14,r14,ror#16 - vsli.32 q3,q12,#8 - str r9,[sp,#4*(16+9)] - vsli.32 q7,q13,#8 - ldr r9,[sp,#4*(16+11)] - vsli.32 q11,q14,#8 - add r3,r3,r7 - vadd.i32 q2,q2,q3 - mov r10,r10,ror#16 - vadd.i32 q6,q6,q7 - eor r14,r14,r2,ror#16 - vadd.i32 q10,q10,q11 - eor r10,r10,r3,ror#16 - veor q12,q1,q2 - add r8,r8,r14 - veor q13,q5,q6 - mov r6,r6,ror#20 - veor q14,q9,q10 - add r9,r9,r10 - vshr.u32 q1,q12,#25 - mov r7,r7,ror#20 - vshr.u32 q5,q13,#25 - eor r6,r6,r8,ror#20 - vshr.u32 q9,q14,#25 - eor r7,r7,r9,ror#20 - vsli.32 q1,q12,#7 - add r2,r2,r6 - vsli.32 q5,q13,#7 - mov r14,r14,ror#24 - vsli.32 q9,q14,#7 - add r3,r3,r7 - vext.8 q2,q2,q2,#8 - mov r10,r10,ror#24 - vext.8 q6,q6,q6,#8 - eor r14,r14,r2,ror#24 - vext.8 q10,q10,q10,#8 - eor r10,r10,r3,ror#24 - vext.8 q1,q1,q1,#4 - add r8,r8,r14 - vext.8 q5,q5,q5,#4 - mov r6,r6,ror#25 - vext.8 q9,q9,q9,#4 - add r9,r9,r10 - vext.8 q3,q3,q3,#12 - mov r7,r7,ror#25 - vext.8 q7,q7,q7,#12 - eor r6,r6,r8,ror#25 - vext.8 q11,q11,q11,#12 - eor r7,r7,r9,ror#25 - vadd.i32 q0,q0,q1 - add r0,r0,r5 - vadd.i32 q4,q4,q5 - mov r10,r10,ror#16 - vadd.i32 q8,q8,q9 - add r1,r1,r6 - 
veor q3,q3,q0 - mov r12,r12,ror#16 - veor q7,q7,q4 - eor r10,r10,r0,ror#16 - veor q11,q11,q8 - eor r12,r12,r1,ror#16 - vrev32.16 q3,q3 - add r8,r8,r10 - vrev32.16 q7,q7 - mov r5,r5,ror#20 - vrev32.16 q11,q11 - add r9,r9,r12 - vadd.i32 q2,q2,q3 - mov r6,r6,ror#20 - vadd.i32 q6,q6,q7 - eor r5,r5,r8,ror#20 - vadd.i32 q10,q10,q11 - eor r6,r6,r9,ror#20 - veor q12,q1,q2 - add r0,r0,r5 - veor q13,q5,q6 - mov r10,r10,ror#24 - veor q14,q9,q10 - add r1,r1,r6 - vshr.u32 q1,q12,#20 - mov r12,r12,ror#24 - vshr.u32 q5,q13,#20 - eor r10,r10,r0,ror#24 - vshr.u32 q9,q14,#20 - eor r12,r12,r1,ror#24 - vsli.32 q1,q12,#12 - add r8,r8,r10 - vsli.32 q5,q13,#12 - mov r5,r5,ror#25 - vsli.32 q9,q14,#12 - str r10,[sp,#4*(16+15)] - vadd.i32 q0,q0,q1 - ldr r10,[sp,#4*(16+13)] - vadd.i32 q4,q4,q5 - add r9,r9,r12 - vadd.i32 q8,q8,q9 - mov r6,r6,ror#25 - veor q12,q3,q0 - eor r5,r5,r8,ror#25 - veor q13,q7,q4 - eor r6,r6,r9,ror#25 - veor q14,q11,q8 - str r8,[sp,#4*(16+10)] - vshr.u32 q3,q12,#24 - ldr r8,[sp,#4*(16+8)] - vshr.u32 q7,q13,#24 - add r2,r2,r7 - vshr.u32 q11,q14,#24 - mov r10,r10,ror#16 - vsli.32 q3,q12,#8 - str r9,[sp,#4*(16+11)] - vsli.32 q7,q13,#8 - ldr r9,[sp,#4*(16+9)] - vsli.32 q11,q14,#8 - add r3,r3,r4 - vadd.i32 q2,q2,q3 - mov r14,r14,ror#16 - vadd.i32 q6,q6,q7 - eor r10,r10,r2,ror#16 - vadd.i32 q10,q10,q11 - eor r14,r14,r3,ror#16 - veor q12,q1,q2 - add r8,r8,r10 - veor q13,q5,q6 - mov r7,r7,ror#20 - veor q14,q9,q10 - add r9,r9,r14 - vshr.u32 q1,q12,#25 - mov r4,r4,ror#20 - vshr.u32 q5,q13,#25 - eor r7,r7,r8,ror#20 - vshr.u32 q9,q14,#25 - eor r4,r4,r9,ror#20 - vsli.32 q1,q12,#7 - add r2,r2,r7 - vsli.32 q5,q13,#7 - mov r10,r10,ror#24 - vsli.32 q9,q14,#7 - add r3,r3,r4 - vext.8 q2,q2,q2,#8 - mov r14,r14,ror#24 - vext.8 q6,q6,q6,#8 - eor r10,r10,r2,ror#24 - vext.8 q10,q10,q10,#8 - eor r14,r14,r3,ror#24 - vext.8 q1,q1,q1,#12 - add r8,r8,r10 - vext.8 q5,q5,q5,#12 - mov r7,r7,ror#25 - vext.8 q9,q9,q9,#12 - add r9,r9,r14 - vext.8 q3,q3,q3,#4 - mov r4,r4,ror#25 - vext.8 q7,q7,q7,#4 - eor 
r7,r7,r8,ror#25 - vext.8 q11,q11,q11,#4 - eor r4,r4,r9,ror#25 - bne .Loop_neon - - add r11,sp,#32 - vld1.32 {q12,q13},[sp] @ load key material - vld1.32 {q14,q15},[r11] - - ldr r11,[sp,#4*(32+2)] @ load len - - str r8, [sp,#4*(16+8)] @ modulo-scheduled store - str r9, [sp,#4*(16+9)] - str r12,[sp,#4*(16+12)] - str r10, [sp,#4*(16+13)] - str r14,[sp,#4*(16+14)] - - @ at this point we have first half of 512-bit result in - @ rx and second half at sp+4*(16+8) - - ldr r12,[sp,#4*(32+1)] @ load inp - ldr r14,[sp,#4*(32+0)] @ load out - - vadd.i32 q0,q0,q12 @ accumulate key material - vadd.i32 q4,q4,q12 - vadd.i32 q8,q8,q12 - vldr d24,[sp,#4*(16+0)] @ one - - vadd.i32 q1,q1,q13 - vadd.i32 q5,q5,q13 - vadd.i32 q9,q9,q13 - vldr d26,[sp,#4*(16+2)] @ two - - vadd.i32 q2,q2,q14 - vadd.i32 q6,q6,q14 - vadd.i32 q10,q10,q14 - vadd.i32 d14,d14,d24 @ counter+1 - vadd.i32 d22,d22,d26 @ counter+2 - - vadd.i32 q3,q3,q15 - vadd.i32 q7,q7,q15 - vadd.i32 q11,q11,q15 - - cmp r11,#64*4 - blo .Ltail_neon - - vld1.8 {q12,q13},[r12]! @ load input - mov r11,sp - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 @ xor with input - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! - - veor q4,q4,q12 - vst1.8 {q0,q1},[r14]! @ store output - veor q5,q5,q13 - vld1.8 {q12,q13},[r12]! - veor q6,q6,q14 - vst1.8 {q2,q3},[r14]! - veor q7,q7,q15 - vld1.8 {q14,q15},[r12]! - - veor q8,q8,q12 - vld1.32 {q0,q1},[r11]! @ load for next iteration - veor d25,d25,d25 - vldr d24,[sp,#4*(16+4)] @ four - veor q9,q9,q13 - vld1.32 {q2,q3},[r11] - veor q10,q10,q14 - vst1.8 {q4,q5},[r14]! - veor q11,q11,q15 - vst1.8 {q6,q7},[r14]! - - vadd.i32 d6,d6,d24 @ next counter value - vldr d24,[sp,#4*(16+0)] @ one - - ldmia sp,{r8,r9,r10,r11} @ load key material - add r0,r0,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - vst1.8 {q8,q9},[r14]! - add r1,r1,r9 - ldr r9,[r12,#-12] - vst1.8 {q10,q11},[r14]! 
- add r2,r2,r10 - ldr r10,[r12,#-8] - add r3,r3,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif - eor r0,r0,r8 @ xor with input - add r8,sp,#4*(4) - eor r1,r1,r9 - str r0,[r14],#16 @ store output - eor r2,r2,r10 - str r1,[r14,#-12] - eor r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - add r5,r5,r9 - ldr r9,[r12,#-12] - add r6,r6,r10 - ldr r10,[r12,#-8] - add r7,r7,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - eor r4,r4,r8 - add r8,sp,#4*(8) - eor r5,r5,r9 - str r4,[r14],#16 @ store output - eor r6,r6,r10 - str r5,[r14,#-12] - eor r7,r7,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r6,[r14,#-8] - add r0,sp,#4*(16+8) - str r7,[r14,#-4] - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - ldr r8,[r12],#16 @ load input - add r1,r1,r9 - ldr r9,[r12,#-12] -# ifdef __thumb2__ - it hi -# endif - strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it - add r2,r2,r10 - ldr r10,[r12,#-8] -# ifdef __thumb2__ - it hi -# endif - strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it - add r3,r3,r11 - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif - eor r0,r0,r8 - add r8,sp,#4*(12) - eor r1,r1,r9 - str r0,[r14],#16 @ store output - eor r2,r2,r10 - str r1,[r14,#-12] - eor r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - str r2,[r14,#-8] - str r3,[r14,#-4] - - add r4,r4,r8 @ accumulate key material - add r8,r8,#4 @ next counter value - add r5,r5,r9 - str r8,[sp,#4*(12)] @ save next counter value - ldr r8,[r12],#16 @ load input - add r6,r6,r10 - add r4,r4,#3 @ counter+3 - ldr r9,[r12,#-12] - add r7,r7,r11 - ldr r10,[r12,#-8] - ldr r11,[r12,#-4] -# ifdef __ARMEB__ - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - eor r4,r4,r8 -# ifdef 
__thumb2__ - it hi -# endif - ldrhi r8,[sp,#4*(32+2)] @ re-load len - eor r5,r5,r9 - eor r6,r6,r10 - str r4,[r14],#16 @ store output - eor r7,r7,r11 - str r5,[r14,#-12] - sub r11,r8,#64*4 @ len-=64*4 - str r6,[r14,#-8] - str r7,[r14,#-4] - bhi .Loop_neon_outer - - b .Ldone_neon - -.align 4 -.Lbreak_neon: - @ harmonize NEON and integer-only stack frames: load data - @ from NEON frame, but save to integer-only one; distance - @ between the two is 4*(32+4+16-32)=4*(20). - - str r11, [sp,#4*(20+32+2)] @ save len - add r11,sp,#4*(32+4) - str r12, [sp,#4*(20+32+1)] @ save inp - str r14, [sp,#4*(20+32+0)] @ save out - - ldr r12,[sp,#4*(16+10)] - ldr r14,[sp,#4*(16+11)] - vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement - str r12,[sp,#4*(20+16+10)] @ copy "rx" - str r14,[sp,#4*(20+16+11)] @ copy "rx" - - ldr r11, [sp,#4*(15)] - ldr r12,[sp,#4*(12)] @ modulo-scheduled load - ldr r10, [sp,#4*(13)] - ldr r14,[sp,#4*(14)] - str r11, [sp,#4*(20+16+15)] - add r11,sp,#4*(20) - vst1.32 {q0,q1},[r11]! @ copy key - add sp,sp,#4*(20) @ switch frame - vst1.32 {q2,q3},[r11] - mov r11,#10 - b .Loop @ go integer-only - -.align 4 -.Ltail_neon: - cmp r11,#64*3 - bhs .L192_or_more_neon - cmp r11,#64*2 - bhs .L128_or_more_neon - cmp r11,#64*1 - bhs .L64_or_more_neon - - add r8,sp,#4*(8) - vst1.8 {q0,q1},[sp] - add r10,sp,#4*(0) - vst1.8 {q2,q3},[r8] - b .Loop_tail_neon - -.align 4 -.L64_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - veor q2,q2,q14 - veor q3,q3,q15 - vst1.8 {q0,q1},[r14]! - vst1.8 {q2,q3},[r14]! - - beq .Ldone_neon - - add r8,sp,#4*(8) - vst1.8 {q4,q5},[sp] - add r10,sp,#4*(0) - vst1.8 {q6,q7},[r8] - sub r11,r11,#64*1 @ len-=64*1 - b .Loop_tail_neon - -.align 4 -.L128_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! 
- - veor q4,q4,q12 - veor q5,q5,q13 - vst1.8 {q0,q1},[r14]! - veor q6,q6,q14 - vst1.8 {q2,q3},[r14]! - veor q7,q7,q15 - vst1.8 {q4,q5},[r14]! - vst1.8 {q6,q7},[r14]! - - beq .Ldone_neon - - add r8,sp,#4*(8) - vst1.8 {q8,q9},[sp] - add r10,sp,#4*(0) - vst1.8 {q10,q11},[r8] - sub r11,r11,#64*2 @ len-=64*2 - b .Loop_tail_neon - -.align 4 -.L192_or_more_neon: - vld1.8 {q12,q13},[r12]! - vld1.8 {q14,q15},[r12]! - veor q0,q0,q12 - veor q1,q1,q13 - vld1.8 {q12,q13},[r12]! - veor q2,q2,q14 - veor q3,q3,q15 - vld1.8 {q14,q15},[r12]! - - veor q4,q4,q12 - veor q5,q5,q13 - vld1.8 {q12,q13},[r12]! - veor q6,q6,q14 - vst1.8 {q0,q1},[r14]! - veor q7,q7,q15 - vld1.8 {q14,q15},[r12]! - - veor q8,q8,q12 - vst1.8 {q2,q3},[r14]! - veor q9,q9,q13 - vst1.8 {q4,q5},[r14]! - veor q10,q10,q14 - vst1.8 {q6,q7},[r14]! - veor q11,q11,q15 - vst1.8 {q8,q9},[r14]! - vst1.8 {q10,q11},[r14]! - - beq .Ldone_neon - - ldmia sp,{r8,r9,r10,r11} @ load key material - add r0,r0,r8 @ accumulate key material - add r8,sp,#4*(4) - add r1,r1,r9 - add r2,r2,r10 - add r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - - add r4,r4,r8 @ accumulate key material - add r8,sp,#4*(8) - add r5,r5,r9 - add r6,r6,r10 - add r7,r7,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7} - add r0,sp,#4*(16+8) - - ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half - - add r0,r0,r8 @ accumulate key material - add r8,sp,#4*(12) - add r1,r1,r9 - add r2,r2,r10 - add r3,r3,r11 - ldmia r8,{r8,r9,r10,r11} @ load key material - - add r4,r4,r8 @ accumulate key material - add r8,sp,#4*(8) - add r5,r5,r9 - add r4,r4,#3 @ counter+3 - add r6,r6,r10 - add r7,r7,r11 - ldr r11,[sp,#4*(32+2)] @ re-load len -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 - rev r4,r4 - rev r5,r5 - rev r6,r6 - rev r7,r7 -# endif - stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7} - add 
r10,sp,#4*(0) - sub r11,r11,#64*3 @ len-=64*3 - -.Loop_tail_neon: - ldrb r8,[r10],#1 @ read buffer on stack - ldrb r9,[r12],#1 @ read input - subs r11,r11,#1 - eor r8,r8,r9 - strb r8,[r14],#1 @ store output - bne .Loop_tail_neon - -.Ldone_neon: - add sp,sp,#4*(32+4) - vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15} - add sp,sp,#4*(16+3) - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -.size ChaCha20_neon,.-ChaCha20_neon -.comm OPENSSL_armcap_P,4,4 -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aes-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aes-armv4.S deleted file mode 100644 index cfe2a36649..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aes-armv4.S +++ /dev/null @@ -1,1222 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ ==================================================================== - -@ AES for ARMv4 - -@ January 2007. 
-@ -@ Code uses single 1K S-box and is >2 times faster than code generated -@ by gcc-3.4.1. This is thanks to unique feature of ARMv4 ISA, which -@ allows to merge logical or arithmetic operation with shift or rotate -@ in one instruction and emit combined result every cycle. The module -@ is endian-neutral. The performance is ~42 cycles/byte for 128-bit -@ key [on single-issue Xscale PXA250 core]. - -@ May 2007. -@ -@ AES_set_[en|de]crypt_key is added. - -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 12% improvement on -@ Cortex A8 core and ~25 cycles per byte processed with 128-bit key. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 16% -@ improvement on Cortex A8 core and ~21.5 cycles per byte. - -#ifndef __KERNEL__ -# include -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 AES -@ instructions are in aesv8-armx.pl.) 
-.arch armv7-a - -.text -#if defined(__thumb2__) && !defined(__APPLE__) -.syntax unified -.thumb -#else -.code 32 -#undef __thumb2__ -#endif - -.type AES_Te,%object -.align 5 -AES_Te: -.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d -.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554 -.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d -.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a -.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87 -.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b -.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea -.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b -.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a -.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f -.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108 -.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f -.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e -.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5 -.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d -.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f -.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e -.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb -.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce -.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497 -.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c -.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed -.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b -.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a -.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16 -.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594 -.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81 -.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3 -.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a -.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504 -.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163 -.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d -.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f -.word 0xbe5f5fe1, 0x359797a2, 
0x884444cc, 0x2e171739 -.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47 -.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395 -.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f -.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883 -.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c -.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76 -.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e -.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4 -.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6 -.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b -.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7 -.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0 -.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25 -.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818 -.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72 -.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651 -.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21 -.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85 -.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa -.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12 -.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0 -.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9 -.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133 -.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7 -.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920 -.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a -.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17 -.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8 -.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11 -.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a -@ Te4[256] -.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 -.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 -.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0 -.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 -.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc -.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 -.byte 0x04, 0xc7, 
0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a -.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 -.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0 -.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 -.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b -.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf -.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85 -.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 -.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5 -.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 -.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17 -.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 -.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88 -.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb -.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c -.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 -.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9 -.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 -.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6 -.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a -.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e -.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e -.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94 -.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf -.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68 -.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 -@ rcon[] -.word 0x01000000, 0x02000000, 0x04000000, 0x08000000 -.word 0x10000000, 0x20000000, 0x40000000, 0x80000000 -.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0 -.size AES_Te,.-AES_Te - -@ void aes_nohw_encrypt(const unsigned char *in, unsigned char *out, -@ const AES_KEY *key) { -.globl aes_nohw_encrypt -.hidden aes_nohw_encrypt -.type aes_nohw_encrypt,%function -.align 5 -aes_nohw_encrypt: -#ifndef __thumb2__ - sub r3,pc,#8 @ aes_nohw_encrypt -#else - adr r3,. 
-#endif - stmdb sp!,{r1,r4-r12,lr} -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Te -#else - sub r10,r3,#aes_nohw_encrypt-AES_Te @ Te -#endif - mov r12,r0 @ inp - mov r11,r2 -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... - ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - orr r3,r3,r5,lsl#16 - orr r3,r3,r6,lsl#24 -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif -#endif - bl _armv4_AES_encrypt - - ldr r12,[sp],#4 @ pop out -#if __ARM_ARCH__>=7 -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r12,#0] - str r1,[r12,#4] - str r2,[r12,#8] - str r3,[r12,#12] -#else - mov r4,r0,lsr#24 @ write output in endian-neutral - mov r5,r0,lsr#16 @ manner... 
- mov r6,r0,lsr#8 - strb r4,[r12,#0] - strb r5,[r12,#1] - mov r4,r1,lsr#24 - strb r6,[r12,#2] - mov r5,r1,lsr#16 - strb r0,[r12,#3] - mov r6,r1,lsr#8 - strb r4,[r12,#4] - strb r5,[r12,#5] - mov r4,r2,lsr#24 - strb r6,[r12,#6] - mov r5,r2,lsr#16 - strb r1,[r12,#7] - mov r6,r2,lsr#8 - strb r4,[r12,#8] - strb r5,[r12,#9] - mov r4,r3,lsr#24 - strb r6,[r12,#10] - mov r5,r3,lsr#16 - strb r2,[r12,#11] - mov r6,r3,lsr#8 - strb r4,[r12,#12] - strb r5,[r12,#13] - strb r6,[r12,#14] - strb r3,[r12,#15] -#endif -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size aes_nohw_encrypt,.-aes_nohw_encrypt - -.type _armv4_AES_encrypt,%function -.align 2 -_armv4_AES_encrypt: - str lr,[sp,#-4]! @ push lr - ldmia r11!,{r4,r5,r6,r7} - eor r0,r0,r4 - ldr r12,[r11,#240-16] - eor r1,r1,r5 - eor r2,r2,r6 - eor r3,r3,r7 - sub r12,r12,#1 - mov lr,#255 - - and r7,lr,r0 - and r8,lr,r0,lsr#8 - and r9,lr,r0,lsr#16 - mov r0,r0,lsr#24 -.Lenc_loop: - ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0] - and r7,lr,r1,lsr#16 @ i0 - ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8] - and r8,lr,r1 - ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16] - and r9,lr,r1,lsr#8 - ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24] - mov r1,r1,lsr#24 - - ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16] - ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0] - ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8] - eor r0,r0,r7,ror#8 - ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r5,r8,ror#8 - and r8,lr,r2,lsr#16 @ i1 - eor r6,r6,r9,ror#8 - and r9,lr,r2 - ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8] - eor r1,r1,r4,ror#24 - ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16] - mov r2,r2,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0] - eor r0,r0,r7,ror#16 - ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24] - and r7,lr,r3 @ i0 - eor r1,r1,r8,ror#8 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r6,r9,ror#16 - and r9,lr,r3,lsr#16 @ i2 - ldr r7,[r10,r7,lsl#2] @ 
Te3[s3>>0] - eor r2,r2,r5,ror#16 - ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8] - mov r3,r3,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16] - eor r0,r0,r7,ror#24 - ldr r7,[r11],#16 - eor r1,r1,r8,ror#16 - ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24] - eor r2,r2,r9,ror#8 - ldr r4,[r11,#-12] - eor r3,r3,r6,ror#8 - - ldr r5,[r11,#-8] - eor r0,r0,r7 - ldr r6,[r11,#-4] - and r7,lr,r0 - eor r1,r1,r4 - and r8,lr,r0,lsr#8 - eor r2,r2,r5 - and r9,lr,r0,lsr#16 - eor r3,r3,r6 - mov r0,r0,lsr#24 - - subs r12,r12,#1 - bne .Lenc_loop - - add r10,r10,#2 - - ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0] - and r7,lr,r1,lsr#16 @ i0 - ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8] - and r8,lr,r1 - ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16] - and r9,lr,r1,lsr#8 - ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24] - mov r1,r1,lsr#24 - - ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16] - ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0] - ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8] - eor r0,r7,r0,lsl#8 - ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r8,r5,lsl#8 - and r8,lr,r2,lsr#16 @ i1 - eor r6,r9,r6,lsl#8 - and r9,lr,r2 - ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8] - eor r1,r4,r1,lsl#24 - ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16] - mov r2,r2,lsr#24 - - ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0] - eor r0,r7,r0,lsl#8 - ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24] - and r7,lr,r3 @ i0 - eor r1,r1,r8,lsl#16 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r9,r6,lsl#8 - and r9,lr,r3,lsr#16 @ i2 - ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0] - eor r2,r5,r2,lsl#24 - ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8] - mov r3,r3,lsr#24 - - ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16] - eor r0,r7,r0,lsl#8 - ldr r7,[r11,#0] - ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24] - eor r1,r1,r8,lsl#8 - ldr r4,[r11,#4] - eor r2,r2,r9,lsl#16 - ldr r5,[r11,#8] - eor r3,r6,r3,lsl#24 - ldr r6,[r11,#12] - - eor r0,r0,r7 - eor r1,r1,r4 - eor r2,r2,r5 - eor r3,r3,r6 - - sub r10,r10,#2 - ldr pc,[sp],#4 @ pop and return -.size _armv4_AES_encrypt,.-_armv4_AES_encrypt - -.globl aes_nohw_set_encrypt_key -.hidden aes_nohw_set_encrypt_key -.type 
aes_nohw_set_encrypt_key,%function -.align 5 -aes_nohw_set_encrypt_key: -_armv4_AES_set_encrypt_key: -#ifndef __thumb2__ - sub r3,pc,#8 @ aes_nohw_set_encrypt_key -#else - adr r3,. -#endif - teq r0,#0 -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - moveq r0,#-1 - beq .Labrt - teq r2,#0 -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - moveq r0,#-1 - beq .Labrt - - teq r1,#128 - beq .Lok - teq r1,#192 - beq .Lok - teq r1,#256 -#ifdef __thumb2__ - itt ne @ Thumb2 thing, sanity check in ARM -#endif - movne r0,#-1 - bne .Labrt - -.Lok: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - mov r12,r0 @ inp - mov lr,r1 @ bits - mov r11,r2 @ key - -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Te+1024 @ Te4 -#else - sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4 -#endif - -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... - ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - str r0,[r11],#16 - orr r3,r3,r5,lsl#16 - str r1,[r11,#-12] - orr r3,r3,r6,lsl#24 - str r2,[r11,#-8] - str r3,[r11,#-4] -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r11],#16 - str r1,[r11,#-12] - str r2,[r11,#-8] - str r3,[r11,#-4] -#endif - - teq lr,#128 - bne .Lnot128 - mov r12,#10 - str r12,[r11,#240-16] - add r6,r10,#256 @ rcon - mov lr,#255 - -.L128_loop: - and r5,lr,r3,lsr#24 - and r7,lr,r3,lsr#16 - ldrb r5,[r10,r5] - and 
r8,lr,r3,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r3 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r5,r5,r4 - eor r0,r0,r5 @ rk[4]=rk[0]^... - eor r1,r1,r0 @ rk[5]=rk[1]^rk[4] - str r0,[r11],#16 - eor r2,r2,r1 @ rk[6]=rk[2]^rk[5] - str r1,[r11,#-12] - eor r3,r3,r2 @ rk[7]=rk[3]^rk[6] - str r2,[r11,#-8] - subs r12,r12,#1 - str r3,[r11,#-4] - bne .L128_loop - sub r2,r11,#176 - b .Ldone - -.Lnot128: -#if __ARM_ARCH__<7 - ldrb r8,[r12,#19] - ldrb r4,[r12,#18] - ldrb r5,[r12,#17] - ldrb r6,[r12,#16] - orr r8,r8,r4,lsl#8 - ldrb r9,[r12,#23] - orr r8,r8,r5,lsl#16 - ldrb r4,[r12,#22] - orr r8,r8,r6,lsl#24 - ldrb r5,[r12,#21] - ldrb r6,[r12,#20] - orr r9,r9,r4,lsl#8 - orr r9,r9,r5,lsl#16 - str r8,[r11],#8 - orr r9,r9,r6,lsl#24 - str r9,[r11,#-4] -#else - ldr r8,[r12,#16] - ldr r9,[r12,#20] -#ifdef __ARMEL__ - rev r8,r8 - rev r9,r9 -#endif - str r8,[r11],#8 - str r9,[r11,#-4] -#endif - - teq lr,#192 - bne .Lnot192 - mov r12,#12 - str r12,[r11,#240-24] - add r6,r10,#256 @ rcon - mov lr,#255 - mov r12,#8 - -.L192_loop: - and r5,lr,r9,lsr#24 - and r7,lr,r9,lsr#16 - ldrb r5,[r10,r5] - and r8,lr,r9,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r9 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r9,r5,r4 - eor r0,r0,r9 @ rk[6]=rk[0]^... 
- eor r1,r1,r0 @ rk[7]=rk[1]^rk[6] - str r0,[r11],#24 - eor r2,r2,r1 @ rk[8]=rk[2]^rk[7] - str r1,[r11,#-20] - eor r3,r3,r2 @ rk[9]=rk[3]^rk[8] - str r2,[r11,#-16] - subs r12,r12,#1 - str r3,[r11,#-12] -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - subeq r2,r11,#216 - beq .Ldone - - ldr r7,[r11,#-32] - ldr r8,[r11,#-28] - eor r7,r7,r3 @ rk[10]=rk[4]^rk[9] - eor r9,r8,r7 @ rk[11]=rk[5]^rk[10] - str r7,[r11,#-8] - str r9,[r11,#-4] - b .L192_loop - -.Lnot192: -#if __ARM_ARCH__<7 - ldrb r8,[r12,#27] - ldrb r4,[r12,#26] - ldrb r5,[r12,#25] - ldrb r6,[r12,#24] - orr r8,r8,r4,lsl#8 - ldrb r9,[r12,#31] - orr r8,r8,r5,lsl#16 - ldrb r4,[r12,#30] - orr r8,r8,r6,lsl#24 - ldrb r5,[r12,#29] - ldrb r6,[r12,#28] - orr r9,r9,r4,lsl#8 - orr r9,r9,r5,lsl#16 - str r8,[r11],#8 - orr r9,r9,r6,lsl#24 - str r9,[r11,#-4] -#else - ldr r8,[r12,#24] - ldr r9,[r12,#28] -#ifdef __ARMEL__ - rev r8,r8 - rev r9,r9 -#endif - str r8,[r11],#8 - str r9,[r11,#-4] -#endif - - mov r12,#14 - str r12,[r11,#240-32] - add r6,r10,#256 @ rcon - mov lr,#255 - mov r12,#7 - -.L256_loop: - and r5,lr,r9,lsr#24 - and r7,lr,r9,lsr#16 - ldrb r5,[r10,r5] - and r8,lr,r9,lsr#8 - ldrb r7,[r10,r7] - and r9,lr,r9 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#24 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r6],#4 @ rcon[i++] - orr r5,r5,r9,lsl#8 - eor r9,r5,r4 - eor r0,r0,r9 @ rk[8]=rk[0]^... 
- eor r1,r1,r0 @ rk[9]=rk[1]^rk[8] - str r0,[r11],#32 - eor r2,r2,r1 @ rk[10]=rk[2]^rk[9] - str r1,[r11,#-28] - eor r3,r3,r2 @ rk[11]=rk[3]^rk[10] - str r2,[r11,#-24] - subs r12,r12,#1 - str r3,[r11,#-20] -#ifdef __thumb2__ - itt eq @ Thumb2 thing, sanity check in ARM -#endif - subeq r2,r11,#256 - beq .Ldone - - and r5,lr,r3 - and r7,lr,r3,lsr#8 - ldrb r5,[r10,r5] - and r8,lr,r3,lsr#16 - ldrb r7,[r10,r7] - and r9,lr,r3,lsr#24 - ldrb r8,[r10,r8] - orr r5,r5,r7,lsl#8 - ldrb r9,[r10,r9] - orr r5,r5,r8,lsl#16 - ldr r4,[r11,#-48] - orr r5,r5,r9,lsl#24 - - ldr r7,[r11,#-44] - ldr r8,[r11,#-40] - eor r4,r4,r5 @ rk[12]=rk[4]^... - ldr r9,[r11,#-36] - eor r7,r7,r4 @ rk[13]=rk[5]^rk[12] - str r4,[r11,#-16] - eor r8,r8,r7 @ rk[14]=rk[6]^rk[13] - str r7,[r11,#-12] - eor r9,r9,r8 @ rk[15]=rk[7]^rk[14] - str r8,[r11,#-8] - str r9,[r11,#-4] - b .L256_loop - -.align 2 -.Ldone: mov r0,#0 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} -.Labrt: -#if __ARM_ARCH__>=5 - bx lr @ .word 0xe12fff1e -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size aes_nohw_set_encrypt_key,.-aes_nohw_set_encrypt_key - -.globl aes_nohw_set_decrypt_key -.hidden aes_nohw_set_decrypt_key -.type aes_nohw_set_decrypt_key,%function -.align 5 -aes_nohw_set_decrypt_key: - str lr,[sp,#-4]! 
@ push lr - bl _armv4_AES_set_encrypt_key - teq r0,#0 - ldr lr,[sp],#4 @ pop lr - bne .Labrt - - mov r0,r2 @ aes_nohw_set_encrypt_key preserves r2, - mov r1,r2 @ which is AES_KEY *key - b _armv4_AES_set_enc2dec_key -.size aes_nohw_set_decrypt_key,.-aes_nohw_set_decrypt_key - -@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out) -.globl AES_set_enc2dec_key -.hidden AES_set_enc2dec_key -.type AES_set_enc2dec_key,%function -.align 5 -AES_set_enc2dec_key: -_armv4_AES_set_enc2dec_key: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - - ldr r12,[r0,#240] - mov r7,r0 @ input - add r8,r0,r12,lsl#4 - mov r11,r1 @ output - add r10,r1,r12,lsl#4 - str r12,[r1,#240] - -.Linv: ldr r0,[r7],#16 - ldr r1,[r7,#-12] - ldr r2,[r7,#-8] - ldr r3,[r7,#-4] - ldr r4,[r8],#-16 - ldr r5,[r8,#16+4] - ldr r6,[r8,#16+8] - ldr r9,[r8,#16+12] - str r0,[r10],#-16 - str r1,[r10,#16+4] - str r2,[r10,#16+8] - str r3,[r10,#16+12] - str r4,[r11],#16 - str r5,[r11,#-12] - str r6,[r11,#-8] - str r9,[r11,#-4] - teq r7,r8 - bne .Linv - - ldr r0,[r7] - ldr r1,[r7,#4] - ldr r2,[r7,#8] - ldr r3,[r7,#12] - str r0,[r11] - str r1,[r11,#4] - str r2,[r11,#8] - str r3,[r11,#12] - sub r11,r11,r12,lsl#3 - ldr r0,[r11,#16]! 
@ prefetch tp1 - mov r7,#0x80 - mov r8,#0x1b - orr r7,r7,#0x8000 - orr r8,r8,#0x1b00 - orr r7,r7,r7,lsl#16 - orr r8,r8,r8,lsl#16 - sub r12,r12,#1 - mvn r9,r7 - mov r12,r12,lsl#2 @ (rounds-1)*4 - -.Lmix: and r4,r0,r7 - and r1,r0,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r1,r4,r1,lsl#1 @ tp2 - - and r4,r1,r7 - and r2,r1,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r2,r4,r2,lsl#1 @ tp4 - - and r4,r2,r7 - and r3,r2,r9 - sub r4,r4,r4,lsr#7 - and r4,r4,r8 - eor r3,r4,r3,lsl#1 @ tp8 - - eor r4,r1,r2 - eor r5,r0,r3 @ tp9 - eor r4,r4,r3 @ tpe - eor r4,r4,r1,ror#24 - eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8) - eor r4,r4,r2,ror#16 - eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16) - eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24) - - ldr r0,[r11,#4] @ prefetch tp1 - str r4,[r11],#4 - subs r12,r12,#1 - bne .Lmix - - mov r0,#0 -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size AES_set_enc2dec_key,.-AES_set_enc2dec_key - -.type AES_Td,%object -.align 5 -AES_Td: -.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96 -.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393 -.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25 -.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f -.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1 -.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6 -.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da -.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844 -.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd -.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4 -.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45 -.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94 -.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7 -.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a -.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5 -.word 0x302887f2, 
0x23bfa5b2, 0x02036aba, 0xed16825c -.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1 -.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a -.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75 -.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051 -.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46 -.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff -.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77 -.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb -.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000 -.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e -.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927 -.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a -.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e -.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16 -.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d -.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8 -.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd -.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34 -.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163 -.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120 -.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d -.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0 -.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422 -.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef -.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36 -.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4 -.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662 -.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5 -.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3 -.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b -.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8 -.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6 -.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6 -.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0 -.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815 -.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f -.word 0x764dd68d, 
0x43efb04d, 0xccaa4d54, 0xe49604df -.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f -.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e -.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713 -.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89 -.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c -.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf -.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86 -.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f -.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541 -.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190 -.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 -@ Td4[256] -.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 -.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb -.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 -.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb -.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d -.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e -.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 -.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 -.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 -.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 -.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda -.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 -.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a -.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 -.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 -.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b -.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea -.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 -.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 -.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e -.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 -.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b -.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 -.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 -.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 -.byte 
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f -.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d -.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef -.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 -.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 -.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 -.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d -.size AES_Td,.-AES_Td - -@ void aes_nohw_decrypt(const unsigned char *in, unsigned char *out, -@ const AES_KEY *key) { -.globl aes_nohw_decrypt -.hidden aes_nohw_decrypt -.type aes_nohw_decrypt,%function -.align 5 -aes_nohw_decrypt: -#ifndef __thumb2__ - sub r3,pc,#8 @ aes_nohw_decrypt -#else - adr r3,. -#endif - stmdb sp!,{r1,r4-r12,lr} -#if defined(__thumb2__) || defined(__APPLE__) - adr r10,AES_Td -#else - sub r10,r3,#aes_nohw_decrypt-AES_Td @ Td -#endif - mov r12,r0 @ inp - mov r11,r2 -#if __ARM_ARCH__<7 - ldrb r0,[r12,#3] @ load input data in endian-neutral - ldrb r4,[r12,#2] @ manner... - ldrb r5,[r12,#1] - ldrb r6,[r12,#0] - orr r0,r0,r4,lsl#8 - ldrb r1,[r12,#7] - orr r0,r0,r5,lsl#16 - ldrb r4,[r12,#6] - orr r0,r0,r6,lsl#24 - ldrb r5,[r12,#5] - ldrb r6,[r12,#4] - orr r1,r1,r4,lsl#8 - ldrb r2,[r12,#11] - orr r1,r1,r5,lsl#16 - ldrb r4,[r12,#10] - orr r1,r1,r6,lsl#24 - ldrb r5,[r12,#9] - ldrb r6,[r12,#8] - orr r2,r2,r4,lsl#8 - ldrb r3,[r12,#15] - orr r2,r2,r5,lsl#16 - ldrb r4,[r12,#14] - orr r2,r2,r6,lsl#24 - ldrb r5,[r12,#13] - ldrb r6,[r12,#12] - orr r3,r3,r4,lsl#8 - orr r3,r3,r5,lsl#16 - orr r3,r3,r6,lsl#24 -#else - ldr r0,[r12,#0] - ldr r1,[r12,#4] - ldr r2,[r12,#8] - ldr r3,[r12,#12] -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif -#endif - bl _armv4_AES_decrypt - - ldr r12,[sp],#4 @ pop out -#if __ARM_ARCH__>=7 -#ifdef __ARMEL__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -#endif - str r0,[r12,#0] - str r1,[r12,#4] - str r2,[r12,#8] - str r3,[r12,#12] -#else - mov r4,r0,lsr#24 @ write output in endian-neutral - mov r5,r0,lsr#16 @ manner... 
- mov r6,r0,lsr#8 - strb r4,[r12,#0] - strb r5,[r12,#1] - mov r4,r1,lsr#24 - strb r6,[r12,#2] - mov r5,r1,lsr#16 - strb r0,[r12,#3] - mov r6,r1,lsr#8 - strb r4,[r12,#4] - strb r5,[r12,#5] - mov r4,r2,lsr#24 - strb r6,[r12,#6] - mov r5,r2,lsr#16 - strb r1,[r12,#7] - mov r6,r2,lsr#8 - strb r4,[r12,#8] - strb r5,[r12,#9] - mov r4,r3,lsr#24 - strb r6,[r12,#10] - mov r5,r3,lsr#16 - strb r2,[r12,#11] - mov r6,r3,lsr#8 - strb r4,[r12,#12] - strb r5,[r12,#13] - strb r6,[r12,#14] - strb r3,[r12,#15] -#endif -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size aes_nohw_decrypt,.-aes_nohw_decrypt - -.type _armv4_AES_decrypt,%function -.align 2 -_armv4_AES_decrypt: - str lr,[sp,#-4]! @ push lr - ldmia r11!,{r4,r5,r6,r7} - eor r0,r0,r4 - ldr r12,[r11,#240-16] - eor r1,r1,r5 - eor r2,r2,r6 - eor r3,r3,r7 - sub r12,r12,#1 - mov lr,#255 - - and r7,lr,r0,lsr#16 - and r8,lr,r0,lsr#8 - and r9,lr,r0 - mov r0,r0,lsr#24 -.Ldec_loop: - ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16] - and r7,lr,r1 @ i0 - ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8] - and r8,lr,r1,lsr#16 - ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0] - and r9,lr,r1,lsr#8 - ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24] - mov r1,r1,lsr#24 - - ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0] - ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16] - ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8] - eor r0,r0,r7,ror#24 - ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24] - and r7,lr,r2,lsr#8 @ i0 - eor r5,r8,r5,ror#8 - and r8,lr,r2 @ i1 - eor r6,r9,r6,ror#8 - and r9,lr,r2,lsr#16 - ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8] - eor r1,r1,r4,ror#8 - ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0] - mov r2,r2,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16] - eor r0,r0,r7,ror#16 - ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24] - and r7,lr,r3,lsr#16 @ i0 - eor r1,r1,r8,ror#24 - and r8,lr,r3,lsr#8 @ i1 - eor r6,r9,r6,ror#8 - and r9,lr,r3 @ i2 - ldr r7,[r10,r7,lsl#2] @ 
Td1[s3>>16] - eor r2,r2,r5,ror#8 - ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8] - mov r3,r3,lsr#24 - - ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0] - eor r0,r0,r7,ror#8 - ldr r7,[r11],#16 - eor r1,r1,r8,ror#16 - ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24] - eor r2,r2,r9,ror#24 - - ldr r4,[r11,#-12] - eor r0,r0,r7 - ldr r5,[r11,#-8] - eor r3,r3,r6,ror#8 - ldr r6,[r11,#-4] - and r7,lr,r0,lsr#16 - eor r1,r1,r4 - and r8,lr,r0,lsr#8 - eor r2,r2,r5 - and r9,lr,r0 - eor r3,r3,r6 - mov r0,r0,lsr#24 - - subs r12,r12,#1 - bne .Ldec_loop - - add r10,r10,#1024 - - ldr r5,[r10,#0] @ prefetch Td4 - ldr r6,[r10,#32] - ldr r4,[r10,#64] - ldr r5,[r10,#96] - ldr r6,[r10,#128] - ldr r4,[r10,#160] - ldr r5,[r10,#192] - ldr r6,[r10,#224] - - ldrb r0,[r10,r0] @ Td4[s0>>24] - ldrb r4,[r10,r7] @ Td4[s0>>16] - and r7,lr,r1 @ i0 - ldrb r5,[r10,r8] @ Td4[s0>>8] - and r8,lr,r1,lsr#16 - ldrb r6,[r10,r9] @ Td4[s0>>0] - and r9,lr,r1,lsr#8 - - add r1,r10,r1,lsr#24 - ldrb r7,[r10,r7] @ Td4[s1>>0] - ldrb r1,[r1] @ Td4[s1>>24] - ldrb r8,[r10,r8] @ Td4[s1>>16] - eor r0,r7,r0,lsl#24 - ldrb r9,[r10,r9] @ Td4[s1>>8] - eor r1,r4,r1,lsl#8 - and r7,lr,r2,lsr#8 @ i0 - eor r5,r5,r8,lsl#8 - and r8,lr,r2 @ i1 - ldrb r7,[r10,r7] @ Td4[s2>>8] - eor r6,r6,r9,lsl#8 - ldrb r8,[r10,r8] @ Td4[s2>>0] - and r9,lr,r2,lsr#16 - - add r2,r10,r2,lsr#24 - ldrb r2,[r2] @ Td4[s2>>24] - eor r0,r0,r7,lsl#8 - ldrb r9,[r10,r9] @ Td4[s2>>16] - eor r1,r8,r1,lsl#16 - and r7,lr,r3,lsr#16 @ i0 - eor r2,r5,r2,lsl#16 - and r8,lr,r3,lsr#8 @ i1 - ldrb r7,[r10,r7] @ Td4[s3>>16] - eor r6,r6,r9,lsl#16 - ldrb r8,[r10,r8] @ Td4[s3>>8] - and r9,lr,r3 @ i2 - - add r3,r10,r3,lsr#24 - ldrb r9,[r10,r9] @ Td4[s3>>0] - ldrb r3,[r3] @ Td4[s3>>24] - eor r0,r0,r7,lsl#16 - ldr r7,[r11,#0] - eor r1,r1,r8,lsl#8 - ldr r4,[r11,#4] - eor r2,r9,r2,lsl#8 - ldr r5,[r11,#8] - eor r3,r6,r3,lsl#24 - ldr r6,[r11,#12] - - eor r0,r0,r7 - eor r1,r1,r4 - eor r2,r2,r5 - eor r3,r3,r6 - - sub r10,r10,#1024 - ldr pc,[sp],#4 @ pop and return -.size _armv4_AES_decrypt,.-_armv4_AES_decrypt -.byte 
65,69,83,32,102,111,114,32,65,82,77,118,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aesv8-armx32.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aesv8-armx32.S deleted file mode 100644 index 5d6e22d029..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/aesv8-armx32.S +++ /dev/null @@ -1,781 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -#if __ARM_MAX_ARCH__>=7 -.text -.arch armv7-a @ don't confuse not-so-latest binutils with argv8 :-) -.fpu neon -.code 32 -#undef __thumb2__ -.align 5 -.Lrcon: -.long 0x01,0x01,0x01,0x01 -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat -.long 0x1b,0x1b,0x1b,0x1b - -.text - -.globl aes_hw_set_encrypt_key -.hidden aes_hw_set_encrypt_key -.type aes_hw_set_encrypt_key,%function -.align 5 -aes_hw_set_encrypt_key: -.Lenc_key: - mov r3,#-1 - cmp r0,#0 - beq .Lenc_key_abort - cmp r2,#0 - beq .Lenc_key_abort - mov r3,#-2 - cmp r1,#128 - blt .Lenc_key_abort - cmp r1,#256 - bgt .Lenc_key_abort - tst r1,#0x3f - bne .Lenc_key_abort - - adr r3,.Lrcon - cmp r1,#192 - - veor q0,q0,q0 - vld1.8 {q3},[r0]! - mov r1,#8 @ reuse r1 - vld1.32 {q1,q2},[r3]! - - blt .Loop128 - beq .L192 - b .L256 - -.align 4 -.Loop128: - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! 
-.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - bne .Loop128 - - vld1.32 {q1},[r3] - - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - - vtbl.8 d20,{q3},d4 - vtbl.8 d21,{q3},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q3},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - veor q3,q3,q10 - vst1.32 {q3},[r2] - add r2,r2,#0x50 - - mov r12,#10 - b .Ldone - -.align 4 -.L192: - vld1.8 {d16},[r0]! - vmov.i8 q10,#8 @ borrow q10 - vst1.32 {q3},[r2]! - vsub.i8 q2,q2,q10 @ adjust the mask - -.Loop192: - vtbl.8 d20,{q8},d4 - vtbl.8 d21,{q8},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {d16},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - - vdup.32 q9,d7[1] - veor q9,q9,q8 - veor q10,q10,q1 - vext.8 q8,q0,q8,#12 - vshl.u8 q1,q1,#1 - veor q8,q8,q9 - veor q3,q3,q10 - veor q8,q8,q10 - vst1.32 {q3},[r2]! - bne .Loop192 - - mov r12,#12 - add r2,r2,#0x20 - b .Ldone - -.align 4 -.L256: - vld1.8 {q8},[r0] - mov r1,#7 - mov r12,#14 - vst1.32 {q3},[r2]! - -.Loop256: - vtbl.8 d20,{q8},d4 - vtbl.8 d21,{q8},d5 - vext.8 q9,q0,q3,#12 - vst1.32 {q8},[r2]! -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - subs r1,r1,#1 - - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q3,q3,q9 - vext.8 q9,q0,q9,#12 - veor q10,q10,q1 - veor q3,q3,q9 - vshl.u8 q1,q1,#1 - veor q3,q3,q10 - vst1.32 {q3},[r2]! 
- beq .Ldone - - vdup.32 q10,d7[1] - vext.8 q9,q0,q8,#12 -.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 - - veor q8,q8,q9 - vext.8 q9,q0,q9,#12 - veor q8,q8,q9 - vext.8 q9,q0,q9,#12 - veor q8,q8,q9 - - veor q8,q8,q10 - b .Loop256 - -.Ldone: - str r12,[r2] - mov r3,#0 - -.Lenc_key_abort: - mov r0,r3 @ return value - - bx lr -.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key - -.globl aes_hw_set_decrypt_key -.hidden aes_hw_set_decrypt_key -.type aes_hw_set_decrypt_key,%function -.align 5 -aes_hw_set_decrypt_key: - stmdb sp!,{r4,lr} - bl .Lenc_key - - cmp r0,#0 - bne .Ldec_key_abort - - sub r2,r2,#240 @ restore original r2 - mov r4,#-16 - add r0,r2,r12,lsl#4 @ end of key schedule - - vld1.32 {q0},[r2] - vld1.32 {q1},[r0] - vst1.32 {q0},[r0],r4 - vst1.32 {q1},[r2]! - -.Loop_imc: - vld1.32 {q0},[r2] - vld1.32 {q1},[r0] -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 - vst1.32 {q0},[r0],r4 - vst1.32 {q1},[r2]! - cmp r0,r2 - bhi .Loop_imc - - vld1.32 {q0},[r2] -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 - vst1.32 {q0},[r0] - - eor r0,r0,r0 @ return value -.Ldec_key_abort: - ldmia sp!,{r4,pc} -.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key -.globl aes_hw_encrypt -.hidden aes_hw_encrypt -.type aes_hw_encrypt,%function -.align 5 -aes_hw_encrypt: - ldr r3,[r2,#240] - vld1.32 {q0},[r2]! - vld1.8 {q2},[r0] - sub r3,r3,#2 - vld1.32 {q1},[r2]! - -.Loop_enc: -.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q0},[r2]! - subs r3,r3,#2 -.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q1},[r2]! 
- bgt .Loop_enc - -.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 -.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 - vld1.32 {q0},[r2] -.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 - veor q2,q2,q0 - - vst1.8 {q2},[r1] - bx lr -.size aes_hw_encrypt,.-aes_hw_encrypt -.globl aes_hw_decrypt -.hidden aes_hw_decrypt -.type aes_hw_decrypt,%function -.align 5 -aes_hw_decrypt: - ldr r3,[r2,#240] - vld1.32 {q0},[r2]! - vld1.8 {q2},[r0] - sub r3,r3,#2 - vld1.32 {q1},[r2]! - -.Loop_dec: -.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q0},[r2]! - subs r3,r3,#2 -.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q1},[r2]! - bgt .Loop_dec - -.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 -.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 - vld1.32 {q0},[r2] -.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 - veor q2,q2,q0 - - vst1.8 {q2},[r1] - bx lr -.size aes_hw_decrypt,.-aes_hw_decrypt -.globl aes_hw_cbc_encrypt -.hidden aes_hw_cbc_encrypt -.type aes_hw_cbc_encrypt,%function -.align 5 -aes_hw_cbc_encrypt: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,lr} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldmia ip,{r4,r5} @ load remaining args - subs r2,r2,#16 - mov r8,#16 - blo .Lcbc_abort - moveq r8,#0 - - cmp r5,#0 @ en- or decrypting? - ldr r5,[r3,#240] - and r2,r2,#-16 - vld1.8 {q6},[r4] - vld1.8 {q0},[r0],r8 - - vld1.32 {q8,q9},[r3] @ load key schedule... - sub r5,r5,#6 - add r7,r3,r5,lsl#4 @ pointer to last 7 round keys - sub r5,r5,#2 - vld1.32 {q10,q11},[r7]! - vld1.32 {q12,q13},[r7]! - vld1.32 {q14,q15},[r7]! 
- vld1.32 {q7},[r7] - - add r7,r3,#32 - mov r6,r5 - beq .Lcbc_dec - - cmp r5,#2 - veor q0,q0,q6 - veor q5,q8,q7 - beq .Lcbc_enc128 - - vld1.32 {q2,q3},[r7] - add r7,r3,#16 - add r6,r3,#16*4 - add r12,r3,#16*5 -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - add r14,r3,#16*6 - add r3,r3,#16*7 - b .Lenter_cbc_enc - -.align 4 -.Loop_cbc_enc: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vst1.8 {q6},[r1]! -.Lenter_cbc_enc: -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q8},[r6] - cmp r5,#4 -.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r12] - beq .Lcbc_enc192 - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q8},[r14] -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r3] - nop - -.Lcbc_enc192: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - subs r2,r2,#16 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - moveq r8,#0 -.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.8 {q8},[r0],r8 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - veor q8,q8,q5 -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.32 {q9},[r7] @ re-pre-load rndkey[1] -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 - veor q6,q0,q7 - bhs .Loop_cbc_enc - - vst1.8 {q6},[r1]! 
- b .Lcbc_done - -.align 5 -.Lcbc_enc128: - vld1.32 {q2,q3},[r7] -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - b .Lenter_cbc_enc128 -.Loop_cbc_enc128: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vst1.8 {q6},[r1]! -.Lenter_cbc_enc128: -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - subs r2,r2,#16 -.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - moveq r8,#0 -.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - vld1.8 {q8},[r0],r8 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 - veor q8,q8,q5 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 - veor q6,q0,q7 - bhs .Loop_cbc_enc128 - - vst1.8 {q6},[r1]! - b .Lcbc_done -.align 5 -.Lcbc_dec: - vld1.8 {q10},[r0]! - subs r2,r2,#32 @ bias - add r6,r5,#2 - vorr q3,q0,q0 - vorr q1,q0,q0 - vorr q11,q10,q10 - blo .Lcbc_dec_tail - - vorr q1,q10,q10 - vld1.8 {q10},[r0]! - vorr q2,q0,q0 - vorr q3,q1,q1 - vorr q11,q10,q10 - -.Loop3x_cbc_dec: -.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q9},[r7]! 
- bgt .Loop3x_cbc_dec - -.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q4,q6,q7 - subs r2,r2,#0x30 - veor q5,q2,q7 - movlo r6,r2 @ r6, r6, is zero at this point -.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q9,q3,q7 - add r0,r0,r6 @ r0 is adjusted in such way that - @ at exit from the loop q1-q10 - @ are loaded with last "words" - vorr q6,q11,q11 - mov r7,r3 -.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q2},[r0]! -.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q3},[r0]! -.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14 -.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 -.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.8 {q11},[r0]! -.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15 -.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 -.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 - vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] - add r6,r5,#2 - veor q4,q4,q0 - veor q5,q5,q1 - veor q10,q10,q9 - vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] - vst1.8 {q4},[r1]! - vorr q0,q2,q2 - vst1.8 {q5},[r1]! - vorr q1,q3,q3 - vst1.8 {q10},[r1]! 
- vorr q10,q11,q11 - bhs .Loop3x_cbc_dec - - cmn r2,#0x30 - beq .Lcbc_done - nop - -.Lcbc_dec_tail: -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - vld1.32 {q9},[r7]! - bgt .Lcbc_dec_tail - -.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 -.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 -.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - cmn r2,#0x20 -.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q5,q6,q7 -.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 -.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 -.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 -.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 - veor q9,q3,q7 -.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 -.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 - beq .Lcbc_dec_one - veor q5,q5,q1 - veor q9,q9,q10 - vorr q6,q11,q11 - vst1.8 {q5},[r1]! - vst1.8 {q9},[r1]! - b .Lcbc_done - -.Lcbc_dec_one: - veor q5,q5,q10 - vorr q6,q11,q11 - vst1.8 {q5},[r1]! 
- -.Lcbc_done: - vst1.8 {q6},[r4] -.Lcbc_abort: - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,pc} -.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt -.globl aes_hw_ctr32_encrypt_blocks -.hidden aes_hw_ctr32_encrypt_blocks -.type aes_hw_ctr32_encrypt_blocks,%function -.align 5 -aes_hw_ctr32_encrypt_blocks: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldr r4, [ip] @ load remaining arg - ldr r5,[r3,#240] - - ldr r8, [r4, #12] - vld1.32 {q0},[r4] - - vld1.32 {q8,q9},[r3] @ load key schedule... - sub r5,r5,#4 - mov r12,#16 - cmp r2,#2 - add r7,r3,r5,lsl#4 @ pointer to last 5 round keys - sub r5,r5,#2 - vld1.32 {q12,q13},[r7]! - vld1.32 {q14,q15},[r7]! - vld1.32 {q7},[r7] - add r7,r3,#32 - mov r6,r5 - movlo r12,#0 -#ifndef __ARMEB__ - rev r8, r8 -#endif - vorr q1,q0,q0 - add r10, r8, #1 - vorr q10,q0,q0 - add r8, r8, #2 - vorr q6,q0,q0 - rev r10, r10 - vmov.32 d3[1],r10 - bls .Lctr32_tail - rev r12, r8 - sub r2,r2,#3 @ bias - vmov.32 d21[1],r12 - b .Loop3x_ctr32 - -.align 4 -.Loop3x_ctr32: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.32 {q8},[r7]! - subs r6,r6,#2 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.32 {q9},[r7]! - bgt .Loop3x_ctr32 - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 - vld1.8 {q2},[r0]! - vorr q0,q6,q6 -.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 -.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 - vld1.8 {q3},[r0]! 
- vorr q1,q6,q6 -.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vld1.8 {q11},[r0]! - mov r7,r3 -.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 -.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 - vorr q10,q6,q6 - add r9,r8,#1 -.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - veor q2,q2,q7 - add r10,r8,#2 -.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - veor q3,q3,q7 - add r8,r8,#3 -.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - veor q11,q11,q7 - rev r9,r9 -.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d1[1], r9 - rev r10,r10 -.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 -.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 -.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 -.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vmov.32 d3[1], r10 - rev r12,r8 -.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 -.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d21[1], r12 - subs r2,r2,#3 -.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 -.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 -.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15 - - veor q2,q2,q4 - vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] - vst1.8 {q2},[r1]! - veor q3,q3,q5 - mov r6,r5 - vst1.8 {q3},[r1]! - veor q11,q11,q9 - vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] - vst1.8 {q11},[r1]! - bhs .Loop3x_ctr32 - - adds r2,r2,#3 - beq .Lctr32_done - cmp r2,#1 - mov r12,#16 - moveq r12,#0 - -.Lctr32_tail: -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.32 {q8},[r7]! 
- subs r6,r6,#2 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.32 {q9},[r7]! - bgt .Lctr32_tail - -.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 -.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.8 {q2},[r0],r12 -.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - vld1.8 {q3},[r0] -.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - veor q2,q2,q7 -.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 -.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 -.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14 -.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 - veor q3,q3,q7 -.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 -.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15 - - cmp r2,#1 - veor q2,q2,q0 - veor q3,q3,q1 - vst1.8 {q2},[r1]! - beq .Lctr32_done - vst1.8 {q3},[r1] - -.Lctr32_done: - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc} -.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/armv4-mont.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/armv4-mont.S deleted file mode 100644 index 029689475b..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/armv4-mont.S +++ /dev/null @@ -1,977 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. -.arch armv7-a - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -#if __ARM_MAX_ARCH__>=7 -.align 5 -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-.Lbn_mul_mont -#endif - -.globl bn_mul_mont -.hidden bn_mul_mont -.type bn_mul_mont,%function - -.align 5 -bn_mul_mont: -.Lbn_mul_mont: - ldr ip,[sp,#4] @ load num - stmdb sp!,{r0,r2} @ sp points at argument block -#if __ARM_MAX_ARCH__>=7 - tst ip,#7 - bne .Lialu - adr r0,.Lbn_mul_mont - ldr r2,.LOPENSSL_armcap - ldr r0,[r0,r2] -#ifdef __APPLE__ - ldr r0,[r0] -#endif - tst r0,#ARMV7_NEON @ NEON available? 
- ldmia sp, {r0,r2} - beq .Lialu - add sp,sp,#8 - b bn_mul8x_mont_neon -.align 4 -.Lialu: -#endif - cmp ip,#2 - mov r0,ip @ load num -#ifdef __thumb2__ - ittt lt -#endif - movlt r0,#0 - addlt sp,sp,#2*4 - blt .Labrt - - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers - - mov r0,r0,lsl#2 @ rescale r0 for byte count - sub sp,sp,r0 @ alloca(4*num) - sub sp,sp,#4 @ +extra dword - sub r0,r0,#4 @ "num=num-1" - add r4,r2,r0 @ &bp[num-1] - - add r0,sp,r0 @ r0 to point at &tp[num-1] - ldr r8,[r0,#14*4] @ &n0 - ldr r2,[r2] @ bp[0] - ldr r5,[r1],#4 @ ap[0],ap++ - ldr r6,[r3],#4 @ np[0],np++ - ldr r8,[r8] @ *n0 - str r4,[r0,#15*4] @ save &bp[num] - - umull r10,r11,r5,r2 @ ap[0]*bp[0] - str r8,[r0,#14*4] @ save n0 value - mul r8,r10,r8 @ "tp[0]"*n0 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" - mov r4,sp - -.L1st: - ldr r5,[r1],#4 @ ap[j],ap++ - mov r10,r11 - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[0] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne .L1st - - adds r12,r12,r11 - ldr r4,[r0,#13*4] @ restore bp - mov r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - mov r7,sp - str r14,[r0,#4] @ tp[num]= - -.Louter: - sub r7,r0,r7 @ "original" r0-1 value - sub r1,r1,r7 @ "rewind" ap to &ap[1] - ldr r2,[r4,#4]! 
@ *(++bp) - sub r3,r3,r7 @ "rewind" np to &np[1] - ldr r5,[r1,#-4] @ ap[0] - ldr r10,[sp] @ tp[0] - ldr r6,[r3,#-4] @ np[0] - ldr r7,[sp,#4] @ tp[1] - - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] - str r4,[r0,#13*4] @ save bp - mul r8,r10,r8 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" - mov r4,sp - -.Linner: - ldr r5,[r1],#4 @ ap[j],ap++ - adds r10,r11,r7 @ +=tp[j] - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[i] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adc r11,r11,#0 - ldr r7,[r4,#8] @ tp[j+1] - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne .Linner - - adds r12,r12,r11 - mov r14,#0 - ldr r4,[r0,#13*4] @ restore bp - adc r14,r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adds r12,r12,r7 - ldr r7,[r0,#15*4] @ restore &bp[num] - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - str r14,[r0,#4] @ tp[num]= - - cmp r4,r7 -#ifdef __thumb2__ - itt ne -#endif - movne r7,sp - bne .Louter - - ldr r2,[r0,#12*4] @ pull rp - mov r5,sp - add r0,r0,#4 @ r0 to point at &tp[num] - sub r5,r0,r5 @ "original" num value - mov r4,sp @ "rewind" r4 - mov r1,r4 @ "borrow" r1 - sub r3,r3,r5 @ "rewind" r3 to &np[0] - - subs r7,r7,r7 @ "clear" carry flag -.Lsub: ldr r7,[r4],#4 - ldr r6,[r3],#4 - sbcs r7,r7,r6 @ tp[j]-np[j] - str r7,[r2],#4 @ rp[j]= - teq r4,r0 @ preserve carry - bne .Lsub - sbcs r14,r14,#0 @ upmost carry - mov r4,sp @ "rewind" r4 - sub r2,r2,r5 @ "rewind" r2 - -.Lcopy: ldr r7,[r4] @ conditional copy - ldr r5,[r2] - str sp,[r4],#4 @ zap tp -#ifdef __thumb2__ - it cc -#endif - movcc r5,r7 - str r5,[r2],#4 - teq r4,r0 @ preserve carry - bne .Lcopy - - mov sp,r0 - add sp,sp,#4 @ skip over tp[num+1] - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers - add sp,sp,#2*4 @ skip over {r0,r2} - mov r0,#1 -.Labrt: -#if __ARM_ARCH__>=5 - bx lr @ bx lr -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size 
bn_mul_mont,.-bn_mul_mont -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.type bn_mul8x_mont_neon,%function -.align 5 -bn_mul8x_mont_neon: - mov ip,sp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - ldmia ip,{r4,r5} @ load rest of parameter block - mov ip,sp - - cmp r5,#8 - bhi .LNEON_8n - - @ special case for r5==8, everything is in register bank... - - vld1.32 {d28[0]}, [r2,:32]! - veor d8,d8,d8 - sub r7,sp,r5,lsl#4 - vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-( - and r7,r7,#-64 - vld1.32 {d30[0]}, [r4,:32] - mov sp,r7 @ alloca - vzip.16 d28,d8 - - vmull.u32 q6,d28,d0[0] - vmull.u32 q7,d28,d0[1] - vmull.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmull.u32 q9,d28,d1[1] - - vadd.u64 d29,d29,d12 - veor d8,d8,d8 - vmul.u32 d29,d29,d30 - - vmull.u32 q10,d28,d2[0] - vld1.32 {d4,d5,d6,d7}, [r3]! - vmull.u32 q11,d28,d2[1] - vmull.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmull.u32 q13,d28,d3[1] - - vmlal.u32 q6,d29,d4[0] - sub r9,r5,#1 - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - - vmlal.u32 q10,d29,d6[0] - vmov q5,q6 - vmlal.u32 q11,d29,d6[1] - vmov q6,q7 - vmlal.u32 q12,d29,d7[0] - vmov q7,q8 - vmlal.u32 q13,d29,d7[1] - vmov q8,q9 - vmov q9,q10 - vshr.u64 d10,d10,#16 - vmov q10,q11 - vmov q11,q12 - vadd.u64 d10,d10,d11 - vmov q12,q13 - veor q13,q13 - vshr.u64 d10,d10,#16 - - b .LNEON_outer8 - -.align 4 -.LNEON_outer8: - vld1.32 {d28[0]}, [r2,:32]! 
- veor d8,d8,d8 - vzip.16 d28,d8 - vadd.u64 d12,d12,d10 - - vmlal.u32 q6,d28,d0[0] - vmlal.u32 q7,d28,d0[1] - vmlal.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmlal.u32 q9,d28,d1[1] - - vadd.u64 d29,d29,d12 - veor d8,d8,d8 - subs r9,r9,#1 - vmul.u32 d29,d29,d30 - - vmlal.u32 q10,d28,d2[0] - vmlal.u32 q11,d28,d2[1] - vmlal.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q13,d28,d3[1] - - vmlal.u32 q6,d29,d4[0] - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - - vmlal.u32 q10,d29,d6[0] - vmov q5,q6 - vmlal.u32 q11,d29,d6[1] - vmov q6,q7 - vmlal.u32 q12,d29,d7[0] - vmov q7,q8 - vmlal.u32 q13,d29,d7[1] - vmov q8,q9 - vmov q9,q10 - vshr.u64 d10,d10,#16 - vmov q10,q11 - vmov q11,q12 - vadd.u64 d10,d10,d11 - vmov q12,q13 - veor q13,q13 - vshr.u64 d10,d10,#16 - - bne .LNEON_outer8 - - vadd.u64 d12,d12,d10 - mov r7,sp - vshr.u64 d10,d12,#16 - mov r8,r5 - vadd.u64 d13,d13,d10 - add r6,sp,#96 - vshr.u64 d10,d13,#16 - vzip.16 d12,d13 - - b .LNEON_tail_entry - -.align 4 -.LNEON_8n: - veor q6,q6,q6 - sub r7,sp,#128 - veor q7,q7,q7 - sub r7,r7,r5,lsl#4 - veor q8,q8,q8 - and r7,r7,#-64 - veor q9,q9,q9 - mov sp,r7 @ alloca - veor q10,q10,q10 - add r7,r7,#256 - veor q11,q11,q11 - sub r8,r5,#8 - veor q12,q12,q12 - veor q13,q13,q13 - -.LNEON_8n_init: - vst1.64 {q6,q7},[r7,:256]! - subs r8,r8,#8 - vst1.64 {q8,q9},[r7,:256]! - vst1.64 {q10,q11},[r7,:256]! - vst1.64 {q12,q13},[r7,:256]! - bne .LNEON_8n_init - - add r6,sp,#256 - vld1.32 {d0,d1,d2,d3},[r1]! - add r10,sp,#8 - vld1.32 {d30[0]},[r4,:32] - mov r9,r5 - b .LNEON_8n_outer - -.align 4 -.LNEON_8n_outer: - vld1.32 {d28[0]},[r2,:32]! @ *b++ - veor d8,d8,d8 - vzip.16 d28,d8 - add r7,sp,#128 - vld1.32 {d4,d5,d6,d7},[r3]! 
- - vmlal.u32 q6,d28,d0[0] - vmlal.u32 q7,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q8,d28,d1[0] - vshl.i64 d29,d13,#16 - vmlal.u32 q9,d28,d1[1] - vadd.u64 d29,d29,d12 - vmlal.u32 q10,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q11,d28,d2[1] - vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0] - vmlal.u32 q12,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q13,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q6,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q7,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q8,d29,d5[0] - vshr.u64 d12,d12,#16 - vmlal.u32 q9,d29,d5[1] - vmlal.u32 q10,d29,d6[0] - vadd.u64 d12,d12,d13 - vmlal.u32 q11,d29,d6[1] - vshr.u64 d12,d12,#16 - vmlal.u32 q12,d29,d7[0] - vmlal.u32 q13,d29,d7[1] - vadd.u64 d14,d14,d12 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0] - vmlal.u32 q7,d28,d0[0] - vld1.64 {q6},[r6,:128]! - vmlal.u32 q8,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q9,d28,d1[0] - vshl.i64 d29,d15,#16 - vmlal.u32 q10,d28,d1[1] - vadd.u64 d29,d29,d14 - vmlal.u32 q11,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q12,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1] - vmlal.u32 q13,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q6,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q7,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q8,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q9,d29,d5[0] - vshr.u64 d14,d14,#16 - vmlal.u32 q10,d29,d5[1] - vmlal.u32 q11,d29,d6[0] - vadd.u64 d14,d14,d15 - vmlal.u32 q12,d29,d6[1] - vshr.u64 d14,d14,#16 - vmlal.u32 q13,d29,d7[0] - vmlal.u32 q6,d29,d7[1] - vadd.u64 d16,d16,d14 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1] - vmlal.u32 q8,d28,d0[0] - vld1.64 {q7},[r6,:128]! - vmlal.u32 q9,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q10,d28,d1[0] - vshl.i64 d29,d17,#16 - vmlal.u32 q11,d28,d1[1] - vadd.u64 d29,d29,d16 - vmlal.u32 q12,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q13,d28,d2[1] - vst1.32 {d28},[r10,:64]! 
@ put aside smashed b[8*i+2] - vmlal.u32 q6,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q7,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q8,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q9,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q10,d29,d5[0] - vshr.u64 d16,d16,#16 - vmlal.u32 q11,d29,d5[1] - vmlal.u32 q12,d29,d6[0] - vadd.u64 d16,d16,d17 - vmlal.u32 q13,d29,d6[1] - vshr.u64 d16,d16,#16 - vmlal.u32 q6,d29,d7[0] - vmlal.u32 q7,d29,d7[1] - vadd.u64 d18,d18,d16 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2] - vmlal.u32 q9,d28,d0[0] - vld1.64 {q8},[r6,:128]! - vmlal.u32 q10,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q11,d28,d1[0] - vshl.i64 d29,d19,#16 - vmlal.u32 q12,d28,d1[1] - vadd.u64 d29,d29,d18 - vmlal.u32 q13,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q6,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3] - vmlal.u32 q7,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q8,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q9,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q10,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q11,d29,d5[0] - vshr.u64 d18,d18,#16 - vmlal.u32 q12,d29,d5[1] - vmlal.u32 q13,d29,d6[0] - vadd.u64 d18,d18,d19 - vmlal.u32 q6,d29,d6[1] - vshr.u64 d18,d18,#16 - vmlal.u32 q7,d29,d7[0] - vmlal.u32 q8,d29,d7[1] - vadd.u64 d20,d20,d18 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3] - vmlal.u32 q10,d28,d0[0] - vld1.64 {q9},[r6,:128]! - vmlal.u32 q11,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q12,d28,d1[0] - vshl.i64 d29,d21,#16 - vmlal.u32 q13,d28,d1[1] - vadd.u64 d29,d29,d20 - vmlal.u32 q6,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q7,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4] - vmlal.u32 q8,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q9,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! 
@ *b++ - vmlal.u32 q10,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q11,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q12,d29,d5[0] - vshr.u64 d20,d20,#16 - vmlal.u32 q13,d29,d5[1] - vmlal.u32 q6,d29,d6[0] - vadd.u64 d20,d20,d21 - vmlal.u32 q7,d29,d6[1] - vshr.u64 d20,d20,#16 - vmlal.u32 q8,d29,d7[0] - vmlal.u32 q9,d29,d7[1] - vadd.u64 d22,d22,d20 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4] - vmlal.u32 q11,d28,d0[0] - vld1.64 {q10},[r6,:128]! - vmlal.u32 q12,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q13,d28,d1[0] - vshl.i64 d29,d23,#16 - vmlal.u32 q6,d28,d1[1] - vadd.u64 d29,d29,d22 - vmlal.u32 q7,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q8,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5] - vmlal.u32 q9,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q10,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q11,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q12,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q13,d29,d5[0] - vshr.u64 d22,d22,#16 - vmlal.u32 q6,d29,d5[1] - vmlal.u32 q7,d29,d6[0] - vadd.u64 d22,d22,d23 - vmlal.u32 q8,d29,d6[1] - vshr.u64 d22,d22,#16 - vmlal.u32 q9,d29,d7[0] - vmlal.u32 q10,d29,d7[1] - vadd.u64 d24,d24,d22 - vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5] - vmlal.u32 q12,d28,d0[0] - vld1.64 {q11},[r6,:128]! - vmlal.u32 q13,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q6,d28,d1[0] - vshl.i64 d29,d25,#16 - vmlal.u32 q7,d28,d1[1] - vadd.u64 d29,d29,d24 - vmlal.u32 q8,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q9,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6] - vmlal.u32 q10,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q11,d28,d3[1] - vld1.32 {d28[0]},[r2,:32]! @ *b++ - vmlal.u32 q12,d29,d4[0] - veor d10,d10,d10 - vmlal.u32 q13,d29,d4[1] - vzip.16 d28,d10 - vmlal.u32 q6,d29,d5[0] - vshr.u64 d24,d24,#16 - vmlal.u32 q7,d29,d5[1] - vmlal.u32 q8,d29,d6[0] - vadd.u64 d24,d24,d25 - vmlal.u32 q9,d29,d6[1] - vshr.u64 d24,d24,#16 - vmlal.u32 q10,d29,d7[0] - vmlal.u32 q11,d29,d7[1] - vadd.u64 d26,d26,d24 - vst1.32 {d29},[r10,:64]! 
@ put aside smashed m[8*i+6] - vmlal.u32 q13,d28,d0[0] - vld1.64 {q12},[r6,:128]! - vmlal.u32 q6,d28,d0[1] - veor d8,d8,d8 - vmlal.u32 q7,d28,d1[0] - vshl.i64 d29,d27,#16 - vmlal.u32 q8,d28,d1[1] - vadd.u64 d29,d29,d26 - vmlal.u32 q9,d28,d2[0] - vmul.u32 d29,d29,d30 - vmlal.u32 q10,d28,d2[1] - vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7] - vmlal.u32 q11,d28,d3[0] - vzip.16 d29,d8 - vmlal.u32 q12,d28,d3[1] - vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] - vmlal.u32 q13,d29,d4[0] - vld1.32 {d0,d1,d2,d3},[r1]! - vmlal.u32 q6,d29,d4[1] - vmlal.u32 q7,d29,d5[0] - vshr.u64 d26,d26,#16 - vmlal.u32 q8,d29,d5[1] - vmlal.u32 q9,d29,d6[0] - vadd.u64 d26,d26,d27 - vmlal.u32 q10,d29,d6[1] - vshr.u64 d26,d26,#16 - vmlal.u32 q11,d29,d7[0] - vmlal.u32 q12,d29,d7[1] - vadd.u64 d12,d12,d26 - vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7] - add r10,sp,#8 @ rewind - sub r8,r5,#8 - b .LNEON_8n_inner - -.align 4 -.LNEON_8n_inner: - subs r8,r8,#8 - vmlal.u32 q6,d28,d0[0] - vld1.64 {q13},[r6,:128] - vmlal.u32 q7,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0] - vmlal.u32 q8,d28,d1[0] - vld1.32 {d4,d5,d6,d7},[r3]! - vmlal.u32 q9,d28,d1[1] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q10,d28,d2[0] - vmlal.u32 q11,d28,d2[1] - vmlal.u32 q12,d28,d3[0] - vmlal.u32 q13,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1] - vmlal.u32 q6,d29,d4[0] - vmlal.u32 q7,d29,d4[1] - vmlal.u32 q8,d29,d5[0] - vmlal.u32 q9,d29,d5[1] - vmlal.u32 q10,d29,d6[0] - vmlal.u32 q11,d29,d6[1] - vmlal.u32 q12,d29,d7[0] - vmlal.u32 q13,d29,d7[1] - vst1.64 {q6},[r7,:128]! - vmlal.u32 q7,d28,d0[0] - vld1.64 {q6},[r6,:128] - vmlal.u32 q8,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1] - vmlal.u32 q9,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q10,d28,d1[1] - vmlal.u32 q11,d28,d2[0] - vmlal.u32 q12,d28,d2[1] - vmlal.u32 q13,d28,d3[0] - vmlal.u32 q6,d28,d3[1] - vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+2] - vmlal.u32 q7,d29,d4[0] - vmlal.u32 q8,d29,d4[1] - vmlal.u32 q9,d29,d5[0] - vmlal.u32 q10,d29,d5[1] - vmlal.u32 q11,d29,d6[0] - vmlal.u32 q12,d29,d6[1] - vmlal.u32 q13,d29,d7[0] - vmlal.u32 q6,d29,d7[1] - vst1.64 {q7},[r7,:128]! - vmlal.u32 q8,d28,d0[0] - vld1.64 {q7},[r6,:128] - vmlal.u32 q9,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2] - vmlal.u32 q10,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q11,d28,d1[1] - vmlal.u32 q12,d28,d2[0] - vmlal.u32 q13,d28,d2[1] - vmlal.u32 q6,d28,d3[0] - vmlal.u32 q7,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3] - vmlal.u32 q8,d29,d4[0] - vmlal.u32 q9,d29,d4[1] - vmlal.u32 q10,d29,d5[0] - vmlal.u32 q11,d29,d5[1] - vmlal.u32 q12,d29,d6[0] - vmlal.u32 q13,d29,d6[1] - vmlal.u32 q6,d29,d7[0] - vmlal.u32 q7,d29,d7[1] - vst1.64 {q8},[r7,:128]! - vmlal.u32 q9,d28,d0[0] - vld1.64 {q8},[r6,:128] - vmlal.u32 q10,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3] - vmlal.u32 q11,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q12,d28,d1[1] - vmlal.u32 q13,d28,d2[0] - vmlal.u32 q6,d28,d2[1] - vmlal.u32 q7,d28,d3[0] - vmlal.u32 q8,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4] - vmlal.u32 q9,d29,d4[0] - vmlal.u32 q10,d29,d4[1] - vmlal.u32 q11,d29,d5[0] - vmlal.u32 q12,d29,d5[1] - vmlal.u32 q13,d29,d6[0] - vmlal.u32 q6,d29,d6[1] - vmlal.u32 q7,d29,d7[0] - vmlal.u32 q8,d29,d7[1] - vst1.64 {q9},[r7,:128]! - vmlal.u32 q10,d28,d0[0] - vld1.64 {q9},[r6,:128] - vmlal.u32 q11,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4] - vmlal.u32 q12,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q13,d28,d1[1] - vmlal.u32 q6,d28,d2[0] - vmlal.u32 q7,d28,d2[1] - vmlal.u32 q8,d28,d3[0] - vmlal.u32 q9,d28,d3[1] - vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+5] - vmlal.u32 q10,d29,d4[0] - vmlal.u32 q11,d29,d4[1] - vmlal.u32 q12,d29,d5[0] - vmlal.u32 q13,d29,d5[1] - vmlal.u32 q6,d29,d6[0] - vmlal.u32 q7,d29,d6[1] - vmlal.u32 q8,d29,d7[0] - vmlal.u32 q9,d29,d7[1] - vst1.64 {q10},[r7,:128]! - vmlal.u32 q11,d28,d0[0] - vld1.64 {q10},[r6,:128] - vmlal.u32 q12,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5] - vmlal.u32 q13,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q6,d28,d1[1] - vmlal.u32 q7,d28,d2[0] - vmlal.u32 q8,d28,d2[1] - vmlal.u32 q9,d28,d3[0] - vmlal.u32 q10,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6] - vmlal.u32 q11,d29,d4[0] - vmlal.u32 q12,d29,d4[1] - vmlal.u32 q13,d29,d5[0] - vmlal.u32 q6,d29,d5[1] - vmlal.u32 q7,d29,d6[0] - vmlal.u32 q8,d29,d6[1] - vmlal.u32 q9,d29,d7[0] - vmlal.u32 q10,d29,d7[1] - vst1.64 {q11},[r7,:128]! - vmlal.u32 q12,d28,d0[0] - vld1.64 {q11},[r6,:128] - vmlal.u32 q13,d28,d0[1] - vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6] - vmlal.u32 q6,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q7,d28,d1[1] - vmlal.u32 q8,d28,d2[0] - vmlal.u32 q9,d28,d2[1] - vmlal.u32 q10,d28,d3[0] - vmlal.u32 q11,d28,d3[1] - vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7] - vmlal.u32 q12,d29,d4[0] - vmlal.u32 q13,d29,d4[1] - vmlal.u32 q6,d29,d5[0] - vmlal.u32 q7,d29,d5[1] - vmlal.u32 q8,d29,d6[0] - vmlal.u32 q9,d29,d6[1] - vmlal.u32 q10,d29,d7[0] - vmlal.u32 q11,d29,d7[1] - vst1.64 {q12},[r7,:128]! - vmlal.u32 q13,d28,d0[0] - vld1.64 {q12},[r6,:128] - vmlal.u32 q6,d28,d0[1] - vld1.32 {d29},[r10,:64]! 
@ pull smashed m[8*i+7] - vmlal.u32 q7,d28,d1[0] - it ne - addne r6,r6,#16 @ don't advance in last iteration - vmlal.u32 q8,d28,d1[1] - vmlal.u32 q9,d28,d2[0] - vmlal.u32 q10,d28,d2[1] - vmlal.u32 q11,d28,d3[0] - vmlal.u32 q12,d28,d3[1] - it eq - subeq r1,r1,r5,lsl#2 @ rewind - vmlal.u32 q13,d29,d4[0] - vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] - vmlal.u32 q6,d29,d4[1] - vld1.32 {d0,d1,d2,d3},[r1]! - vmlal.u32 q7,d29,d5[0] - add r10,sp,#8 @ rewind - vmlal.u32 q8,d29,d5[1] - vmlal.u32 q9,d29,d6[0] - vmlal.u32 q10,d29,d6[1] - vmlal.u32 q11,d29,d7[0] - vst1.64 {q13},[r7,:128]! - vmlal.u32 q12,d29,d7[1] - - bne .LNEON_8n_inner - add r6,sp,#128 - vst1.64 {q6,q7},[r7,:256]! - veor q2,q2,q2 @ d4-d5 - vst1.64 {q8,q9},[r7,:256]! - veor q3,q3,q3 @ d6-d7 - vst1.64 {q10,q11},[r7,:256]! - vst1.64 {q12},[r7,:128] - - subs r9,r9,#8 - vld1.64 {q6,q7},[r6,:256]! - vld1.64 {q8,q9},[r6,:256]! - vld1.64 {q10,q11},[r6,:256]! - vld1.64 {q12,q13},[r6,:256]! - - itt ne - subne r3,r3,r5,lsl#2 @ rewind - bne .LNEON_8n_outer - - add r7,sp,#128 - vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame - vshr.u64 d10,d12,#16 - vst1.64 {q2,q3},[sp,:256]! - vadd.u64 d13,d13,d10 - vst1.64 {q2,q3}, [sp,:256]! - vshr.u64 d10,d13,#16 - vst1.64 {q2,q3}, [sp,:256]! - vzip.16 d12,d13 - - mov r8,r5 - b .LNEON_tail_entry - -.align 4 -.LNEON_tail: - vadd.u64 d12,d12,d10 - vshr.u64 d10,d12,#16 - vld1.64 {q8,q9}, [r6, :256]! - vadd.u64 d13,d13,d10 - vld1.64 {q10,q11}, [r6, :256]! - vshr.u64 d10,d13,#16 - vld1.64 {q12,q13}, [r6, :256]! - vzip.16 d12,d13 - -.LNEON_tail_entry: - vadd.u64 d14,d14,d10 - vst1.32 {d12[0]}, [r7, :32]! - vshr.u64 d10,d14,#16 - vadd.u64 d15,d15,d10 - vshr.u64 d10,d15,#16 - vzip.16 d14,d15 - vadd.u64 d16,d16,d10 - vst1.32 {d14[0]}, [r7, :32]! - vshr.u64 d10,d16,#16 - vadd.u64 d17,d17,d10 - vshr.u64 d10,d17,#16 - vzip.16 d16,d17 - vadd.u64 d18,d18,d10 - vst1.32 {d16[0]}, [r7, :32]! 
- vshr.u64 d10,d18,#16 - vadd.u64 d19,d19,d10 - vshr.u64 d10,d19,#16 - vzip.16 d18,d19 - vadd.u64 d20,d20,d10 - vst1.32 {d18[0]}, [r7, :32]! - vshr.u64 d10,d20,#16 - vadd.u64 d21,d21,d10 - vshr.u64 d10,d21,#16 - vzip.16 d20,d21 - vadd.u64 d22,d22,d10 - vst1.32 {d20[0]}, [r7, :32]! - vshr.u64 d10,d22,#16 - vadd.u64 d23,d23,d10 - vshr.u64 d10,d23,#16 - vzip.16 d22,d23 - vadd.u64 d24,d24,d10 - vst1.32 {d22[0]}, [r7, :32]! - vshr.u64 d10,d24,#16 - vadd.u64 d25,d25,d10 - vshr.u64 d10,d25,#16 - vzip.16 d24,d25 - vadd.u64 d26,d26,d10 - vst1.32 {d24[0]}, [r7, :32]! - vshr.u64 d10,d26,#16 - vadd.u64 d27,d27,d10 - vshr.u64 d10,d27,#16 - vzip.16 d26,d27 - vld1.64 {q6,q7}, [r6, :256]! - subs r8,r8,#8 - vst1.32 {d26[0]}, [r7, :32]! - bne .LNEON_tail - - vst1.32 {d10[0]}, [r7, :32] @ top-most bit - sub r3,r3,r5,lsl#2 @ rewind r3 - subs r1,sp,#0 @ clear carry flag - add r2,sp,r5,lsl#2 - -.LNEON_sub: - ldmia r1!, {r4,r5,r6,r7} - ldmia r3!, {r8,r9,r10,r11} - sbcs r8, r4,r8 - sbcs r9, r5,r9 - sbcs r10,r6,r10 - sbcs r11,r7,r11 - teq r1,r2 @ preserves carry - stmia r0!, {r8,r9,r10,r11} - bne .LNEON_sub - - ldr r10, [r1] @ load top-most bit - mov r11,sp - veor q0,q0,q0 - sub r11,r2,r11 @ this is num*4 - veor q1,q1,q1 - mov r1,sp - sub r0,r0,r11 @ rewind r0 - mov r3,r2 @ second 3/4th of frame - sbcs r10,r10,#0 @ result is carry flag - -.LNEON_copy_n_zap: - ldmia r1!, {r4,r5,r6,r7} - ldmia r0, {r8,r9,r10,r11} - it cc - movcc r8, r4 - vst1.64 {q0,q1}, [r3,:256]! @ wipe - itt cc - movcc r9, r5 - movcc r10,r6 - vst1.64 {q0,q1}, [r3,:256]! @ wipe - it cc - movcc r11,r7 - ldmia r1, {r4,r5,r6,r7} - stmia r0!, {r8,r9,r10,r11} - sub r1,r1,#16 - ldmia r0, {r8,r9,r10,r11} - it cc - movcc r8, r4 - vst1.64 {q0,q1}, [r1,:256]! @ wipe - itt cc - movcc r9, r5 - movcc r10,r6 - vst1.64 {q0,q1}, [r3,:256]! 
@ wipe - it cc - movcc r11,r7 - teq r1,r2 @ preserves carry - stmia r0!, {r8,r9,r10,r11} - bne .LNEON_copy_n_zap - - mov sp,ip - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11} - bx lr @ bx lr -.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon -#endif -.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/bsaes-armv7.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/bsaes-armv7.S deleted file mode 100644 index 69a8fcacd0..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/bsaes-armv7.S +++ /dev/null @@ -1,1529 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. 
The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel -@ of Linaro. Permission to use under GPL terms is granted. -@ ==================================================================== - -@ Bit-sliced AES for ARM NEON -@ -@ February 2012. -@ -@ This implementation is direct adaptation of bsaes-x86_64 module for -@ ARM NEON. Except that this module is endian-neutral [in sense that -@ it can be compiled for either endianness] by courtesy of vld1.8's -@ neutrality. Initial version doesn't implement interface to OpenSSL, -@ only low-level primitives and unsupported entry points, just enough -@ to collect performance results, which for Cortex-A8 core are: -@ -@ encrypt 19.5 cycles per byte processed with 128-bit key -@ decrypt 22.1 cycles per byte processed with 128-bit key -@ key conv. 440 cycles per 128-bit key/0.18 of 8x block -@ -@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, -@ which is [much] worse than anticipated (for further details see -@ http://www.openssl.org/~appro/Snapdragon-S4.html). -@ -@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code -@ manages in 20.0 cycles]. -@ -@ When comparing to x86_64 results keep in mind that NEON unit is -@ [mostly] single-issue and thus can't [fully] benefit from -@ instruction-level parallelism. And when comparing to aes-armv4 -@ results keep in mind key schedule conversion overhead (see -@ bsaes-x86_64.pl for further details)... -@ -@ - -@ April-August 2013 -@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard. 
- -#ifndef __KERNEL__ -# include - -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -# define VFP_ABI_FRAME 0x40 -#else -# define VFP_ABI_PUSH -# define VFP_ABI_POP -# define VFP_ABI_FRAME 0 -# define BSAES_ASM_EXTENDED_KEY -# define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -#endif - -#ifdef __thumb__ -# define adrl adr -#endif - -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.text -.syntax unified @ ARMv7-capable assembler is expected to handle this -#if defined(__thumb2__) && !defined(__APPLE__) -.thumb -#else -.code 32 -# undef __thumb2__ -#endif - -.type _bsaes_decrypt8,%function -.align 4 -_bsaes_decrypt8: - adr r6,. - vldmia r4!, {q9} @ round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,.LM0ISR -#else - add r6,r6,#.LM0ISR-_bsaes_decrypt8 -#endif - - vldmia r6!, {q8} @ .LM0ISR - veor q10, q0, q9 @ xor with round0 key - veor q11, q1, q9 - vtbl.8 d0, {q10}, d16 - vtbl.8 d1, {q10}, d17 - veor q12, q2, q9 - vtbl.8 d2, {q11}, d16 - vtbl.8 d3, {q11}, d17 - veor q13, q3, q9 - vtbl.8 d4, {q12}, d16 - vtbl.8 d5, {q12}, d17 - veor q14, q4, q9 - vtbl.8 d6, {q13}, d16 - vtbl.8 d7, {q13}, d17 - veor q15, q5, q9 - vtbl.8 d8, {q14}, d16 - vtbl.8 d9, {q14}, d17 - veor q10, q6, q9 - vtbl.8 d10, {q15}, d16 - vtbl.8 d11, {q15}, d17 - veor q11, q7, q9 - vtbl.8 d12, {q10}, d16 - vtbl.8 d13, {q10}, d17 - vtbl.8 d14, {q11}, d16 - vtbl.8 d15, {q11}, d17 - vmov.i8 q8,#0x55 @ compose .LBS0 - vmov.i8 q9,#0x33 @ compose .LBS1 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q4, #1 - veor q10, q10, q7 - veor q11, q11, q5 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #1 - veor q5, q5, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q4, q4, q11 - vshr.u64 q10, q2, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q3 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q3, q3, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - 
veor q2, q2, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose .LBS2 - vshr.u64 q10, q5, #2 - vshr.u64 q11, q4, #2 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q7, q7, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q5, q5, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q3 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q3, q3, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q3, #4 - vshr.u64 q11, q2, #4 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q6, q6, q11 - vshl.u64 q11, q11, #4 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q5 - veor q11, q11, q4 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q4, q4, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - sub r5,r5,#1 - b .Ldec_sbox -.align 4 -.Ldec_loop: - vldmia r4!, {q8,q9,q10,q11} - veor q8, q8, q0 - veor q9, q9, q1 - vtbl.8 d0, {q8}, d24 - vtbl.8 d1, {q8}, d25 - vldmia r4!, {q8} - veor q10, q10, q2 - vtbl.8 d2, {q9}, d24 - vtbl.8 d3, {q9}, d25 - vldmia r4!, {q9} - veor q11, q11, q3 - vtbl.8 d4, {q10}, d24 - vtbl.8 d5, {q10}, d25 - vldmia r4!, {q10} - vtbl.8 d6, {q11}, d24 - vtbl.8 d7, {q11}, d25 - vldmia r4!, {q11} - veor q8, q8, q4 - veor q9, q9, q5 - vtbl.8 d8, {q8}, d24 - vtbl.8 d9, {q8}, d25 - veor q10, q10, q6 - vtbl.8 d10, {q9}, d24 - vtbl.8 d11, {q9}, d25 - veor q11, q11, q7 - vtbl.8 d12, {q10}, d24 - vtbl.8 d13, {q10}, d25 - vtbl.8 d14, {q11}, d24 - vtbl.8 d15, {q11}, d25 -.Ldec_sbox: - veor q1, q1, q4 - veor q3, q3, q4 - - veor q4, q4, q7 - veor q1, q1, q6 - veor q2, q2, q7 - veor q6, q6, q4 - - veor q0, q0, q1 - veor q2, q2, q5 - veor q7, q7, q6 - veor q3, q3, q0 - veor q5, q5, q0 - veor q1, q1, q3 
- veor q11, q3, q0 - veor q10, q7, q4 - veor q9, q1, q6 - veor q13, q4, q0 - vmov q8, q10 - veor q12, q5, q2 - - vorr q10, q10, q9 - veor q15, q11, q8 - vand q14, q11, q12 - vorr q11, q11, q12 - veor q12, q12, q9 - vand q8, q8, q9 - veor q9, q6, q2 - vand q15, q15, q12 - vand q13, q13, q9 - veor q9, q3, q7 - veor q12, q1, q5 - veor q11, q11, q13 - veor q10, q10, q13 - vand q13, q9, q12 - vorr q9, q9, q12 - veor q11, q11, q15 - veor q8, q8, q13 - veor q10, q10, q14 - veor q9, q9, q15 - veor q8, q8, q14 - vand q12, q4, q6 - veor q9, q9, q14 - vand q13, q0, q2 - vand q14, q7, q1 - vorr q15, q3, q5 - veor q11, q11, q12 - veor q9, q9, q14 - veor q8, q8, q15 - veor q10, q10, q13 - - @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 - - @ new smaller inversion - - vand q14, q11, q9 - vmov q12, q8 - - veor q13, q10, q14 - veor q15, q8, q14 - veor q14, q8, q14 @ q14=q15 - - vbsl q13, q9, q8 - vbsl q15, q11, q10 - veor q11, q11, q10 - - vbsl q12, q13, q14 - vbsl q8, q14, q13 - - vand q14, q12, q15 - veor q9, q9, q8 - - veor q14, q14, q11 - veor q12, q5, q2 - veor q8, q1, q6 - veor q10, q15, q14 - vand q10, q10, q5 - veor q5, q5, q1 - vand q11, q1, q15 - vand q5, q5, q14 - veor q1, q11, q10 - veor q5, q5, q11 - veor q15, q15, q13 - veor q14, q14, q9 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q2 - veor q12, q12, q8 - veor q2, q2, q6 - vand q8, q8, q15 - vand q6, q6, q13 - vand q12, q12, q14 - vand q2, q2, q9 - veor q8, q8, q12 - veor q2, q2, q6 - veor q12, q12, q11 - veor q6, q6, q10 - veor q5, q5, q12 - veor q2, q2, q12 - veor q1, q1, q8 - veor q6, q6, q8 - - veor q12, q3, q0 - veor q8, q7, q4 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q0 - veor q12, q12, q8 - veor q0, q0, q4 - vand q8, q8, q15 - vand q4, q4, q13 - vand q12, q12, q14 - vand q0, q0, q9 - veor q8, q8, q12 - veor q0, q0, q4 - veor q12, q12, q11 - veor q4, q4, q10 - veor q15, q15, q13 - veor q14, q14, q9 - veor q10, q15, q14 - vand q10, q10, q3 - veor 
q3, q3, q7 - vand q11, q7, q15 - vand q3, q3, q14 - veor q7, q11, q10 - veor q3, q3, q11 - veor q3, q3, q12 - veor q0, q0, q12 - veor q7, q7, q8 - veor q4, q4, q8 - veor q1, q1, q7 - veor q6, q6, q5 - - veor q4, q4, q1 - veor q2, q2, q7 - veor q5, q5, q7 - veor q4, q4, q2 - veor q7, q7, q0 - veor q4, q4, q5 - veor q3, q3, q6 - veor q6, q6, q1 - veor q3, q3, q4 - - veor q4, q4, q0 - veor q7, q7, q3 - subs r5,r5,#1 - bcc .Ldec_done - @ multiplication by 0x05-0x00-0x04-0x00 - vext.8 q8, q0, q0, #8 - vext.8 q14, q3, q3, #8 - vext.8 q15, q5, q5, #8 - veor q8, q8, q0 - vext.8 q9, q1, q1, #8 - veor q14, q14, q3 - vext.8 q10, q6, q6, #8 - veor q15, q15, q5 - vext.8 q11, q4, q4, #8 - veor q9, q9, q1 - vext.8 q12, q2, q2, #8 - veor q10, q10, q6 - vext.8 q13, q7, q7, #8 - veor q11, q11, q4 - veor q12, q12, q2 - veor q13, q13, q7 - - veor q0, q0, q14 - veor q1, q1, q14 - veor q6, q6, q8 - veor q2, q2, q10 - veor q4, q4, q9 - veor q1, q1, q15 - veor q6, q6, q15 - veor q2, q2, q14 - veor q7, q7, q11 - veor q4, q4, q14 - veor q3, q3, q12 - veor q2, q2, q15 - veor q7, q7, q15 - veor q5, q5, q13 - vext.8 q8, q0, q0, #12 @ x0 <<< 32 - vext.8 q9, q1, q1, #12 - veor q0, q0, q8 @ x0 ^ (x0 <<< 32) - vext.8 q10, q6, q6, #12 - veor q1, q1, q9 - vext.8 q11, q4, q4, #12 - veor q6, q6, q10 - vext.8 q12, q2, q2, #12 - veor q4, q4, q11 - vext.8 q13, q7, q7, #12 - veor q2, q2, q12 - vext.8 q14, q3, q3, #12 - veor q7, q7, q13 - vext.8 q15, q5, q5, #12 - veor q3, q3, q14 - - veor q9, q9, q0 - veor q5, q5, q15 - vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) - veor q10, q10, q1 - veor q8, q8, q5 - veor q9, q9, q5 - vext.8 q1, q1, q1, #8 - veor q13, q13, q2 - veor q0, q0, q8 - veor q14, q14, q7 - veor q1, q1, q9 - vext.8 q8, q2, q2, #8 - veor q12, q12, q4 - vext.8 q9, q7, q7, #8 - veor q15, q15, q3 - vext.8 q2, q4, q4, #8 - veor q11, q11, q6 - vext.8 q7, q5, q5, #8 - veor q12, q12, q5 - vext.8 q4, q3, q3, #8 - veor q11, q11, q5 - vext.8 q3, q6, q6, #8 - veor q5, q9, q13 - veor q11, q11, q2 - 
veor q7, q7, q15 - veor q6, q4, q14 - veor q4, q8, q12 - veor q2, q3, q10 - vmov q3, q11 - @ vmov q5, q9 - vldmia r6, {q12} @ .LISR - ite eq @ Thumb2 thing, sanity check in ARM - addeq r6,r6,#0x10 - bne .Ldec_loop - vldmia r6, {q12} @ .LISRM0 - b .Ldec_loop -.align 4 -.Ldec_done: - vmov.i8 q8,#0x55 @ compose .LBS0 - vmov.i8 q9,#0x33 @ compose .LBS1 - vshr.u64 q10, q3, #1 - vshr.u64 q11, q2, #1 - veor q10, q10, q5 - veor q11, q11, q7 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #1 - veor q7, q7, q11 - vshl.u64 q11, q11, #1 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q4 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q4, q4, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose .LBS2 - vshr.u64 q10, q7, #2 - vshr.u64 q11, q2, #2 - veor q10, q10, q5 - veor q11, q11, q3 - vand q10, q10, q9 - vand q11, q11, q9 - veor q5, q5, q10 - vshl.u64 q10, q10, #2 - veor q3, q3, q11 - vshl.u64 q11, q11, #2 - veor q7, q7, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q4 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q4, q4, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q4, #4 - vshr.u64 q11, q6, #4 - veor q10, q10, q5 - veor q11, q11, q3 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q3, q3, q11 - vshl.u64 q11, q11, #4 - veor q4, q4, q10 - veor q6, q6, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q7 - veor q11, q11, q2 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q2, q2, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - vldmia r4, {q8} @ last round key - veor q6, q6, q8 - veor q4, q4, q8 - veor q2, q2, q8 - veor q7, q7, q8 - veor 
q3, q3, q8 - veor q5, q5, q8 - veor q0, q0, q8 - veor q1, q1, q8 - bx lr -.size _bsaes_decrypt8,.-_bsaes_decrypt8 - -.type _bsaes_const,%object -.align 6 -_bsaes_const: -.LM0ISR:@ InvShiftRows constants -.quad 0x0a0e0206070b0f03, 0x0004080c0d010509 -.LISR: -.quad 0x0504070602010003, 0x0f0e0d0c080b0a09 -.LISRM0: -.quad 0x01040b0e0205080f, 0x0306090c00070a0d -.LM0SR:@ ShiftRows constants -.quad 0x0a0e02060f03070b, 0x0004080c05090d01 -.LSR: -.quad 0x0504070600030201, 0x0f0e0d0c0a09080b -.LSRM0: -.quad 0x0304090e00050a0f, 0x01060b0c0207080d -.LM0: -.quad 0x02060a0e03070b0f, 0x0004080c0105090d -.LREVM0SR: -.quad 0x090d01050c000408, 0x03070b0f060a0e02 -.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 6 -.size _bsaes_const,.-_bsaes_const - -.type _bsaes_encrypt8,%function -.align 4 -_bsaes_encrypt8: - adr r6,. - vldmia r4!, {q9} @ round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,.LM0SR -#else - sub r6,r6,#_bsaes_encrypt8-.LM0SR -#endif - - vldmia r6!, {q8} @ .LM0SR -_bsaes_encrypt8_alt: - veor q10, q0, q9 @ xor with round0 key - veor q11, q1, q9 - vtbl.8 d0, {q10}, d16 - vtbl.8 d1, {q10}, d17 - veor q12, q2, q9 - vtbl.8 d2, {q11}, d16 - vtbl.8 d3, {q11}, d17 - veor q13, q3, q9 - vtbl.8 d4, {q12}, d16 - vtbl.8 d5, {q12}, d17 - veor q14, q4, q9 - vtbl.8 d6, {q13}, d16 - vtbl.8 d7, {q13}, d17 - veor q15, q5, q9 - vtbl.8 d8, {q14}, d16 - vtbl.8 d9, {q14}, d17 - veor q10, q6, q9 - vtbl.8 d10, {q15}, d16 - vtbl.8 d11, {q15}, d17 - veor q11, q7, q9 - vtbl.8 d12, {q10}, d16 - vtbl.8 d13, {q10}, d17 - vtbl.8 d14, {q11}, d16 - vtbl.8 d15, {q11}, d17 -_bsaes_encrypt8_bitslice: - vmov.i8 q8,#0x55 @ compose .LBS0 - vmov.i8 q9,#0x33 @ compose .LBS1 - vshr.u64 q10, q6, #1 - vshr.u64 q11, q4, #1 - veor q10, q10, q7 - veor q11, q11, q5 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - 
vshl.u64 q10, q10, #1 - veor q5, q5, q11 - vshl.u64 q11, q11, #1 - veor q6, q6, q10 - veor q4, q4, q11 - vshr.u64 q10, q2, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q3 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q3, q3, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q2, q2, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose .LBS2 - vshr.u64 q10, q5, #2 - vshr.u64 q11, q4, #2 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q9 - vand q11, q11, q9 - veor q7, q7, q10 - vshl.u64 q10, q10, #2 - veor q6, q6, q11 - vshl.u64 q11, q11, #2 - veor q5, q5, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q3 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q3, q3, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q3, #4 - vshr.u64 q11, q2, #4 - veor q10, q10, q7 - veor q11, q11, q6 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q6, q6, q11 - vshl.u64 q11, q11, #4 - veor q3, q3, q10 - veor q2, q2, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q5 - veor q11, q11, q4 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q4, q4, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - sub r5,r5,#1 - b .Lenc_sbox -.align 4 -.Lenc_loop: - vldmia r4!, {q8,q9,q10,q11} - veor q8, q8, q0 - veor q9, q9, q1 - vtbl.8 d0, {q8}, d24 - vtbl.8 d1, {q8}, d25 - vldmia r4!, {q8} - veor q10, q10, q2 - vtbl.8 d2, {q9}, d24 - vtbl.8 d3, {q9}, d25 - vldmia r4!, {q9} - veor q11, q11, q3 - vtbl.8 d4, {q10}, d24 - vtbl.8 d5, {q10}, d25 - vldmia r4!, {q10} - vtbl.8 d6, {q11}, d24 - vtbl.8 d7, {q11}, d25 - vldmia r4!, {q11} - veor q8, q8, q4 - veor q9, q9, q5 - vtbl.8 d8, {q8}, d24 - vtbl.8 d9, {q8}, d25 - veor q10, q10, q6 - vtbl.8 d10, {q9}, d24 - vtbl.8 d11, {q9}, d25 - veor q11, q11, q7 - vtbl.8 d12, 
{q10}, d24 - vtbl.8 d13, {q10}, d25 - vtbl.8 d14, {q11}, d24 - vtbl.8 d15, {q11}, d25 -.Lenc_sbox: - veor q2, q2, q1 - veor q5, q5, q6 - veor q3, q3, q0 - veor q6, q6, q2 - veor q5, q5, q0 - - veor q6, q6, q3 - veor q3, q3, q7 - veor q7, q7, q5 - veor q3, q3, q4 - veor q4, q4, q5 - - veor q2, q2, q7 - veor q3, q3, q1 - veor q1, q1, q5 - veor q11, q7, q4 - veor q10, q1, q2 - veor q9, q5, q3 - veor q13, q2, q4 - vmov q8, q10 - veor q12, q6, q0 - - vorr q10, q10, q9 - veor q15, q11, q8 - vand q14, q11, q12 - vorr q11, q11, q12 - veor q12, q12, q9 - vand q8, q8, q9 - veor q9, q3, q0 - vand q15, q15, q12 - vand q13, q13, q9 - veor q9, q7, q1 - veor q12, q5, q6 - veor q11, q11, q13 - veor q10, q10, q13 - vand q13, q9, q12 - vorr q9, q9, q12 - veor q11, q11, q15 - veor q8, q8, q13 - veor q10, q10, q14 - veor q9, q9, q15 - veor q8, q8, q14 - vand q12, q2, q3 - veor q9, q9, q14 - vand q13, q4, q0 - vand q14, q1, q5 - vorr q15, q7, q6 - veor q11, q11, q12 - veor q9, q9, q14 - veor q8, q8, q15 - veor q10, q10, q13 - - @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 - - @ new smaller inversion - - vand q14, q11, q9 - vmov q12, q8 - - veor q13, q10, q14 - veor q15, q8, q14 - veor q14, q8, q14 @ q14=q15 - - vbsl q13, q9, q8 - vbsl q15, q11, q10 - veor q11, q11, q10 - - vbsl q12, q13, q14 - vbsl q8, q14, q13 - - vand q14, q12, q15 - veor q9, q9, q8 - - veor q14, q14, q11 - veor q12, q6, q0 - veor q8, q5, q3 - veor q10, q15, q14 - vand q10, q10, q6 - veor q6, q6, q5 - vand q11, q5, q15 - vand q6, q6, q14 - veor q5, q11, q10 - veor q6, q6, q11 - veor q15, q15, q13 - veor q14, q14, q9 - veor q11, q15, q14 - veor q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q0 - veor q12, q12, q8 - veor q0, q0, q3 - vand q8, q8, q15 - vand q3, q3, q13 - vand q12, q12, q14 - vand q0, q0, q9 - veor q8, q8, q12 - veor q0, q0, q3 - veor q12, q12, q11 - veor q3, q3, q10 - veor q6, q6, q12 - veor q0, q0, q12 - veor q5, q5, q8 - veor q3, q3, q8 - - veor q12, q7, q4 - veor q8, q1, q2 - veor q11, q15, q14 - veor 
q10, q13, q9 - vand q11, q11, q12 - vand q10, q10, q4 - veor q12, q12, q8 - veor q4, q4, q2 - vand q8, q8, q15 - vand q2, q2, q13 - vand q12, q12, q14 - vand q4, q4, q9 - veor q8, q8, q12 - veor q4, q4, q2 - veor q12, q12, q11 - veor q2, q2, q10 - veor q15, q15, q13 - veor q14, q14, q9 - veor q10, q15, q14 - vand q10, q10, q7 - veor q7, q7, q1 - vand q11, q1, q15 - vand q7, q7, q14 - veor q1, q11, q10 - veor q7, q7, q11 - veor q7, q7, q12 - veor q4, q4, q12 - veor q1, q1, q8 - veor q2, q2, q8 - veor q7, q7, q0 - veor q1, q1, q6 - veor q6, q6, q0 - veor q4, q4, q7 - veor q0, q0, q1 - - veor q1, q1, q5 - veor q5, q5, q2 - veor q2, q2, q3 - veor q3, q3, q5 - veor q4, q4, q5 - - veor q6, q6, q3 - subs r5,r5,#1 - bcc .Lenc_done - vext.8 q8, q0, q0, #12 @ x0 <<< 32 - vext.8 q9, q1, q1, #12 - veor q0, q0, q8 @ x0 ^ (x0 <<< 32) - vext.8 q10, q4, q4, #12 - veor q1, q1, q9 - vext.8 q11, q6, q6, #12 - veor q4, q4, q10 - vext.8 q12, q3, q3, #12 - veor q6, q6, q11 - vext.8 q13, q7, q7, #12 - veor q3, q3, q12 - vext.8 q14, q2, q2, #12 - veor q7, q7, q13 - vext.8 q15, q5, q5, #12 - veor q2, q2, q14 - - veor q9, q9, q0 - veor q5, q5, q15 - vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) - veor q10, q10, q1 - veor q8, q8, q5 - veor q9, q9, q5 - vext.8 q1, q1, q1, #8 - veor q13, q13, q3 - veor q0, q0, q8 - veor q14, q14, q7 - veor q1, q1, q9 - vext.8 q8, q3, q3, #8 - veor q12, q12, q6 - vext.8 q9, q7, q7, #8 - veor q15, q15, q2 - vext.8 q3, q6, q6, #8 - veor q11, q11, q4 - vext.8 q7, q5, q5, #8 - veor q12, q12, q5 - vext.8 q6, q2, q2, #8 - veor q11, q11, q5 - vext.8 q2, q4, q4, #8 - veor q5, q9, q13 - veor q4, q8, q12 - veor q3, q3, q11 - veor q7, q7, q15 - veor q6, q6, q14 - @ vmov q4, q8 - veor q2, q2, q10 - @ vmov q5, q9 - vldmia r6, {q12} @ .LSR - ite eq @ Thumb2 thing, samity check in ARM - addeq r6,r6,#0x10 - bne .Lenc_loop - vldmia r6, {q12} @ .LSRM0 - b .Lenc_loop -.align 4 -.Lenc_done: - vmov.i8 q8,#0x55 @ compose .LBS0 - vmov.i8 q9,#0x33 @ compose .LBS1 - vshr.u64 q10, 
q2, #1 - vshr.u64 q11, q3, #1 - veor q10, q10, q5 - veor q11, q11, q7 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #1 - veor q7, q7, q11 - vshl.u64 q11, q11, #1 - veor q2, q2, q10 - veor q3, q3, q11 - vshr.u64 q10, q4, #1 - vshr.u64 q11, q0, #1 - veor q10, q10, q6 - veor q11, q11, q1 - vand q10, q10, q8 - vand q11, q11, q8 - veor q6, q6, q10 - vshl.u64 q10, q10, #1 - veor q1, q1, q11 - vshl.u64 q11, q11, #1 - veor q4, q4, q10 - veor q0, q0, q11 - vmov.i8 q8,#0x0f @ compose .LBS2 - vshr.u64 q10, q7, #2 - vshr.u64 q11, q3, #2 - veor q10, q10, q5 - veor q11, q11, q2 - vand q10, q10, q9 - vand q11, q11, q9 - veor q5, q5, q10 - vshl.u64 q10, q10, #2 - veor q2, q2, q11 - vshl.u64 q11, q11, #2 - veor q7, q7, q10 - veor q3, q3, q11 - vshr.u64 q10, q1, #2 - vshr.u64 q11, q0, #2 - veor q10, q10, q6 - veor q11, q11, q4 - vand q10, q10, q9 - vand q11, q11, q9 - veor q6, q6, q10 - vshl.u64 q10, q10, #2 - veor q4, q4, q11 - vshl.u64 q11, q11, #2 - veor q1, q1, q10 - veor q0, q0, q11 - vshr.u64 q10, q6, #4 - vshr.u64 q11, q4, #4 - veor q10, q10, q5 - veor q11, q11, q2 - vand q10, q10, q8 - vand q11, q11, q8 - veor q5, q5, q10 - vshl.u64 q10, q10, #4 - veor q2, q2, q11 - vshl.u64 q11, q11, #4 - veor q6, q6, q10 - veor q4, q4, q11 - vshr.u64 q10, q1, #4 - vshr.u64 q11, q0, #4 - veor q10, q10, q7 - veor q11, q11, q3 - vand q10, q10, q8 - vand q11, q11, q8 - veor q7, q7, q10 - vshl.u64 q10, q10, #4 - veor q3, q3, q11 - vshl.u64 q11, q11, #4 - veor q1, q1, q10 - veor q0, q0, q11 - vldmia r4, {q8} @ last round key - veor q4, q4, q8 - veor q6, q6, q8 - veor q3, q3, q8 - veor q7, q7, q8 - veor q2, q2, q8 - veor q5, q5, q8 - veor q0, q0, q8 - veor q1, q1, q8 - bx lr -.size _bsaes_encrypt8,.-_bsaes_encrypt8 -.type _bsaes_key_convert,%function -.align 4 -_bsaes_key_convert: - adr r6,. - vld1.8 {q7}, [r4]! @ load round 0 key -#if defined(__thumb2__) || defined(__APPLE__) - adr r6,.LM0 -#else - sub r6,r6,#_bsaes_key_convert-.LM0 -#endif - vld1.8 {q15}, [r4]! 
@ load round 1 key - - vmov.i8 q8, #0x01 @ bit masks - vmov.i8 q9, #0x02 - vmov.i8 q10, #0x04 - vmov.i8 q11, #0x08 - vmov.i8 q12, #0x10 - vmov.i8 q13, #0x20 - vldmia r6, {q14} @ .LM0 - -#ifdef __ARMEL__ - vrev32.8 q7, q7 - vrev32.8 q15, q15 -#endif - sub r5,r5,#1 - vstmia r12!, {q7} @ save round 0 key - b .Lkey_loop - -.align 4 -.Lkey_loop: - vtbl.8 d14,{q15},d28 - vtbl.8 d15,{q15},d29 - vmov.i8 q6, #0x40 - vmov.i8 q15, #0x80 - - vtst.8 q0, q7, q8 - vtst.8 q1, q7, q9 - vtst.8 q2, q7, q10 - vtst.8 q3, q7, q11 - vtst.8 q4, q7, q12 - vtst.8 q5, q7, q13 - vtst.8 q6, q7, q6 - vtst.8 q7, q7, q15 - vld1.8 {q15}, [r4]! @ load next round key - vmvn q0, q0 @ "pnot" - vmvn q1, q1 - vmvn q5, q5 - vmvn q6, q6 -#ifdef __ARMEL__ - vrev32.8 q15, q15 -#endif - subs r5,r5,#1 - vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key - bne .Lkey_loop - - vmov.i8 q7,#0x63 @ compose .L63 - @ don't save last round key - bx lr -.size _bsaes_key_convert,.-_bsaes_key_convert -.globl bsaes_cbc_encrypt -.hidden bsaes_cbc_encrypt -.type bsaes_cbc_encrypt,%function -.align 5 -bsaes_cbc_encrypt: - @ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for - @ short inputs. We patch this out, using bsaes for all input sizes. 
- - @ it is up to the caller to make sure we are called with enc == 0 - - mov ip, sp - stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} - VFP_ABI_PUSH - ldr r8, [ip] @ IV is 1st arg on the stack - mov r2, r2, lsr#4 @ len in 16 byte blocks - sub sp, #0x10 @ scratch space to carry over the IV - mov r9, sp @ save sp - - ldr r10, [r3, #240] @ get # of rounds -#ifndef BSAES_ASM_EXTENDED_KEY - @ allocate the key schedule on the stack - sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key - add r12, #96 @ sifze of bit-slices key schedule - - @ populate the key schedule - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - mov sp, r12 @ sp is sp - bl _bsaes_key_convert - vldmia sp, {q6} - vstmia r12, {q15} @ save last round key - veor q7, q7, q6 @ fix up round 0 key - vstmia sp, {q7} -#else - ldr r12, [r3, #244] - eors r12, #1 - beq 0f - - @ populate the key schedule - str r12, [r3, #244] - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - add r12, r3, #248 @ pass key schedule - bl _bsaes_key_convert - add r4, r3, #248 - vldmia r4, {q6} - vstmia r12, {q15} @ save last round key - veor q7, q7, q6 @ fix up round 0 key - vstmia r4, {q7} - -.align 2 - -#endif - - vld1.8 {q15}, [r8] @ load IV - b .Lcbc_dec_loop - -.align 4 -.Lcbc_dec_loop: - subs r2, r2, #0x8 - bmi .Lcbc_dec_loop_finish - - vld1.8 {q0,q1}, [r0]! @ load input - vld1.8 {q2,q3}, [r0]! -#ifndef BSAES_ASM_EXTENDED_KEY - mov r4, sp @ pass the key -#else - add r4, r3, #248 -#endif - vld1.8 {q4,q5}, [r0]! - mov r5, r10 - vld1.8 {q6,q7}, [r0] - sub r0, r0, #0x60 - vstmia r9, {q15} @ put aside IV - - bl _bsaes_decrypt8 - - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q14,q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q3, q3, q13 - vst1.8 {q6}, [r1]! - veor q5, q5, q14 - vst1.8 {q4}, [r1]! 
- vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - vst1.8 {q3}, [r1]! - vst1.8 {q5}, [r1]! - - b .Lcbc_dec_loop - -.Lcbc_dec_loop_finish: - adds r2, r2, #8 - beq .Lcbc_dec_done - - @ Set up most parameters for the _bsaes_decrypt8 call. -#ifndef BSAES_ASM_EXTENDED_KEY - mov r4, sp @ pass the key -#else - add r4, r3, #248 -#endif - mov r5, r10 - vstmia r9, {q15} @ put aside IV - - vld1.8 {q0}, [r0]! @ load input - cmp r2, #2 - blo .Lcbc_dec_one - vld1.8 {q1}, [r0]! - beq .Lcbc_dec_two - vld1.8 {q2}, [r0]! - cmp r2, #4 - blo .Lcbc_dec_three - vld1.8 {q3}, [r0]! - beq .Lcbc_dec_four - vld1.8 {q4}, [r0]! - cmp r2, #6 - blo .Lcbc_dec_five - vld1.8 {q5}, [r0]! - beq .Lcbc_dec_six - vld1.8 {q6}, [r0]! - sub r0, r0, #0x70 - - bl _bsaes_decrypt8 - - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q3, q3, q13 - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - vst1.8 {q3}, [r1]! - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_six: - sub r0, r0, #0x60 - bl _bsaes_decrypt8 - vldmia r9,{q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q12}, [r0]! - veor q4, q4, q10 - veor q2, q2, q11 - vld1.8 {q15}, [r0]! - veor q7, q7, q12 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - vst1.8 {q7}, [r1]! - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_five: - sub r0, r0, #0x50 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10,q11}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q15}, [r0]! - veor q4, q4, q10 - vst1.8 {q0,q1}, [r1]! 
@ write output - veor q2, q2, q11 - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - vst1.8 {q2}, [r1]! - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_four: - sub r0, r0, #0x40 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q10}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vld1.8 {q15}, [r0]! - veor q4, q4, q10 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - vst1.8 {q4}, [r1]! - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_three: - sub r0, r0, #0x30 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8,q9}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q15}, [r0]! - veor q1, q1, q8 - veor q6, q6, q9 - vst1.8 {q0,q1}, [r1]! @ write output - vst1.8 {q6}, [r1]! - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_two: - sub r0, r0, #0x20 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q8}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vld1.8 {q15}, [r0]! @ reload input - veor q1, q1, q8 - vst1.8 {q0,q1}, [r1]! @ write output - b .Lcbc_dec_done -.align 4 -.Lcbc_dec_one: - sub r0, r0, #0x10 - bl _bsaes_decrypt8 - vldmia r9, {q14} @ reload IV - vld1.8 {q15}, [r0]! @ reload input - veor q0, q0, q14 @ ^= IV - vst1.8 {q0}, [r1]! @ write output - -.Lcbc_dec_done: -#ifndef BSAES_ASM_EXTENDED_KEY - vmov.i32 q0, #0 - vmov.i32 q1, #0 -.Lcbc_dec_bzero:@ wipe key schedule [if any] - vstmia sp!, {q0,q1} - cmp sp, r9 - bne .Lcbc_dec_bzero -#endif - - mov sp, r9 - add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb - vst1.8 {q15}, [r8] @ return IV - VFP_ABI_POP - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} -.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt -.globl bsaes_ctr32_encrypt_blocks -.hidden bsaes_ctr32_encrypt_blocks -.type bsaes_ctr32_encrypt_blocks,%function -.align 5 -bsaes_ctr32_encrypt_blocks: - @ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this - @ out to retain a constant-time implementation. 
- mov ip, sp - stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} - VFP_ABI_PUSH - ldr r8, [ip] @ ctr is 1st arg on the stack - sub sp, sp, #0x10 @ scratch space to carry over the ctr - mov r9, sp @ save sp - - ldr r10, [r3, #240] @ get # of rounds -#ifndef BSAES_ASM_EXTENDED_KEY - @ allocate the key schedule on the stack - sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key - add r12, #96 @ size of bit-sliced key schedule - - @ populate the key schedule - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - mov sp, r12 @ sp is sp - bl _bsaes_key_convert - veor q7,q7,q15 @ fix up last round key - vstmia r12, {q7} @ save last round key - - vld1.8 {q0}, [r8] @ load counter -#ifdef __APPLE__ - mov r8, #:lower16:(.LREVM0SR-.LM0) - add r8, r6, r8 -#else - add r8, r6, #.LREVM0SR-.LM0 @ borrow r8 -#endif - vldmia sp, {q4} @ load round0 key -#else - ldr r12, [r3, #244] - eors r12, #1 - beq 0f - - @ populate the key schedule - str r12, [r3, #244] - mov r4, r3 @ pass key - mov r5, r10 @ pass # of rounds - add r12, r3, #248 @ pass key schedule - bl _bsaes_key_convert - veor q7,q7,q15 @ fix up last round key - vstmia r12, {q7} @ save last round key - -.align 2 - add r12, r3, #248 - vld1.8 {q0}, [r8] @ load counter - adrl r8, .LREVM0SR @ borrow r8 - vldmia r12, {q4} @ load round0 key - sub sp, #0x10 @ place for adjusted round0 key -#endif - - vmov.i32 q8,#1 @ compose 1<<96 - veor q9,q9,q9 - vrev32.8 q0,q0 - vext.8 q8,q9,q8,#4 - vrev32.8 q4,q4 - vadd.u32 q9,q8,q8 @ compose 2<<96 - vstmia sp, {q4} @ save adjusted round0 key - b .Lctr_enc_loop - -.align 4 -.Lctr_enc_loop: - vadd.u32 q10, q8, q9 @ compose 3<<96 - vadd.u32 q1, q0, q8 @ +1 - vadd.u32 q2, q0, q9 @ +2 - vadd.u32 q3, q0, q10 @ +3 - vadd.u32 q4, q1, q10 - vadd.u32 q5, q2, q10 - vadd.u32 q6, q3, q10 - vadd.u32 q7, q4, q10 - vadd.u32 q10, q5, q10 @ next counter - - @ Borrow prologue from _bsaes_encrypt8 to use the opportunity - @ to flip byte order in 32-bit counter - - vldmia sp, {q9} @ load round0 key -#ifndef 
BSAES_ASM_EXTENDED_KEY - add r4, sp, #0x10 @ pass next round key -#else - add r4, r3, #264 -#endif - vldmia r8, {q8} @ .LREVM0SR - mov r5, r10 @ pass rounds - vstmia r9, {q10} @ save next counter -#ifdef __APPLE__ - mov r6, #:lower16:(.LREVM0SR-.LSR) - sub r6, r8, r6 -#else - sub r6, r8, #.LREVM0SR-.LSR @ pass constants -#endif - - bl _bsaes_encrypt8_alt - - subs r2, r2, #8 - blo .Lctr_enc_loop_done - - vld1.8 {q8,q9}, [r0]! @ load input - vld1.8 {q10,q11}, [r0]! - veor q0, q8 - veor q1, q9 - vld1.8 {q12,q13}, [r0]! - veor q4, q10 - veor q6, q11 - vld1.8 {q14,q15}, [r0]! - veor q3, q12 - vst1.8 {q0,q1}, [r1]! @ write output - veor q7, q13 - veor q2, q14 - vst1.8 {q4}, [r1]! - veor q5, q15 - vst1.8 {q6}, [r1]! - vmov.i32 q8, #1 @ compose 1<<96 - vst1.8 {q3}, [r1]! - veor q9, q9, q9 - vst1.8 {q7}, [r1]! - vext.8 q8, q9, q8, #4 - vst1.8 {q2}, [r1]! - vadd.u32 q9,q8,q8 @ compose 2<<96 - vst1.8 {q5}, [r1]! - vldmia r9, {q0} @ load counter - - bne .Lctr_enc_loop - b .Lctr_enc_done - -.align 4 -.Lctr_enc_loop_done: - add r2, r2, #8 - vld1.8 {q8}, [r0]! @ load input - veor q0, q8 - vst1.8 {q0}, [r1]! @ write output - cmp r2, #2 - blo .Lctr_enc_done - vld1.8 {q9}, [r0]! - veor q1, q9 - vst1.8 {q1}, [r1]! - beq .Lctr_enc_done - vld1.8 {q10}, [r0]! - veor q4, q10 - vst1.8 {q4}, [r1]! - cmp r2, #4 - blo .Lctr_enc_done - vld1.8 {q11}, [r0]! - veor q6, q11 - vst1.8 {q6}, [r1]! - beq .Lctr_enc_done - vld1.8 {q12}, [r0]! - veor q3, q12 - vst1.8 {q3}, [r1]! - cmp r2, #6 - blo .Lctr_enc_done - vld1.8 {q13}, [r0]! - veor q7, q13 - vst1.8 {q7}, [r1]! - beq .Lctr_enc_done - vld1.8 {q14}, [r0] - veor q2, q14 - vst1.8 {q2}, [r1]! 
- -.Lctr_enc_done: - vmov.i32 q0, #0 - vmov.i32 q1, #0 -#ifndef BSAES_ASM_EXTENDED_KEY -.Lctr_enc_bzero:@ wipe key schedule [if any] - vstmia sp!, {q0,q1} - cmp sp, r9 - bne .Lctr_enc_bzero -#else - vstmia sp, {q0,q1} -#endif - - mov sp, r9 - add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb - VFP_ABI_POP - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return - - @ OpenSSL contains aes_nohw_* fallback code here. We patch this - @ out to retain a constant-time implementation. -.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghash-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghash-armv4.S deleted file mode 100644 index 42cce5831f..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghash-armv4.S +++ /dev/null @@ -1,591 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL -@ instructions are in aesv8-armx.pl.) 
-.arch armv7-a - -.text -#if defined(__thumb2__) || defined(__clang__) -.syntax unified -#define ldrplb ldrbpl -#define ldrneb ldrbne -#endif -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - -.type rem_4bit,%object -.align 5 -rem_4bit: -.short 0x0000,0x1C20,0x3840,0x2460 -.short 0x7080,0x6CA0,0x48C0,0x54E0 -.short 0xE100,0xFD20,0xD940,0xC560 -.short 0x9180,0x8DA0,0xA9C0,0xB5E0 -.size rem_4bit,.-rem_4bit - -.type rem_4bit_get,%function -rem_4bit_get: -#if defined(__thumb2__) - adr r2,rem_4bit -#else - sub r2,pc,#8+32 @ &rem_4bit -#endif - b .Lrem_4bit_got - nop - nop -.size rem_4bit_get,.-rem_4bit_get - -.globl gcm_ghash_4bit -.hidden gcm_ghash_4bit -.type gcm_ghash_4bit,%function -.align 4 -gcm_ghash_4bit: -#if defined(__thumb2__) - adr r12,rem_4bit -#else - sub r12,pc,#8+48 @ &rem_4bit -#endif - add r3,r2,r3 @ r3 to point at the end - stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too - - ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ... - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... 
to stack - - ldrb r12,[r2,#15] - ldrb r14,[r0,#15] -.Louter: - eor r12,r12,r14 - and r14,r12,#0xf0 - and r12,r12,#0x0f - mov r3,#14 - - add r7,r1,r12,lsl#4 - ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo] - add r11,r1,r14 - ldrb r12,[r2,#14] - - and r14,r4,#0xf @ rem - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - add r14,r14,r14 - eor r4,r8,r4,lsr#4 - ldrh r8,[sp,r14] @ rem_4bit[rem] - eor r4,r4,r5,lsl#28 - ldrb r14,[r0,#14] - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - eor r12,r12,r14 - and r14,r12,#0xf0 - and r12,r12,#0x0f - eor r7,r7,r8,lsl#16 - -.Linner: - add r11,r1,r12,lsl#4 - and r12,r4,#0xf @ rem - subs r3,r3,#1 - add r12,r12,r12 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - ldrh r8,[sp,r12] @ rem_4bit[rem] - eor r6,r10,r6,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r12,[r2,r3] - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - - add r11,r1,r14 - and r14,r4,#0xf @ rem - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - add r14,r14,r14 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - eor r4,r8,r4,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r8,[r0,r3] - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - ldrh r9,[sp,r14] - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 -#ifdef __thumb2__ - it pl -#endif - eorpl r12,r12,r8 - eor r7,r11,r7,lsr#4 -#ifdef __thumb2__ - itt pl -#endif - andpl r14,r12,#0xf0 - andpl r12,r12,#0x0f - eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem] - bpl .Linner - - ldr r3,[sp,#32] @ re-load r3/end - add r2,r2,#16 - mov r14,r4 -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r4,r4 - str r4,[r0,#12] -#elif defined(__ARMEB__) - str r4,[r0,#12] -#else - mov r9,r4,lsr#8 - strb r4,[r0,#12+3] - mov r10,r4,lsr#16 - strb r9,[r0,#12+2] - mov r11,r4,lsr#24 - strb r10,[r0,#12+1] - strb r11,[r0,#12] -#endif - cmp r2,r3 -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r5,r5 - str r5,[r0,#8] -#elif 
defined(__ARMEB__) - str r5,[r0,#8] -#else - mov r9,r5,lsr#8 - strb r5,[r0,#8+3] - mov r10,r5,lsr#16 - strb r9,[r0,#8+2] - mov r11,r5,lsr#24 - strb r10,[r0,#8+1] - strb r11,[r0,#8] -#endif - -#ifdef __thumb2__ - it ne -#endif - ldrneb r12,[r2,#15] -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r6,r6 - str r6,[r0,#4] -#elif defined(__ARMEB__) - str r6,[r0,#4] -#else - mov r9,r6,lsr#8 - strb r6,[r0,#4+3] - mov r10,r6,lsr#16 - strb r9,[r0,#4+2] - mov r11,r6,lsr#24 - strb r10,[r0,#4+1] - strb r11,[r0,#4] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r7,r7 - str r7,[r0,#0] -#elif defined(__ARMEB__) - str r7,[r0,#0] -#else - mov r9,r7,lsr#8 - strb r7,[r0,#0+3] - mov r10,r7,lsr#16 - strb r9,[r0,#0+2] - mov r11,r7,lsr#24 - strb r10,[r0,#0+1] - strb r11,[r0,#0] -#endif - - bne .Louter - - add sp,sp,#36 -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size gcm_ghash_4bit,.-gcm_ghash_4bit - -.globl gcm_gmult_4bit -.hidden gcm_gmult_4bit -.type gcm_gmult_4bit,%function -gcm_gmult_4bit: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - ldrb r12,[r0,#15] - b rem_4bit_get -.Lrem_4bit_got: - and r14,r12,#0xf0 - and r12,r12,#0x0f - mov r3,#14 - - add r7,r1,r12,lsl#4 - ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo] - ldrb r12,[r0,#14] - - add r11,r1,r14 - and r14,r4,#0xf @ rem - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - add r14,r14,r14 - eor r4,r8,r4,lsr#4 - ldrh r8,[r2,r14] @ rem_4bit[rem] - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - and r14,r12,#0xf0 - eor r7,r7,r8,lsl#16 - and r12,r12,#0x0f - -.Loop: - add r11,r1,r12,lsl#4 - and r12,r4,#0xf @ rem - subs r3,r3,#1 - add r12,r12,r12 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - eor 
r5,r5,r6,lsl#28 - ldrh r8,[r2,r12] @ rem_4bit[rem] - eor r6,r10,r6,lsr#4 -#ifdef __thumb2__ - it pl -#endif - ldrplb r12,[r0,r3] - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 - - add r11,r1,r14 - and r14,r4,#0xf @ rem - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - add r14,r14,r14 - ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi] - eor r4,r8,r4,lsr#4 - eor r4,r4,r5,lsl#28 - eor r5,r9,r5,lsr#4 - ldrh r8,[r2,r14] @ rem_4bit[rem] - eor r5,r5,r6,lsl#28 - eor r6,r10,r6,lsr#4 - eor r6,r6,r7,lsl#28 - eor r7,r11,r7,lsr#4 -#ifdef __thumb2__ - itt pl -#endif - andpl r14,r12,#0xf0 - andpl r12,r12,#0x0f - eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] - bpl .Loop -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r4,r4 - str r4,[r0,#12] -#elif defined(__ARMEB__) - str r4,[r0,#12] -#else - mov r9,r4,lsr#8 - strb r4,[r0,#12+3] - mov r10,r4,lsr#16 - strb r9,[r0,#12+2] - mov r11,r4,lsr#24 - strb r10,[r0,#12+1] - strb r11,[r0,#12] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r5,r5 - str r5,[r0,#8] -#elif defined(__ARMEB__) - str r5,[r0,#8] -#else - mov r9,r5,lsr#8 - strb r5,[r0,#8+3] - mov r10,r5,lsr#16 - strb r9,[r0,#8+2] - mov r11,r5,lsr#24 - strb r10,[r0,#8+1] - strb r11,[r0,#8] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r6,r6 - str r6,[r0,#4] -#elif defined(__ARMEB__) - str r6,[r0,#4] -#else - mov r9,r6,lsr#8 - strb r6,[r0,#4+3] - mov r10,r6,lsr#16 - strb r9,[r0,#4+2] - mov r11,r6,lsr#24 - strb r10,[r0,#4+1] - strb r11,[r0,#4] -#endif - -#if __ARM_ARCH__>=7 && defined(__ARMEL__) - rev r7,r7 - str r7,[r0,#0] -#elif defined(__ARMEB__) - str r7,[r0,#0] -#else - mov r9,r7,lsr#8 - strb r7,[r0,#0+3] - mov r10,r7,lsr#16 - strb r9,[r0,#0+2] - mov r11,r7,lsr#24 - strb r10,[r0,#0+1] - strb r11,[r0,#0] -#endif - -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size 
gcm_gmult_4bit,.-gcm_gmult_4bit -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.globl gcm_init_neon -.hidden gcm_init_neon -.type gcm_init_neon,%function -.align 4 -gcm_init_neon: - vld1.64 d7,[r1]! @ load H - vmov.i8 q8,#0xe1 - vld1.64 d6,[r1] - vshl.i64 d17,#57 - vshr.u64 d16,#63 @ t0=0xc2....01 - vdup.8 q9,d7[7] - vshr.u64 d26,d6,#63 - vshr.s8 q9,#7 @ broadcast carry bit - vshl.i64 q3,q3,#1 - vand q8,q8,q9 - vorr d7,d26 @ H<<<=1 - veor q3,q3,q8 @ twisted H - vstmia r0,{q3} - - bx lr @ bx lr -.size gcm_init_neon,.-gcm_init_neon - -.globl gcm_gmult_neon -.hidden gcm_gmult_neon -.type gcm_gmult_neon,%function -.align 4 -gcm_gmult_neon: - vld1.64 d7,[r0]! @ load Xi - vld1.64 d6,[r0]! - vmov.i64 d29,#0x0000ffffffffffff - vldmia r1,{d26,d27} @ load twisted H - vmov.i64 d30,#0x00000000ffffffff -#ifdef __ARMEL__ - vrev64.8 q3,q3 -#endif - vmov.i64 d31,#0x000000000000ffff - veor d28,d26,d27 @ Karatsuba pre-processing - mov r3,#16 - b .Lgmult_neon -.size gcm_gmult_neon,.-gcm_gmult_neon - -.globl gcm_ghash_neon -.hidden gcm_ghash_neon -.type gcm_ghash_neon,%function -.align 4 -gcm_ghash_neon: - vld1.64 d1,[r0]! @ load Xi - vld1.64 d0,[r0]! - vmov.i64 d29,#0x0000ffffffffffff - vldmia r1,{d26,d27} @ load twisted H - vmov.i64 d30,#0x00000000ffffffff -#ifdef __ARMEL__ - vrev64.8 q0,q0 -#endif - vmov.i64 d31,#0x000000000000ffff - veor d28,d26,d27 @ Karatsuba pre-processing - -.Loop_neon: - vld1.64 d7,[r2]! @ load inp - vld1.64 d6,[r2]! 
-#ifdef __ARMEL__ - vrev64.8 q3,q3 -#endif - veor q3,q0 @ inp^=Xi -.Lgmult_neon: - vext.8 d16, d26, d26, #1 @ A1 - vmull.p8 q8, d16, d6 @ F = A1*B - vext.8 d0, d6, d6, #1 @ B1 - vmull.p8 q0, d26, d0 @ E = A*B1 - vext.8 d18, d26, d26, #2 @ A2 - vmull.p8 q9, d18, d6 @ H = A2*B - vext.8 d22, d6, d6, #2 @ B2 - vmull.p8 q11, d26, d22 @ G = A*B2 - vext.8 d20, d26, d26, #3 @ A3 - veor q8, q8, q0 @ L = E + F - vmull.p8 q10, d20, d6 @ J = A3*B - vext.8 d0, d6, d6, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q0, d26, d0 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d6, d6, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d26, d22 @ K = A*B4 - veor q10, q10, q0 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor d20, d20, d21 - vmull.p8 q0, d26, d6 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q0, q0, q8 - veor q0, q0, q10 - veor d6,d6,d7 @ Karatsuba pre-processing - vext.8 d16, d28, d28, #1 @ A1 - vmull.p8 q8, d16, d6 @ F = A1*B - vext.8 d2, d6, d6, #1 @ B1 - vmull.p8 q1, d28, d2 @ E = A*B1 - vext.8 d18, d28, d28, #2 @ A2 - vmull.p8 q9, d18, d6 @ H = A2*B - vext.8 d22, d6, d6, #2 @ B2 - vmull.p8 q11, d28, d22 @ G = A*B2 - vext.8 d20, d28, d28, #3 @ A3 - veor q8, q8, q1 @ L = E + F - vmull.p8 q10, d20, d6 @ J = A3*B - vext.8 d2, d6, d6, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q1, d28, d2 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d6, d6, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d28, d22 @ K = A*B4 - veor q10, q10, q1 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 
24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor d20, d20, d21 - vmull.p8 q1, d28, d6 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q1, q1, q8 - veor q1, q1, q10 - vext.8 d16, d27, d27, #1 @ A1 - vmull.p8 q8, d16, d7 @ F = A1*B - vext.8 d4, d7, d7, #1 @ B1 - vmull.p8 q2, d27, d4 @ E = A*B1 - vext.8 d18, d27, d27, #2 @ A2 - vmull.p8 q9, d18, d7 @ H = A2*B - vext.8 d22, d7, d7, #2 @ B2 - vmull.p8 q11, d27, d22 @ G = A*B2 - vext.8 d20, d27, d27, #3 @ A3 - veor q8, q8, q2 @ L = E + F - vmull.p8 q10, d20, d7 @ J = A3*B - vext.8 d4, d7, d7, #3 @ B3 - veor q9, q9, q11 @ M = G + H - vmull.p8 q2, d27, d4 @ I = A*B3 - veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 - vand d17, d17, d29 - vext.8 d22, d7, d7, #4 @ B4 - veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 - vand d19, d19, d30 - vmull.p8 q11, d27, d22 @ K = A*B4 - veor q10, q10, q2 @ N = I + J - veor d16, d16, d17 - veor d18, d18, d19 - veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 - vand d21, d21, d31 - vext.8 q8, q8, q8, #15 - veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 - vmov.i64 d23, #0 - vext.8 q9, q9, q9, #14 - veor d20, d20, d21 - vmull.p8 q2, d27, d7 @ D = A*B - vext.8 q11, q11, q11, #12 - vext.8 q10, q10, q10, #13 - veor q8, q8, q9 - veor q10, q10, q11 - veor q2, q2, q8 - veor q2, q2, q10 - veor q1,q1,q0 @ Karatsuba post-processing - veor q1,q1,q2 - veor d1,d1,d2 - veor d4,d4,d3 @ Xh|Xl - 256-bit result - - @ equivalent of reduction_avx from ghash-x86_64.pl - vshl.i64 q9,q0,#57 @ 1st phase - vshl.i64 q10,q0,#62 - veor q10,q10,q9 @ - vshl.i64 q9,q0,#63 - veor q10, q10, q9 @ - veor d1,d1,d20 @ - veor d4,d4,d21 - - vshr.u64 q10,q0,#1 @ 2nd phase - veor q2,q2,q0 - veor q0,q0,q10 @ - vshr.u64 q10,q10,#6 - vshr.u64 q0,q0,#1 @ - veor q0,q0,q2 @ - veor q0,q0,q10 @ - - subs r3,#16 - bne .Loop_neon - -#ifdef __ARMEL__ - vrev64.8 q0,q0 -#endif - sub r0,#16 - 
vst1.64 d1,[r0]! @ write out Xi - vst1.64 d0,[r0] - - bx lr @ bx lr -.size gcm_ghash_neon,.-gcm_ghash_neon -#endif -.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghashv8-armx32.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghashv8-armx32.S deleted file mode 100644 index d6842945f0..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/ghashv8-armx32.S +++ /dev/null @@ -1,253 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text -.fpu neon -.code 32 -#undef __thumb2__ -.globl gcm_init_v8 -.hidden gcm_init_v8 -.type gcm_init_v8,%function -.align 4 -gcm_init_v8: - vld1.64 {q9},[r1] @ load input H - vmov.i8 q11,#0xe1 - vshl.i64 q11,q11,#57 @ 0xc2.0 - vext.8 q3,q9,q9,#8 - vshr.u64 q10,q11,#63 - vdup.32 q9,d18[1] - vext.8 q8,q10,q11,#8 @ t0=0xc2....01 - vshr.u64 q10,q3,#63 - vshr.s32 q9,q9,#31 @ broadcast carry bit - vand q10,q10,q8 - vshl.i64 q3,q3,#1 - vext.8 q10,q10,q10,#8 - vand q8,q8,q9 - vorr q3,q3,q10 @ H<<<=1 - veor q12,q3,q8 @ twisted H - vst1.64 {q12},[r0]! 
@ store Htable[0] - - @ calculate H^2 - vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing -.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12 - veor q8,q8,q12 -.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12 -.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8 - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q14,q0,q10 - - vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing - veor q9,q9,q14 - vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed - vst1.64 {q13,q14},[r0] @ store Htable[1..2] - - bx lr -.size gcm_init_v8,.-gcm_init_v8 -.globl gcm_gmult_v8 -.hidden gcm_gmult_v8 -.type gcm_gmult_v8,%function -.align 4 -gcm_gmult_v8: - vld1.64 {q9},[r0] @ load Xi - vmov.i8 q11,#0xe1 - vld1.64 {q12,q13},[r1] @ load twisted H, ... 
- vshl.u64 q11,q11,#57 -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vext.8 q3,q9,q9,#8 - -.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo - veor q9,q9,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi -.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q0,q0,q10 - -#ifndef __ARMEB__ - vrev64.8 q0,q0 -#endif - vext.8 q0,q0,q0,#8 - vst1.64 {q0},[r0] @ write out Xi - - bx lr -.size gcm_gmult_v8,.-gcm_gmult_v8 -.globl gcm_ghash_v8 -.hidden gcm_ghash_v8 -.type gcm_ghash_v8,%function -.align 4 -gcm_ghash_v8: - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so - vld1.64 {q0},[r0] @ load [rotated] Xi - @ "[rotated]" means that - @ loaded value would have - @ to be rotated in order to - @ make it appear as in - @ algorithm specification - subs r3,r3,#32 @ see if r3 is 32 or larger - mov r12,#16 @ r12 is used as post- - @ increment for input pointer; - @ as loop is modulo-scheduled - @ r12 is zeroed just in time - @ to preclude overstepping - @ inp[len], which means that - @ last block[s] are actually - @ loaded twice, but last - @ copy is not processed - vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2 - vmov.i8 q11,#0xe1 - vld1.64 {q14},[r1] - moveq r12,#0 @ is it time to zero r12? - vext.8 q0,q0,q0,#8 @ rotate Xi - vld1.64 {q8},[r2]! 
@ load [rotated] I[0] - vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant -#ifndef __ARMEB__ - vrev64.8 q8,q8 - vrev64.8 q0,q0 -#endif - vext.8 q3,q8,q8,#8 @ rotate I[0] - blo .Lodd_tail_v8 @ r3 was less than 32 - vld1.64 {q9},[r2],r12 @ load [rotated] I[1] -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vext.8 q7,q9,q9,#8 - veor q3,q3,q0 @ I[i]^=Xi -.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 - veor q9,q9,q7 @ Karatsuba pre-processing -.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 - b .Loop_mod2x_v8 - -.align 4 -.Loop_mod2x_v8: - vext.8 q10,q3,q3,#8 - subs r3,r3,#32 @ is there more data? -.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo - movlo r12,#0 @ is it time to zero r12? - -.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 - veor q10,q10,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi - veor q0,q0,q4 @ accumulate -.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) - vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] - - veor q2,q2,q6 - moveq r12,#0 @ is it time to zero r12? 
- veor q1,q1,q5 - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] -#ifndef __ARMEB__ - vrev64.8 q8,q8 -#endif - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - -#ifndef __ARMEB__ - vrev64.8 q9,q9 -#endif - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - vext.8 q7,q9,q9,#8 - vext.8 q3,q8,q8,#8 - veor q0,q1,q10 -.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 - veor q3,q3,q2 @ accumulate q3 early - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q3,q3,q10 - veor q9,q9,q7 @ Karatsuba pre-processing - veor q3,q3,q0 -.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 - bhs .Loop_mod2x_v8 @ there was at least 32 more bytes - - veor q2,q2,q10 - vext.8 q3,q8,q8,#8 @ re-construct q3 - adds r3,r3,#32 @ re-construct r3 - veor q0,q0,q2 @ re-construct q0 - beq .Ldone_v8 @ is r3 zero? -.Lodd_tail_v8: - vext.8 q10,q0,q0,#8 - veor q3,q3,q0 @ inp^=Xi - veor q9,q8,q10 @ q9 is rotated inp^Xi - -.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo - veor q9,q9,q3 @ Karatsuba pre-processing -.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi -.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) - - vext.8 q9,q0,q2,#8 @ Karatsuba post-processing - veor q10,q0,q2 - veor q1,q1,q9 - veor q1,q1,q10 -.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction - - vmov d4,d3 @ Xh|Xm - 256-bit result - vmov d3,d0 @ Xm is rotated Xl - veor q0,q1,q10 - - vext.8 q10,q0,q0,#8 @ 2nd phase of reduction -.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 - veor q10,q10,q2 - veor q0,q0,q10 - -.Ldone_v8: -#ifndef __ARMEB__ - vrev64.8 q0,q0 -#endif - vext.8 q0,q0,q0,#8 - vst1.64 {q0},[r0] @ write out Xi - - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so - bx lr -.size gcm_ghash_v8,.-gcm_ghash_v8 -.byte 
71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha1-armv4-large.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha1-armv4-large.S deleted file mode 100644 index 61deddf8e7..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha1-armv4-large.S +++ /dev/null @@ -1,1511 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -#include - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -.globl sha1_block_data_order -.hidden sha1_block_data_order -.type sha1_block_data_order,%function - -.align 5 -sha1_block_data_order: -#if __ARM_MAX_ARCH__>=7 -.Lsha1_block: - adr r3,.Lsha1_block - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV8_SHA1 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - ldmia r0,{r3,r4,r5,r6,r7} -.Lloop: - ldr r8,.LK_00_19 - mov r14,sp - sub sp,sp,#15*4 - mov r5,r5,ror#30 - mov r6,r6,ror#30 - mov r7,r7,ror#30 @ [6] -.L_00_15: -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - 
ldr r9,[r1],#4 @ handles unaligned - add r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r7,r7,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r6,r8,r6,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r4,r5 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r6,r8,r6,ror#2 @ E+=K_00_19 - eor r10,r4,r5 @ F_xx_xx - add r6,r6,r7,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r3,r10,ror#2 - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r6,r6,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r5,r8,r5,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r3,r4 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r5,r8,r5,ror#2 @ E+=K_00_19 - eor r10,r3,r4 @ F_xx_xx - add r5,r5,r6,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r7,r10,ror#2 - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! 
- add r5,r5,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r4,r8,r4,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r7,r3 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r4,r8,r4,ror#2 @ E+=K_00_19 - eor r10,r7,r3 @ F_xx_xx - add r4,r4,r5,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r6,r10,ror#2 - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r4,r4,r10 @ E+=F_00_19(B,C,D) -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r3,r8,r3,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r6,r7 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r3,r8,r3,ror#2 @ E+=K_00_19 - eor r10,r6,r7 @ F_xx_xx - add r3,r3,r4,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r5,r10,ror#2 - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! - add r3,r3,r10 @ E+=F_00_19(B,C,D) -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp -#endif - bne .L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#25*4 -#if __ARM_ARCH__<7 - ldrb r10,[r1,#2] - ldrb r9,[r1,#3] - ldrb r11,[r1,#1] - add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1],#4 - orr r9,r9,r10,lsl#8 - eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#16 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r9,r12,lsl#24 -#else - ldr r9,[r1],#4 @ handles unaligned - add r7,r8,r7,ror#2 @ E+=K_00_19 - eor r10,r5,r6 @ F_xx_xx - add r7,r7,r3,ror#27 @ E+=ROR(A,27) -#ifdef __ARMEL__ - rev r9,r9 @ byte swap -#endif -#endif - and r10,r4,r10,ror#2 - add r7,r7,r9 @ E+=X[i] - eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) - str r9,[r14,#-4]! 
- add r7,r7,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) - add r6,r6,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) - add r5,r5,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) - add r4,r4,r10 @ E+=F_00_19(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) - add r3,r3,r10 @ E+=F_00_19(B,C,D) - - ldr r8,.LK_20_39 @ [+15+16*4] - cmn sp,#0 @ [+3], clear carry to denote 20_39 -.L_20_39_or_60_79: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r4,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r3,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r7,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- eor r10,r6,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_20_39(B,C,D) - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - eor r10,r5,r10,ror#2 @ F_xx_xx - @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_20_39(B,C,D) -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp @ preserve carry -#endif - bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] - bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes - - ldr r8,.LK_40_59 - sub sp,sp,#20*4 @ [+2] -.L_40_59: - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r7,r8,r7,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r5,r6 @ F_xx_xx - mov r9,r9,ror#31 - add r7,r7,r3,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r4,r10,ror#2 @ F_xx_xx - and r11,r5,r6 @ F_xx_xx - add r7,r7,r9 @ E+=X[i] - add r7,r7,r10 @ E+=F_40_59(B,C,D) - add r7,r7,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r6,r8,r6,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r4,r5 @ F_xx_xx - mov r9,r9,ror#31 - add r6,r6,r7,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r3,r10,ror#2 @ F_xx_xx - and r11,r4,r5 @ F_xx_xx - add r6,r6,r9 @ E+=X[i] - add r6,r6,r10 @ E+=F_40_59(B,C,D) - add r6,r6,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r5,r8,r5,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r3,r4 @ F_xx_xx - mov r9,r9,ror#31 - add r5,r5,r6,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! 
- and r10,r7,r10,ror#2 @ F_xx_xx - and r11,r3,r4 @ F_xx_xx - add r5,r5,r9 @ E+=X[i] - add r5,r5,r10 @ E+=F_40_59(B,C,D) - add r5,r5,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r4,r8,r4,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r7,r3 @ F_xx_xx - mov r9,r9,ror#31 - add r4,r4,r5,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r6,r10,ror#2 @ F_xx_xx - and r11,r7,r3 @ F_xx_xx - add r4,r4,r9 @ E+=X[i] - add r4,r4,r10 @ E+=F_40_59(B,C,D) - add r4,r4,r11,ror#2 - ldr r9,[r14,#15*4] - ldr r10,[r14,#13*4] - ldr r11,[r14,#7*4] - add r3,r8,r3,ror#2 @ E+=K_xx_xx - ldr r12,[r14,#2*4] - eor r9,r9,r10 - eor r11,r11,r12 @ 1 cycle stall - eor r10,r6,r7 @ F_xx_xx - mov r9,r9,ror#31 - add r3,r3,r4,ror#27 @ E+=ROR(A,27) - eor r9,r9,r11,ror#31 - str r9,[r14,#-4]! - and r10,r5,r10,ror#2 @ F_xx_xx - and r11,r6,r7 @ F_xx_xx - add r3,r3,r9 @ E+=X[i] - add r3,r3,r10 @ E+=F_40_59(B,C,D) - add r3,r3,r11,ror#2 -#if defined(__thumb2__) - mov r12,sp - teq r14,r12 -#else - teq r14,sp -#endif - bne .L_40_59 @ [+((12+5)*5+2)*4] - - ldr r8,.LK_60_79 - sub sp,sp,#20*4 - cmp sp,#0 @ set carry to denote 60_79 - b .L_20_39_or_60_79 @ [+4], spare 300 bytes -.L_done: - add sp,sp,#80*4 @ "deallocate" stack frame - ldmia r0,{r8,r9,r10,r11,r12} - add r3,r8,r3 - add r4,r9,r4 - add r5,r10,r5,ror#2 - add r6,r11,r6,ror#2 - add r7,r12,r7,ror#2 - stmia r0,{r3,r4,r5,r6,r7} - teq r1,r2 - bne .Lloop @ [+18], total 1307 - -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size sha1_block_data_order,.-sha1_block_data_order - -.align 5 -.LK_00_19:.word 0x5a827999 -.LK_20_39:.word 0x6ed9eba1 -.LK_40_59:.word 0x8f1bbcdc -.LK_60_79:.word 0xca62c1d6 -#if __ARM_MAX_ARCH__>=7 -.LOPENSSL_armcap: -.word 
OPENSSL_armcap_P-.Lsha1_block -#endif -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 5 -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.type sha1_block_data_order_neon,%function -.align 4 -sha1_block_data_order_neon: -.LNEON: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - @ dmb @ errata #451034 on early Cortex A8 - @ vstmdb sp!,{d8-d15} @ ABI specification says so - mov r14,sp - sub r12,sp,#64 - adr r8,.LK_00_19 - bic r12,r12,#15 @ align for 128-bit stores - - ldmia r0,{r3,r4,r5,r6,r7} @ load context - mov sp,r12 @ alloca - - vld1.8 {q0,q1},[r1]! @ handles unaligned - veor q15,q15,q15 - vld1.8 {q2,q3},[r1]! - vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19 - vrev32.8 q0,q0 @ yes, even on - vrev32.8 q1,q1 @ big-endian... - vrev32.8 q2,q2 - vadd.i32 q8,q0,q14 - vrev32.8 q3,q3 - vadd.i32 q9,q1,q14 - vst1.32 {q8},[r12,:128]! - vadd.i32 q10,q2,q14 - vst1.32 {q9},[r12,:128]! - vst1.32 {q10},[r12,:128]! - ldr r9,[sp] @ big RAW stall - -.Loop_neon: - vext.8 q8,q0,q1,#8 - bic r10,r6,r4 - add r7,r7,r9 - and r11,r5,r4 - vadd.i32 q13,q3,q14 - ldr r9,[sp,#4] - add r7,r7,r3,ror#27 - vext.8 q12,q3,q15,#4 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q8,q8,q0 - bic r10,r5,r3 - add r6,r6,r9 - veor q12,q12,q2 - and r11,r4,r3 - ldr r9,[sp,#8] - veor q12,q12,q8 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q13,q15,q12,#4 - bic r10,r4,r7 - add r5,r5,r9 - vadd.i32 q8,q12,q12 - and r11,r3,r7 - ldr r9,[sp,#12] - vsri.32 q8,q12,#31 - add r5,r5,r6,ror#27 - eor r11,r11,r10 - mov r7,r7,ror#2 - vshr.u32 q12,q13,#30 - add r5,r5,r11 - bic r10,r3,r6 - vshl.u32 q13,q13,#2 - add r4,r4,r9 - and r11,r7,r6 - veor q8,q8,q12 - ldr r9,[sp,#16] - add r4,r4,r5,ror#27 - veor q8,q8,q13 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q9,q1,q2,#8 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - vadd.i32 q13,q8,q14 - ldr r9,[sp,#20] - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r4,ror#27 - vext.8 q12,q8,q15,#4 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - veor q9,q9,q1 - bic r10,r6,r4 - add r7,r7,r9 - veor q12,q12,q3 - and r11,r5,r4 - ldr r9,[sp,#24] - veor q12,q12,q9 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q13,q15,q12,#4 - bic r10,r5,r3 - add r6,r6,r9 - vadd.i32 q9,q12,q12 - and r11,r4,r3 - ldr r9,[sp,#28] - vsri.32 q9,q12,#31 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - mov r3,r3,ror#2 - vshr.u32 q12,q13,#30 - add r6,r6,r11 - bic r10,r4,r7 - vshl.u32 q13,q13,#2 - add r5,r5,r9 - and r11,r3,r7 - veor q9,q9,q12 - ldr r9,[sp,#32] - add r5,r5,r6,ror#27 - veor q9,q9,q13 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q10,q2,q3,#8 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - vadd.i32 q13,q9,q14 - ldr r9,[sp,#36] - add r4,r4,r5,ror#27 - vext.8 q12,q9,q15,#4 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q10,q10,q2 - bic r10,r7,r5 - add r3,r3,r9 - veor q12,q12,q8 - and r11,r6,r5 - ldr r9,[sp,#40] - veor q12,q12,q10 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! 
- mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q13,q15,q12,#4 - bic r10,r6,r4 - add r7,r7,r9 - vadd.i32 q10,q12,q12 - and r11,r5,r4 - ldr r9,[sp,#44] - vsri.32 q10,q12,#31 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - mov r4,r4,ror#2 - vshr.u32 q12,q13,#30 - add r7,r7,r11 - bic r10,r5,r3 - vshl.u32 q13,q13,#2 - add r6,r6,r9 - and r11,r4,r3 - veor q10,q10,q12 - ldr r9,[sp,#48] - add r6,r6,r7,ror#27 - veor q10,q10,q13 - eor r11,r11,r10 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q11,q3,q8,#8 - bic r10,r4,r7 - add r5,r5,r9 - and r11,r3,r7 - vadd.i32 q13,q10,q14 - ldr r9,[sp,#52] - add r5,r5,r6,ror#27 - vext.8 q12,q10,q15,#4 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q11,q11,q3 - bic r10,r3,r6 - add r4,r4,r9 - veor q12,q12,q9 - and r11,r7,r6 - ldr r9,[sp,#56] - veor q12,q12,q11 - add r4,r4,r5,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q13,q15,q12,#4 - bic r10,r7,r5 - add r3,r3,r9 - vadd.i32 q11,q12,q12 - and r11,r6,r5 - ldr r9,[sp,#60] - vsri.32 q11,q12,#31 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - vshr.u32 q12,q13,#30 - add r3,r3,r11 - bic r10,r6,r4 - vshl.u32 q13,q13,#2 - add r7,r7,r9 - and r11,r5,r4 - veor q11,q11,q12 - ldr r9,[sp,#0] - add r7,r7,r3,ror#27 - veor q11,q11,q13 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q10,q11,#8 - bic r10,r5,r3 - add r6,r6,r9 - and r11,r4,r3 - veor q0,q0,q8 - ldr r9,[sp,#4] - add r6,r6,r7,ror#27 - veor q0,q0,q1 - eor r11,r11,r10 - mov r3,r3,ror#2 - vadd.i32 q13,q11,q14 - add r6,r6,r11 - bic r10,r4,r7 - veor q12,q12,q0 - add r5,r5,r9 - and r11,r3,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - eor r11,r11,r10 - mov r7,r7,ror#2 - vsli.32 q0,q12,#2 - add r5,r5,r11 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - ldr r9,[sp,#12] - add r4,r4,r5,ror#27 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - ldr r9,[sp,#16] - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q11,q0,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#20] - veor q1,q1,q9 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q1,q1,q2 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q0,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q1 - ldr r9,[sp,#24] - eor r11,r10,r4 - vshr.u32 q1,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q1,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#28] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#32] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q0,q1,#8 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#36] - veor q2,q2,q10 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - veor q2,q2,q3 - mov r5,r5,ror#2 - add r3,r3,r11 - vadd.i32 q13,q1,q14 - eor r10,r4,r6 - vld1.32 {d28[],d29[]},[r8,:32]! - add r7,r7,r9 - veor q12,q12,q2 - ldr r9,[sp,#40] - eor r11,r10,r5 - vshr.u32 q2,q12,#30 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - vst1.32 {q13},[r12,:128]! 
- add r7,r7,r11 - eor r10,r3,r5 - vsli.32 q2,q12,#2 - add r6,r6,r9 - ldr r9,[sp,#44] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#48] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q1,q2,#8 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r7 - add r4,r4,r5,ror#27 - veor q3,q3,q8 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q2,q14 - eor r10,r5,r7 - add r3,r3,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r6 - vshr.u32 q3,q12,#30 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vst1.32 {q13},[r12,:128]! - add r3,r3,r11 - eor r10,r4,r6 - vsli.32 q3,q12,#2 - add r7,r7,r9 - ldr r9,[sp,#60] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#0] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q2,q3,#8 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#4] - veor q8,q8,q0 - eor r11,r10,r3 - add r5,r5,r6,ror#27 - veor q8,q8,q9 - mov r7,r7,ror#2 - add r5,r5,r11 - vadd.i32 q13,q3,q14 - eor r10,r6,r3 - add r4,r4,r9 - veor q12,q12,q8 - ldr r9,[sp,#8] - eor r11,r10,r7 - vshr.u32 q8,q12,#30 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - add r4,r4,r11 - eor r10,r5,r7 - vsli.32 q8,q12,#2 - add r3,r3,r9 - ldr r9,[sp,#12] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#16] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q3,q8,#8 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#20] - veor q9,q9,q1 - eor r11,r10,r4 - add r6,r6,r7,ror#27 - veor q9,q9,q10 - mov r3,r3,ror#2 - add r6,r6,r11 - vadd.i32 q13,q8,q14 - eor r10,r7,r4 - add r5,r5,r9 - veor q12,q12,q9 - ldr r9,[sp,#24] - eor r11,r10,r3 - vshr.u32 q9,q12,#30 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - vst1.32 {q13},[r12,:128]! 
- add r5,r5,r11 - eor r10,r6,r3 - vsli.32 q9,q12,#2 - add r4,r4,r9 - ldr r9,[sp,#28] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#32] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q8,q9,#8 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#36] - veor q10,q10,q2 - add r7,r7,r3,ror#27 - eor r11,r5,r6 - veor q10,q10,q11 - add r7,r7,r10 - and r11,r11,r4 - vadd.i32 q13,q9,q14 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q12,q12,q10 - add r6,r6,r9 - and r10,r4,r5 - vshr.u32 q10,q12,#30 - ldr r9,[sp,#40] - add r6,r6,r7,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r4,r5 - add r6,r6,r10 - vsli.32 q10,q12,#2 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#44] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#48] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q9,q10,#8 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#52] - veor q11,q11,q3 - add r3,r3,r4,ror#27 - eor r11,r6,r7 - veor q11,q11,q0 - add r3,r3,r10 - and r11,r11,r5 - vadd.i32 q13,q10,q14 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - veor q12,q12,q11 - add r7,r7,r9 - and r10,r5,r6 - vshr.u32 q11,q12,#30 - ldr r9,[sp,#56] - add r7,r7,r3,ror#27 - vst1.32 {q13},[r12,:128]! 
- eor r11,r5,r6 - add r7,r7,r10 - vsli.32 q11,q12,#2 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#60] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#0] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q10,q11,#8 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#4] - veor q0,q0,q8 - add r4,r4,r5,ror#27 - eor r11,r7,r3 - veor q0,q0,q1 - add r4,r4,r10 - and r11,r11,r6 - vadd.i32 q13,q11,q14 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q12,q12,q0 - add r3,r3,r9 - and r10,r6,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r3,r3,r4,ror#27 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - eor r11,r6,r7 - add r3,r3,r10 - vsli.32 q0,q12,#2 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#12] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#16] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q11,q0,#8 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#20] - veor q1,q1,q9 - add r5,r5,r6,ror#27 - eor r11,r3,r4 - veor q1,q1,q2 - add r5,r5,r10 - and r11,r11,r7 - vadd.i32 q13,q0,q14 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q12,q12,q1 - add r4,r4,r9 - and r10,r7,r3 - vshr.u32 q1,q12,#30 - ldr r9,[sp,#24] - add r4,r4,r5,ror#27 - vst1.32 {q13},[r12,:128]! 
- eor r11,r7,r3 - add r4,r4,r10 - vsli.32 q1,q12,#2 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#28] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#32] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q0,q1,#8 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#36] - veor q2,q2,q10 - add r6,r6,r7,ror#27 - eor r11,r4,r5 - veor q2,q2,q3 - add r6,r6,r10 - and r11,r11,r3 - vadd.i32 q13,q1,q14 - mov r3,r3,ror#2 - add r6,r6,r11 - veor q12,q12,q2 - add r5,r5,r9 - and r10,r3,r4 - vshr.u32 q2,q12,#30 - ldr r9,[sp,#40] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r3,r4 - add r5,r5,r10 - vsli.32 q2,q12,#2 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#44] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#48] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q1,q2,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q3,q3,q8 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q2,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r4 - vshr.u32 q3,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q3,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#60] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#0] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q3,q14 - eor r10,r5,r7 - add r3,r3,r9 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - teq r1,r2 - sub r8,r8,#16 - it eq - subeq r1,r1,#64 - vld1.8 {q0,q1},[r1]! - ldr r9,[sp,#4] - eor r11,r10,r6 - vld1.8 {q2,q3},[r1]! - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - eor r10,r4,r6 - vrev32.8 q0,q0 - add r7,r7,r9 - ldr r9,[sp,#8] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#12] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#16] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vrev32.8 q1,q1 - eor r10,r6,r3 - add r4,r4,r9 - vadd.i32 q8,q0,q14 - ldr r9,[sp,#20] - eor r11,r10,r7 - vst1.32 {q8},[r12,:128]! - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#24] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#28] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#32] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vrev32.8 q2,q2 - eor r10,r7,r4 - add r5,r5,r9 - vadd.i32 q9,q1,q14 - ldr r9,[sp,#36] - eor r11,r10,r3 - vst1.32 {q9},[r12,:128]! - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#40] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#44] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#48] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vrev32.8 q3,q3 - eor r10,r3,r5 - add r6,r6,r9 - vadd.i32 q10,q2,q14 - ldr r9,[sp,#52] - eor r11,r10,r4 - vst1.32 {q10},[r12,:128]! 
- add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#56] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#60] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - ldmia r0,{r9,r10,r11,r12} @ accumulate context - add r3,r3,r9 - ldr r9,[r0,#16] - add r4,r4,r10 - add r5,r5,r11 - add r6,r6,r12 - it eq - moveq sp,r14 - add r7,r7,r9 - it ne - ldrne r9,[sp] - stmia r0,{r3,r4,r5,r6,r7} - itt ne - addne r12,sp,#3*16 - bne .Loop_neon - - @ vldmia sp!,{d8-d15} - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -.size sha1_block_data_order_neon,.-sha1_block_data_order_neon -#endif -#if __ARM_MAX_ARCH__>=7 - -# if defined(__thumb2__) -# define INST(a,b,c,d) .byte c,d|0xf,a,b -# else -# define INST(a,b,c,d) .byte a,b,c,d|0x10 -# endif - -.type sha1_block_data_order_armv8,%function -.align 5 -sha1_block_data_order_armv8: -.LARMv8: - vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so - - veor q1,q1,q1 - adr r3,.LK_00_19 - vld1.32 {q0},[r0]! - vld1.32 {d2[0]},[r0] - sub r0,r0,#16 - vld1.32 {d16[],d17[]},[r3,:32]! - vld1.32 {d18[],d19[]},[r3,:32]! - vld1.32 {d20[],d21[]},[r3,:32]! - vld1.32 {d22[],d23[]},[r3,:32] - -.Loop_v8: - vld1.8 {q4,q5},[r1]! - vld1.8 {q6,q7},[r1]! 
- vrev32.8 q4,q4 - vrev32.8 q5,q5 - - vadd.i32 q12,q8,q4 - vrev32.8 q6,q6 - vmov q14,q0 @ offload - subs r2,r2,#1 - - vadd.i32 q13,q8,q5 - vrev32.8 q7,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0 - INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12 - vadd.i32 q12,q8,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1 - INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 - vadd.i32 q13,q8,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2 - INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 - vadd.i32 q12,q8,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3 - INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 - vadd.i32 q13,q9,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4 - INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 - vadd.i32 q12,q9,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q9,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q10,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9 - 
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q10,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11 - INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 - vadd.i32 q13,q10,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13 - INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 - vadd.i32 q13,q11,q7 - INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 - INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14 - INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 - vadd.i32 q12,q11,q4 - INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 - INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q5 - INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 - INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - vadd.i32 q12,q11,q6 - INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q7 - - INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18 - INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 - - INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19 - INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 - - vadd.i32 q1,q1,q2 - vadd.i32 q0,q0,q14 - bne .Loop_v8 - - vst1.32 {q0},[r0]! 
- vst1.32 {d2[0]},[r0] - - vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} - bx lr @ bx lr -.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8 -#endif -#if __ARM_MAX_ARCH__>=7 -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha256-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha256-armv4.S deleted file mode 100644 index aee04785c0..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha256-armv4.S +++ /dev/null @@ -1,2839 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. -@ ==================================================================== - -@ SHA256 block procedure for ARMv4. May 2007. 
- -@ Performance is ~2x better than gcc 3.4 generated code and in "abso- -@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per -@ byte [on single-issue Xscale PXA250 core]. - -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 22% improvement on -@ Cortex A8 core and ~20 cycles per processed byte. - -@ February 2011. -@ -@ Profiler-assisted and platform-specific optimization resulted in 16% -@ improvement on Cortex A8 core and ~15.4 cycles per processed byte. - -@ September 2013. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process one -@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon -@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only -@ code (meaning that latter performs sub-optimally, nothing was done -@ about it). - -@ May 2014. -@ -@ Add ARMv8 code path performing at 2.0 cpb on Apple A7. - -#ifndef __KERNEL__ -# include -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those -@ instructions are manually-encoded. (See unsha256.) 
-.arch armv7-a - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -.type K256,%object -.align 5 -K256: -.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.size K256,.-K256 -.word 0 @ terminator -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-.Lsha256_block_data_order -#endif -.align 5 - -.globl sha256_block_data_order -.hidden sha256_block_data_order -.type sha256_block_data_order,%function -sha256_block_data_order: -.Lsha256_block_data_order: -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r3,pc,#8 @ sha256_block_data_order -#else - adr r3,.Lsha256_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV8_SHA256 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif - add r2,r1,r2,lsl#6 @ len to point at the end of inp - stmdb sp!,{r0,r1,r2,r4-r11,lr} - ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r14,r3,#256+32 @ K256 - sub sp,sp,#16*4 @ alloca(X[16]) -.Loop: -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ magic - eor r12,r12,r12 -#if __ARM_ARCH__>=7 - 
@ ldr r2,[r1],#4 @ 0 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 0 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 0==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? -#endif -#if 0<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 1 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 1 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add 
r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 1==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 1<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 2 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 2 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 2==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 2<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 3 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 3 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 3==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 3<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 4 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 4 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 4==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 4<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 5 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 5==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 5<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 6 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 6 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 6==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 6<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 7 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 7==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 7<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 8 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 8 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 8==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 8<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 9 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 9 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 9==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 9<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 10 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 10 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 10==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 10<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 11 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 11 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 11==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 11<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 12 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 12 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 12==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 12<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 13 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 13 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 13==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 13<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 14 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 14 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 14==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 14<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 15 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 15 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 15==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 15<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -.Lrounds_16_xx: - @ ldr r2,[sp,#1*4] @ 16 - @ ldr r1,[sp,#14*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#0*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#9*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 16==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 16<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#2*4] @ 17 - @ ldr r1,[sp,#15*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#1*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#10*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 17==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 17<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#3*4] @ 18 - @ ldr r1,[sp,#0*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#2*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#11*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 18==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 18<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#4*4] @ 19 - @ ldr r1,[sp,#1*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#3*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#12*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 19==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 19<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#5*4] @ 20 - @ ldr r1,[sp,#2*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#4*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#13*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 20==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 20<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#6*4] @ 21 - @ ldr r1,[sp,#3*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#5*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#14*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 21==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 21<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#7*4] @ 22 - @ ldr r1,[sp,#4*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#6*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#15*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 22==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 22<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#8*4] @ 23 - @ ldr r1,[sp,#5*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#7*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#0*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 23==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 23<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#9*4] @ 24 - @ ldr r1,[sp,#6*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#8*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#1*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 24==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 24<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#10*4] @ 25 - @ ldr r1,[sp,#7*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#9*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#2*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 25==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 25<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#11*4] @ 26 - @ ldr r1,[sp,#8*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#10*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#3*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 26==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 26<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#12*4] @ 27 - @ ldr r1,[sp,#9*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#11*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#4*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 27==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 27<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#13*4] @ 28 - @ ldr r1,[sp,#10*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#12*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#5*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 28==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 28<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#14*4] @ 29 - @ ldr r1,[sp,#11*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#13*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#6*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 29==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 29<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#15*4] @ 30 - @ ldr r1,[sp,#12*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#14*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#7*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 30==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 30<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#0*4] @ 31 - @ ldr r1,[sp,#13*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#15*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#8*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 31==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 31<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - ite eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r3,[sp,#16*4] @ pull ctx - bne .Lrounds_16_xx - - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r0,[r3,#0] - ldr r2,[r3,#4] - ldr r12,[r3,#8] - add r4,r4,r0 - ldr r0,[r3,#12] - add r5,r5,r2 - ldr r2,[r3,#16] - add r6,r6,r12 - ldr r12,[r3,#20] - add r7,r7,r0 - ldr r0,[r3,#24] - add r8,r8,r2 - ldr r2,[r3,#28] - add r9,r9,r12 - ldr r1,[sp,#17*4] @ pull inp - ldr r12,[sp,#18*4] @ pull inp+len - add r10,r10,r0 - add r11,r11,r2 - stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} - cmp r1,r12 - sub r14,r14,#256 @ rewind Ktbl - bne .Loop - - add sp,sp,#19*4 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size sha256_block_data_order,.-sha256_block_data_order -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.globl sha256_block_data_order_neon -.hidden sha256_block_data_order_neon -.type sha256_block_data_order_neon,%function -.align 5 -.skip 16 -sha256_block_data_order_neon: -.LNEON: - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - - sub r11,sp,#16*4+16 - adr r14,K256 - bic r11,r11,#15 @ align for 128-bit stores - mov r12,sp - mov sp,r11 @ alloca - add r2,r1,r2,lsl#6 @ len to point at the end of inp - - vld1.8 {q0},[r1]! - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - vld1.32 {q8},[r14,:128]! 
- vld1.32 {q9},[r14,:128]! - vld1.32 {q10},[r14,:128]! - vld1.32 {q11},[r14,:128]! - vrev32.8 q0,q0 @ yes, even on - str r0,[sp,#64] - vrev32.8 q1,q1 @ big-endian - str r1,[sp,#68] - mov r1,sp - vrev32.8 q2,q2 - str r2,[sp,#72] - vrev32.8 q3,q3 - str r12,[sp,#76] @ save original sp - vadd.i32 q8,q8,q0 - vadd.i32 q9,q9,q1 - vst1.32 {q8},[r1,:128]! - vadd.i32 q10,q10,q2 - vst1.32 {q9},[r1,:128]! - vadd.i32 q11,q11,q3 - vst1.32 {q10},[r1,:128]! - vst1.32 {q11},[r1,:128]! - - ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r1,r1,#64 - ldr r2,[sp,#0] - eor r12,r12,r12 - eor r3,r5,r6 - b .L_00_48 - -.align 4 -.L_00_48: - vext.8 q8,q0,q1,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q2,q3,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q0,q0,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#4] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d7,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d7,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d7,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q0,q0,q9 - add r10,r10,r2 - ldr r2,[sp,#8] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d7,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d7,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d0,d0,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d0,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d0,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d0,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#12] - and r3,r3,r12 - vshr.u32 d24,d0,#19 - 
add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! - add r8,r8,r2 - vsli.32 d24,d0,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d1,d1,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q0 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q1,q2,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q3,q0,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q1,q1,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#20] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d1,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d1,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d1,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q1,q1,q9 - add r6,r6,r2 - ldr r2,[sp,#24] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d1,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d1,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d2,d2,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d2,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d2,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d2,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#28] - and r3,r3,r12 - vshr.u32 d24,d2,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! 
- add r4,r4,r2 - vsli.32 d24,d2,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d3,d3,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q1 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vext.8 q8,q2,q3,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q0,q1,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q2,q2,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#36] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d3,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d3,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d3,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q2,q2,q9 - add r10,r10,r2 - ldr r2,[sp,#40] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d3,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d3,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d4,d4,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d4,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d4,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d4,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#44] - and r3,r3,r12 - vshr.u32 d24,d4,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! 
- add r8,r8,r2 - vsli.32 d24,d4,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d5,d5,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q2 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q3,q0,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q1,q2,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q3,q3,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#52] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d5,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d5,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d5,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q3,q3,q9 - add r6,r6,r2 - ldr r2,[sp,#56] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d5,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d5,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d6,d6,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d6,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d6,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d6,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#60] - and r3,r3,r12 - vshr.u32 d24,d6,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! 
- add r4,r4,r2 - vsli.32 d24,d6,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d7,d7,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q3 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[r14] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - teq r2,#0 @ check for K256 terminator - ldr r2,[sp,#0] - sub r1,r1,#64 - bne .L_00_48 - - ldr r1,[sp,#68] - ldr r0,[sp,#72] - sub r14,r14,#256 @ rewind r14 - teq r1,r0 - it eq - subeq r1,r1,#64 @ avoid SEGV - vld1.8 {q0},[r1]! @ load next input block - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - it ne - strne r1,[sp,#68] - mov r1,sp - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q0,q0 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q0 - ldr r2,[sp,#4] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#8] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#12] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and 
r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q1,q1 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q1 - ldr r2,[sp,#20] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#24] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#28] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q2,q2 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q2 - ldr r2,[sp,#36] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#40] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#44] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q3,q3 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q3 - ldr r2,[sp,#52] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#56] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#60] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#64] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! 
- ldr r0,[r2,#0] - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r12,[r2,#4] - ldr r3,[r2,#8] - ldr r1,[r2,#12] - add r4,r4,r0 @ accumulate - ldr r0,[r2,#16] - add r5,r5,r12 - ldr r12,[r2,#20] - add r6,r6,r3 - ldr r3,[r2,#24] - add r7,r7,r1 - ldr r1,[r2,#28] - add r8,r8,r0 - str r4,[r2],#4 - add r9,r9,r12 - str r5,[r2],#4 - add r10,r10,r3 - str r6,[r2],#4 - add r11,r11,r1 - str r7,[r2],#4 - stmia r2,{r8,r9,r10,r11} - - ittte ne - movne r1,sp - ldrne r2,[sp,#0] - eorne r12,r12,r12 - ldreq sp,[sp,#76] @ restore original sp - itt ne - eorne r3,r5,r6 - bne .L_00_48 - - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -.size sha256_block_data_order_neon,.-sha256_block_data_order_neon -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - -# if defined(__thumb2__) -# define INST(a,b,c,d) .byte c,d|0xc,a,b -# else -# define INST(a,b,c,d) .byte a,b,c,d -# endif - -.type sha256_block_data_order_armv8,%function -.align 5 -sha256_block_data_order_armv8: -.LARMv8: - vld1.32 {q0,q1},[r0] - sub r3,r3,#256+32 - add r2,r1,r2,lsl#6 @ len to point at the end of inp - b .Loop_v8 - -.align 4 -.Loop_v8: - vld1.8 {q8,q9},[r1]! - vld1.8 {q10,q11},[r1]! - vld1.32 {q12},[r3]! - vrev32.8 q8,q8 - vrev32.8 q9,q9 - vrev32.8 q10,q10 - vrev32.8 q11,q11 - vmov q14,q0 @ offload - vmov q15,q1 - teq r1,r2 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! 
- vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! 
- vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vld1.32 {q13},[r3] - vadd.i32 q12,q12,q10 - sub r3,r3,#256-16 @ rewind - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vadd.i32 q13,q13,q11 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vadd.i32 q0,q0,q14 - vadd.i32 q1,q1,q15 - it ne - bne .Loop_v8 - - vst1.32 {q0,q1},[r0] - - bx lr @ bx lr -.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8 -#endif -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM 
-.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha512-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha512-armv4.S deleted file mode 100644 index a06d41fee5..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/sha512-armv4.S +++ /dev/null @@ -1,1894 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. -@ -@ Licensed under the OpenSSL license (the "License"). You may not use -@ this file except in compliance with the License. You can obtain a copy -@ in the file LICENSE in the source distribution or at -@ https://www.openssl.org/source/license.html - - -@ ==================================================================== -@ Written by Andy Polyakov for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. -@ ==================================================================== - -@ SHA512 block procedure for ARMv4. September 2007. - -@ This code is ~4.5 (four and a half) times faster than code generated -@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue -@ Xscale PXA250 core]. -@ -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 6% improvement on -@ Cortex A8 core and ~40 cycles per processed byte. - -@ February 2011. 
-@ -@ Profiler-assisted and platform-specific optimization resulted in 7% -@ improvement on Coxtex A8 core and ~38 cycles per byte. - -@ March 2011. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process -@ one byte in 23.3 cycles or ~60% faster than integer-only code. - -@ August 2012. -@ -@ Improve NEON performance by 12% on Snapdragon S4. In absolute -@ terms it's 22.6 cycles per byte, which is disappointing result. -@ Technical writers asserted that 3-way S4 pipeline can sustain -@ multiple NEON instructions per cycle, but dual NEON issue could -@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html -@ for further details. On side note Cortex-A15 processes one byte in -@ 16 cycles. - -@ Byte order [in]dependence. ========================================= -@ -@ Originally caller was expected to maintain specific *dword* order in -@ h[0-7], namely with most significant dword at *lower* address, which -@ was reflected in below two parameters as 0 and 4. Now caller is -@ expected to maintain native byte order for whole 64-bit values. -#ifndef __KERNEL__ -# include -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -# define VFP_ABI_PUSH -# define VFP_ABI_POP -#endif - -@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both -@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
-.arch armv7-a - -#ifdef __ARMEL__ -# define LO 0 -# define HI 4 -# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 -#else -# define HI 0 -# define LO 4 -# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 -#endif - -.text -#if defined(__thumb2__) -.syntax unified -.thumb -# define adrl adr -#else -.code 32 -#endif - -.type K512,%object -.align 5 -K512: - WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) - WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) - WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) - WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) - WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) - WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) - WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) - WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) - WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) - WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) - WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) - WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) - WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) - WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) - WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) - WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) - WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) - WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) - WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) - WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) - WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) - WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) - WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) - WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) - WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) - WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) - WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) - WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) - WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) - WORD64(0x84c87814,0xa1f0ab72, 
0x8cc70208,0x1a6439ec) - WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) - WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) - WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) - WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) - WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) - WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) - WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) - WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) - WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) - WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) -.size K512,.-K512 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-.Lsha512_block_data_order -.skip 32-4 -#else -.skip 32 -#endif - -.globl sha512_block_data_order -.hidden sha512_block_data_order -.type sha512_block_data_order,%function -sha512_block_data_order: -.Lsha512_block_data_order: -#if __ARM_ARCH__<7 && !defined(__thumb2__) - sub r3,pc,#8 @ sha512_block_data_order -#else - adr r3,.Lsha512_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P -#ifdef __APPLE__ - ldr r12,[r12] -#endif - tst r12,#ARMV7_NEON - bne .LNEON -#endif - add r2,r1,r2,lsl#7 @ len to point at the end of inp - stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - sub r14,r3,#672 @ K512 - sub sp,sp,#9*8 - - ldr r7,[r0,#32+LO] - ldr r8,[r0,#32+HI] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] -.Loop: - str r9, [sp,#48+0] - str r10, [sp,#48+4] - str r11, [sp,#56+0] - str r12, [sp,#56+4] - ldr r5,[r0,#0+LO] - ldr r6,[r0,#0+HI] - ldr r3,[r0,#8+LO] - ldr r4,[r0,#8+HI] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - str r3,[sp,#8+0] - str r4,[sp,#8+4] - str r9, [sp,#16+0] - str r10, [sp,#16+4] - str r11, [sp,#24+0] - str r12, [sp,#24+4] - ldr r3,[r0,#40+LO] - ldr r4,[r0,#40+HI] - str r3,[sp,#40+0] - str r4,[sp,#40+4] - 
-.L00_15: -#if __ARM_ARCH__<7 - ldrb r3,[r1,#7] - ldrb r9, [r1,#6] - ldrb r10, [r1,#5] - ldrb r11, [r1,#4] - ldrb r4,[r1,#3] - ldrb r12, [r1,#2] - orr r3,r3,r9,lsl#8 - ldrb r9, [r1,#1] - orr r3,r3,r10,lsl#16 - ldrb r10, [r1],#8 - orr r3,r3,r11,lsl#24 - orr r4,r4,r12,lsl#8 - orr r4,r4,r9,lsl#16 - orr r4,r4,r10,lsl#24 -#else - ldr r3,[r1,#4] - ldr r4,[r1],#8 -#ifdef __ARMEL__ - rev r3,r3 - rev r4,r4 -#endif -#endif - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - ldr r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#148 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 
- eor r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 - tst r14,#1 - beq .L00_15 - ldr r9,[sp,#184+0] - ldr r10,[sp,#184+4] - bic r14,r14,#1 -.L16_79: - @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) - @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 - @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 - mov r3,r9,lsr#1 - ldr r11,[sp,#80+0] - mov r4,r10,lsr#1 - ldr r12,[sp,#80+4] - eor r3,r3,r10,lsl#31 - eor r4,r4,r9,lsl#31 - eor r3,r3,r9,lsr#8 - eor r4,r4,r10,lsr#8 - eor r3,r3,r10,lsl#24 - eor r4,r4,r9,lsl#24 - eor r3,r3,r9,lsr#7 - eor r4,r4,r10,lsr#7 - eor r3,r3,r10,lsl#25 - - @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) - @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 - @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 - mov r9,r11,lsr#19 - mov r10,r12,lsr#19 - eor r9,r9,r12,lsl#13 - eor r10,r10,r11,lsl#13 - eor r9,r9,r12,lsr#29 - eor r10,r10,r11,lsr#29 - eor r9,r9,r11,lsl#3 - eor r10,r10,r12,lsl#3 - eor r9,r9,r11,lsr#6 - eor r10,r10,r12,lsr#6 - ldr r11,[sp,#120+0] - eor r9,r9,r12,lsl#26 - - ldr r12,[sp,#120+4] - adds r3,r3,r9 - ldr r9,[sp,#192+0] - adc r4,r4,r10 - - ldr r10,[sp,#192+4] - adds r3,r3,r11 - adc r4,r4,r12 - adds r3,r3,r9 - adc r4,r4,r10 - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - 
ldr r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#23 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 - eor r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 -#if __ARM_ARCH__>=7 - ittt eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r9,[sp,#184+0] - ldreq r10,[sp,#184+4] - beq .L16_79 - bic r14,r14,#1 - - ldr 
r3,[sp,#8+0] - ldr r4,[sp,#8+4] - ldr r9, [r0,#0+LO] - ldr r10, [r0,#0+HI] - ldr r11, [r0,#8+LO] - ldr r12, [r0,#8+HI] - adds r9,r5,r9 - str r9, [r0,#0+LO] - adc r10,r6,r10 - str r10, [r0,#0+HI] - adds r11,r3,r11 - str r11, [r0,#8+LO] - adc r12,r4,r12 - str r12, [r0,#8+HI] - - ldr r5,[sp,#16+0] - ldr r6,[sp,#16+4] - ldr r3,[sp,#24+0] - ldr r4,[sp,#24+4] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - adds r9,r5,r9 - str r9, [r0,#16+LO] - adc r10,r6,r10 - str r10, [r0,#16+HI] - adds r11,r3,r11 - str r11, [r0,#24+LO] - adc r12,r4,r12 - str r12, [r0,#24+HI] - - ldr r3,[sp,#40+0] - ldr r4,[sp,#40+4] - ldr r9, [r0,#32+LO] - ldr r10, [r0,#32+HI] - ldr r11, [r0,#40+LO] - ldr r12, [r0,#40+HI] - adds r7,r7,r9 - str r7,[r0,#32+LO] - adc r8,r8,r10 - str r8,[r0,#32+HI] - adds r11,r3,r11 - str r11, [r0,#40+LO] - adc r12,r4,r12 - str r12, [r0,#40+HI] - - ldr r5,[sp,#48+0] - ldr r6,[sp,#48+4] - ldr r3,[sp,#56+0] - ldr r4,[sp,#56+4] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] - adds r9,r5,r9 - str r9, [r0,#48+LO] - adc r10,r6,r10 - str r10, [r0,#48+HI] - adds r11,r3,r11 - str r11, [r0,#56+LO] - adc r12,r4,r12 - str r12, [r0,#56+HI] - - add sp,sp,#640 - sub r14,r14,#640 - - teq r1,r2 - bne .Loop - - add sp,sp,#8*9 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} -#else - ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet -.word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size sha512_block_data_order,.-sha512_block_data_order -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.globl sha512_block_data_order_neon -.hidden sha512_block_data_order_neon -.type sha512_block_data_order_neon,%function -.align 4 -sha512_block_data_order_neon: -.LNEON: - dmb @ errata #451034 on early Cortex A8 - add r2,r1,r2,lsl#7 @ len to point at the end of inp - adr r3,K512 - VFP_ABI_PUSH - vldmia 
r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context -.Loop_neon: - vshr.u64 d24,d20,#14 @ 0 -#if 0<16 - vld1.64 {d0},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 0>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 0<16 && defined(__ARMEL__) - vrev64.8 d0,d0 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 1 -#if 1<16 - vld1.64 {d1},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 1>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 1<16 && defined(__ARMEL__) - vrev64.8 d1,d1 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 2 -#if 2<16 - vld1.64 {d2},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 2>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 2<16 && defined(__ARMEL__) - vrev64.8 d2,d2 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 3 -#if 3<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 3>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 3<16 && defined(__ARMEL__) - vrev64.8 d3,d3 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 4 -#if 4<16 - vld1.64 {d4},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 4>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 4<16 && defined(__ARMEL__) - vrev64.8 d4,d4 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 5 -#if 5<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 5>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 5<16 && defined(__ARMEL__) - vrev64.8 d5,d5 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 6 -#if 6<16 - vld1.64 {d6},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 6>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 6<16 && defined(__ARMEL__) - vrev64.8 d6,d6 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 7 -#if 7<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 7>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 7<16 && defined(__ARMEL__) - vrev64.8 d7,d7 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 d24,d20,#14 @ 8 -#if 8<16 - vld1.64 {d8},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 8>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 8<16 && defined(__ARMEL__) - vrev64.8 d8,d8 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 9 -#if 9<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 9>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 9<16 && defined(__ARMEL__) - vrev64.8 d9,d9 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 10 -#if 10<16 - vld1.64 {d10},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 10>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 10<16 && defined(__ARMEL__) - vrev64.8 d10,d10 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 11 -#if 11<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 11>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 11<16 && defined(__ARMEL__) - vrev64.8 d11,d11 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 12 -#if 12<16 - vld1.64 {d12},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 12>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 12<16 && defined(__ARMEL__) - vrev64.8 d12,d12 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 13 -#if 13<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 13>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 13<16 && defined(__ARMEL__) - vrev64.8 d13,d13 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 14 -#if 14<16 - vld1.64 {d14},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 14>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 14<16 && defined(__ARMEL__) - vrev64.8 d14,d14 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 15 -#if 15<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 15>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 15<16 && defined(__ARMEL__) - vrev64.8 d15,d15 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - mov r12,#4 -.L16_79_neon: - subs r12,#1 - vshr.u64 q12,q7,#19 - vshr.u64 q13,q7,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q7,#6 - vsli.64 q12,q7,#45 - vext.8 q14,q0,q1,#8 @ X[i+1] - vsli.64 q13,q7,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q0,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q4,q5,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q0,q14 - vshr.u64 d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 
d26,d20,#41 @ from NEON_00_15 - vadd.i64 q0,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 16<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 17 -#if 17<16 - vld1.64 {d1},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 17>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 17<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q0,#19 - vshr.u64 q13,q0,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q0,#6 - vsli.64 q12,q0,#45 - vext.8 q14,q1,q2,#8 @ X[i+1] - vsli.64 q13,q0,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q1,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q5,q6,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 q1,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ 
sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q1,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 18<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 19 -#if 19<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 19>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 19<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q1,#19 - vshr.u64 q13,q1,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q1,#6 - vsli.64 q12,q1,#45 - vext.8 q14,q2,q3,#8 @ X[i+1] - vsli.64 q13,q1,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q2,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q6,q7,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from NEON_00_15 - vadd.i64 q2,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - 
veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q2,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 20<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 21 -#if 21<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 21>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 21<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q2,#19 - vshr.u64 q13,q2,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q2,#6 - vsli.64 q12,q2,#45 - vext.8 q14,q3,q4,#8 @ X[i+1] - vsli.64 q13,q2,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q3,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q7,q0,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d22,#14 @ from NEON_00_15 - vadd.i64 q3,q14 - vshr.u64 d25,d22,#18 @ from 
NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q3,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 22<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 23 -#if 23<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 23>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 23<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 q12,q3,#19 - vshr.u64 q13,q3,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q3,#6 - vsli.64 q12,q3,#45 - vext.8 q14,q4,q5,#8 @ X[i+1] - vsli.64 q13,q3,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q4,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q0,q1,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q4,q14 - vshr.u64 
d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d20,#41 @ from NEON_00_15 - vadd.i64 q4,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 24<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 25 -#if 25<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 25>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 25<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q4,#19 - vshr.u64 q13,q4,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q4,#6 - vsli.64 q12,q4,#45 - vext.8 q14,q5,q6,#8 @ X[i+1] - vsli.64 q13,q4,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q5,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q1,q2,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 
q5,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q5,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 26<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 27 -#if 27<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 27>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 27<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q5,#19 - vshr.u64 q13,q5,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q5,#6 - vsli.64 q12,q5,#45 - vext.8 q14,q6,q7,#8 @ X[i+1] - vsli.64 q13,q5,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q6,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q2,q3,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from 
NEON_00_15 - vadd.i64 q6,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q6,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 28<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 29 -#if 29<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 29>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 29<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q6,#19 - vshr.u64 q13,q6,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q6,#6 - vsli.64 q12,q6,#45 - vext.8 q14,q7,q0,#8 @ X[i+1] - vsli.64 q13,q6,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q7,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q3,q4,#8 @ X[i+9] - veor q15,q12 - vshr.u64 
d24,d22,#14 @ from NEON_00_15 - vadd.i64 q7,q14 - vshr.u64 d25,d22,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q7,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 30<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 31 -#if 31<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 31>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 31<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - bne .L16_79_neon - - vadd.i64 d16,d30 @ h+=Maj from the past - vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp - vadd.i64 q8,q12 @ vectorized accumulate - vadd.i64 q9,q13 - vadd.i64 q10,q14 - vadd.i64 q11,q15 - vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context - teq r1,r2 - sub r3,#640 @ rewind K512 - bne .Loop_neon - - VFP_ABI_POP - bx lr @ .word 0xe12fff1e -.size sha512_block_data_order_neon,.-sha512_block_data_order_neon -#endif -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/vpaes-armv7.S b/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/vpaes-armv7.S deleted file mode 100644 index e5ad6ed99b..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/fipsmodule/vpaes-armv7.S +++ /dev/null @@ -1,1236 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. 
- -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.syntax unified - -.arch armv7-a -.fpu neon - -#if defined(__thumb2__) -.thumb -#else -.code 32 -#endif - -.text - -.type _vpaes_consts,%object -.align 7 @ totally strategic alignment -_vpaes_consts: -.Lk_mc_forward:@ mc_forward -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 -.Lk_mc_backward:@ mc_backward -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F -.Lk_sr:@ sr -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -@ -@ "Hot" constants -@ -.Lk_inv:@ inv, inva -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 -.Lk_ipt:@ input transform (lo, hi) -.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 -.Lk_sbo:@ sbou, sbot -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA -.Lk_sb1:@ sb1u, sb1t -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -.Lk_sb2:@ sb2u, sb2t -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.align 2 -.size 
_vpaes_consts,.-_vpaes_consts -.align 6 -@@ -@@ _aes_preheat -@@ -@@ Fills q9-q15 as specified below. -@@ -.type _vpaes_preheat,%function -.align 4 -_vpaes_preheat: - adr r10, .Lk_inv - vmov.i8 q9, #0x0f @ .Lk_s0F - vld1.64 {q10,q11}, [r10]! @ .Lk_inv - add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo - vld1.64 {q12,q13}, [r10]! @ .Lk_sb1 - vld1.64 {q14,q15}, [r10] @ .Lk_sb2 - bx lr - -@@ -@@ _aes_encrypt_core -@@ -@@ AES-encrypt q0. -@@ -@@ Inputs: -@@ q0 = input -@@ q9-q15 as in _vpaes_preheat -@@ [r2] = scheduled keys -@@ -@@ Output in q0 -@@ Clobbers q1-q5, r8-r11 -@@ Preserves q6-q8 so you get some local vectors -@@ -@@ -.type _vpaes_encrypt_core,%function -.align 4 -_vpaes_encrypt_core: - mov r9, r2 - ldr r8, [r2,#240] @ pull rounds - adr r11, .Lk_ipt - @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo - @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi - vld1.64 {q2, q3}, [r11] - adr r11, .Lk_mc_forward+16 - vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1 - vtbl.8 d3, {q2}, d3 - vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2 - vtbl.8 d5, {q3}, d1 - veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - - @ .Lenc_entry ends with a bnz instruction which is normally paired with - @ subs in .Lenc_loop. - tst r8, r8 - b .Lenc_entry - -.align 4 -.Lenc_loop: - @ middle of middle round - add r10, r11, #0x40 - vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u - vtbl.8 d9, {q13}, d5 - vld1.64 {q1}, [r11]! 
@ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] - vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t - vtbl.8 d1, {q12}, d7 - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u - vtbl.8 d11, {q15}, d5 - veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A - vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t - vtbl.8 d5, {q14}, d7 - vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] - vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B - vtbl.8 d7, {q0}, d3 - veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A - @ Write to q5 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D - vtbl.8 d11, {q0}, d9 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B - vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C - vtbl.8 d9, {q3}, d3 - @ Here we restore the original q0/q5 usage. - veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D - and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... 
mod 4 - veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D - subs r8, r8, #1 @ nr-- - -.Lenc_entry: - @ top of round - vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k - vtbl.8 d11, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - vtbl.8 d5, {q10}, d7 - vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - vtbl.8 d7, {q10}, d9 - veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io - veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 - bne .Lenc_loop - - @ middle of last round - add r10, r11, #0x80 - - adr r11, .Lk_sbo - @ Read to q1 instead of q4, so the vtbl.8 instruction below does not - @ overlap table and destination registers. - vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou - vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 - vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - vtbl.8 d9, {q1}, d5 - vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] - @ Write to q2 instead of q0 below, to avoid overlapping table and - @ destination registers. - vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t - vtbl.8 d5, {q0}, d7 - veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k - veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A - @ Here we restore the original q0/q2 usage. 
- vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 - vtbl.8 d1, {q2}, d3 - bx lr -.size _vpaes_encrypt_core,.-_vpaes_encrypt_core - -.globl vpaes_encrypt -.hidden vpaes_encrypt -.type vpaes_encrypt,%function -.align 4 -vpaes_encrypt: - @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack - @ alignment. - stmdb sp!, {r7,r8,r9,r10,r11,lr} - @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11} - - vld1.64 {q0}, [r0] - bl _vpaes_preheat - bl _vpaes_encrypt_core - vst1.64 {q0}, [r1] - - vldmia sp!, {d8,d9,d10,d11} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return -.size vpaes_encrypt,.-vpaes_encrypt - -@ -@ Decryption stuff -@ -.type _vpaes_decrypt_consts,%object -.align 4 -_vpaes_decrypt_consts: -.Lk_dipt:@ decryption input transform -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 -.Lk_dsbo:@ decryption sbox final output -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C -.Lk_dsb9:@ decryption sbox output *9*u, *9*t -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -.Lk_dsbd:@ decryption sbox output *D*u, *D*t -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -.Lk_dsbb:@ decryption sbox output *B*u, *B*t -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -.Lk_dsbe:@ decryption sbox output *E*u, *E*t -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 -.size _vpaes_decrypt_consts,.-_vpaes_decrypt_consts - -@@ -@@ Decryption core -@@ -@@ Same API as encryption core, except it clobbers q12-q15 rather than using -@@ the values from _vpaes_preheat. q9-q11 must still be set from -@@ _vpaes_preheat. 
-@@ -.type _vpaes_decrypt_core,%function -.align 4 -_vpaes_decrypt_core: - mov r9, r2 - ldr r8, [r2,#240] @ pull rounds - - @ This function performs shuffles with various constants. The x86_64 - @ version loads them on-demand into %xmm0-%xmm5. This does not work well - @ for ARMv7 because those registers are shuffle destinations. The ARMv8 - @ version preloads those constants into registers, but ARMv7 has half - @ the registers to work with. Instead, we load them on-demand into - @ q12-q15, registers normally use for preloaded constants. This is fine - @ because decryption doesn't use those constants. The values are - @ constant, so this does not interfere with potential 2x optimizations. - adr r7, .Lk_dipt - - vld1.64 {q12,q13}, [r7] @ vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo - lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11 - eor r11, r11, #0x30 @ xor $0x30, %r11 - adr r10, .Lk_sr - and r11, r11, #0x30 @ and $0x30, %r11 - add r11, r11, r10 - adr r10, .Lk_mc_forward+48 - - vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 - vtbl.8 d5, {q12}, d3 - vld1.64 {q5}, [r10] @ vmovdqa .Lk_mc_forward+48(%rip), %xmm5 - @ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi - vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 - vtbl.8 d1, {q13}, d1 - veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - - @ .Ldec_entry ends with a bnz instruction which is normally paired with - @ subs in .Ldec_loop. - tst r8, r8 - b .Ldec_entry - -.align 4 -.Ldec_loop: -@ -@ Inverse mix columns -@ - - @ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of - @ the function. - adr r10, .Lk_dsb9 - vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u - @ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t - @ Load sbd* ahead of time. - vld1.64 {q14,q15}, [r10]! 
@ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu - @ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt - vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u - vtbl.8 d9, {q12}, d5 - vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t - vtbl.8 d3, {q13}, d7 - veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0 - - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - - @ Load sbb* ahead of time. - vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu - @ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt - - vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu - vtbl.8 d9, {q14}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. - veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt - vtbl.8 d3, {q15}, d7 - @ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - @ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt - - @ Load sbd* ahead of time. - vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu - @ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet - - vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu - vtbl.8 d9, {q12}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. 
- veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt - vtbl.8 d3, {q13}, d7 - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - - vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu - vtbl.8 d9, {q14}, d5 - @ Write to q1 instead of q0, so the table and destination registers do - @ not overlap. - vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch - vtbl.8 d3, {q0}, d11 - @ Here we restore the original q0/q1 usage. This instruction is - @ reordered from the ARMv8 version so we do not clobber the vtbl.8 - @ below. - veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch - vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet - vtbl.8 d3, {q15}, d7 - vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5 - veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch - subs r8, r8, #1 @ sub $1,%rax # nr-- - -.Ldec_entry: - @ top of round - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - vtbl.8 d5, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak - vtbl.8 d5, {q10}, d7 - vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak - vtbl.8 d7, {q10}, d9 - veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io - veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo - vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0 - bne .Ldec_loop - - @ middle of last round - - adr r10, .Lk_dsbo - - @ Write to q1 rather than q4 to avoid overlapping table and destination. - vld1.64 {q1}, [r10]! 
@ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou - vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou - vtbl.8 d9, {q1}, d5 - @ Write to q2 rather than q1 to avoid overlapping table and destination. - vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot - vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t - vtbl.8 d3, {q2}, d7 - vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 - veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k - @ Write to q1 rather than q0 so the table and destination registers - @ below do not overlap. - veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A - vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0 - vtbl.8 d1, {q1}, d5 - bx lr -.size _vpaes_decrypt_core,.-_vpaes_decrypt_core - -.globl vpaes_decrypt -.hidden vpaes_decrypt -.type vpaes_decrypt,%function -.align 4 -vpaes_decrypt: - @ _vpaes_decrypt_core uses r7-r11. - stmdb sp!, {r7,r8,r9,r10,r11,lr} - @ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11} - - vld1.64 {q0}, [r0] - bl _vpaes_preheat - bl _vpaes_decrypt_core - vst1.64 {q0}, [r1] - - vldmia sp!, {d8,d9,d10,d11} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return -.size vpaes_decrypt,.-vpaes_decrypt -@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -@@ @@ -@@ AES key schedule @@ -@@ @@ -@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - -@ This function diverges from both x86_64 and armv7 in which constants are -@ pinned. x86_64 has a common preheat function for all operations. aarch64 -@ separates them because it has enough registers to pin nearly all constants. -@ armv7 does not have enough registers, but needing explicit loads and stores -@ also complicates using x86_64's register allocation directly. -@ -@ We pin some constants for convenience and leave q14 and q15 free to load -@ others on demand. 
- -@ -@ Key schedule constants -@ -.type _vpaes_key_consts,%object -.align 4 -_vpaes_key_consts: -.Lk_dksd:@ decryption key schedule: invskew x*D -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -.Lk_dksb:@ decryption key schedule: invskew x*B -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -.Lk_dkse:@ decryption key schedule: invskew x*E + 0x63 -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -.Lk_dks9:@ decryption key schedule: invskew x*9 -.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - -.Lk_rcon:@ rcon -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -.Lk_opt:@ output transform -.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 -.Lk_deskew:@ deskew tables: inverts the sbox's "skew" -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 -.size _vpaes_key_consts,.-_vpaes_key_consts - -.type _vpaes_key_preheat,%function -.align 4 -_vpaes_key_preheat: - adr r11, .Lk_rcon - vmov.i8 q12, #0x5b @ .Lk_s63 - adr r10, .Lk_inv @ Must be aligned to 8 mod 16. - vmov.i8 q9, #0x0f @ .Lk_s0F - vld1.64 {q10,q11}, [r10] @ .Lk_inv - vld1.64 {q8}, [r11] @ .Lk_rcon - bx lr -.size _vpaes_key_preheat,.-_vpaes_key_preheat - -.type _vpaes_schedule_core,%function -.align 4 -_vpaes_schedule_core: - @ We only need to save lr, but ARM requires an 8-byte stack alignment, - @ so save an extra register. - stmdb sp!, {r3,lr} - - bl _vpaes_key_preheat @ load the tables - - adr r11, .Lk_ipt @ Must be aligned to 8 mod 16. - vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned) - - @ input transform - @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not - @ overlap table and destination. - vmov q4, q0 @ vmovdqa %xmm0, %xmm3 - bl _vpaes_schedule_transform - adr r10, .Lk_sr @ Must be aligned to 8 mod 16. 
- vmov q7, q0 @ vmovdqa %xmm0, %xmm7 - - add r8, r8, r10 - tst r3, r3 - bne .Lschedule_am_decrypting - - @ encrypting, output zeroth round key after transform - vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) - b .Lschedule_go - -.Lschedule_am_decrypting: - @ decrypting, output zeroth round key after shiftrows - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - vtbl.8 d6, {q4}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q4}, d3 - vst1.64 {q3}, [r2] @ vmovdqu %xmm3, (%rdx) - eor r8, r8, #0x30 @ xor $0x30, %r8 - -.Lschedule_go: - cmp r1, #192 @ cmp $192, %esi - bhi .Lschedule_256 - beq .Lschedule_192 - @ 128: fall though - -@@ -@@ .schedule_128 -@@ -@@ 128-bit specific part of key schedule. -@@ -@@ This schedule is really simple, because all its parts -@@ are accomplished by the subroutines. -@@ -.Lschedule_128: - mov r0, #10 @ mov $10, %esi - -.Loop_schedule_128: - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq .Lschedule_mangle_last - bl _vpaes_schedule_mangle @ write output - b .Loop_schedule_128 - -@@ -@@ .aes_schedule_192 -@@ -@@ 192-bit specific part of key schedule. -@@ -@@ The main body of this schedule is the same as the 128-bit -@@ schedule, but with more smearing. The long, high side is -@@ stored in q7 as before, and the short, low side is in -@@ the high bits of q6. -@@ -@@ This schedule is somewhat nastier, however, because each -@@ round produces 192 bits of key material, or 1.5 round keys. -@@ Therefore, on each cycle we do 2 rounds and produce 3 round -@@ keys. 
-@@ -.align 4 -.Lschedule_192: - sub r0, r0, #8 - vld1.64 {q0}, [r0] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) - bl _vpaes_schedule_transform @ input transform - vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part - vmov.i8 d12, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4 - @ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros - mov r0, #4 @ mov $4, %esi - -.Loop_schedule_192: - bl _vpaes_schedule_round - vext.8 q0, q6, q0, #8 @ vpalignr $8,%xmm6,%xmm0,%xmm0 - bl _vpaes_schedule_mangle @ save key n - bl _vpaes_schedule_192_smear - bl _vpaes_schedule_mangle @ save key n+1 - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq .Lschedule_mangle_last - bl _vpaes_schedule_mangle @ save key n+2 - bl _vpaes_schedule_192_smear - b .Loop_schedule_192 - -@@ -@@ .aes_schedule_256 -@@ -@@ 256-bit specific part of key schedule. -@@ -@@ The structure here is very similar to the 128-bit -@@ schedule, but with an additional "low side" in -@@ q6. The low side's rounds are the same as the -@@ high side's, except no rcon and no rotation. -@@ -.align 4 -.Lschedule_256: - vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) - bl _vpaes_schedule_transform @ input transform - mov r0, #7 @ mov $7, %esi - -.Loop_schedule_256: - bl _vpaes_schedule_mangle @ output low result - vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 - - @ high round - bl _vpaes_schedule_round - subs r0, r0, #1 @ dec %esi - beq .Lschedule_mangle_last - bl _vpaes_schedule_mangle - - @ low round. 
swap xmm7 and xmm6 - vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 - vmov.i8 q4, #0 - vmov q5, q7 @ vmovdqa %xmm7, %xmm5 - vmov q7, q6 @ vmovdqa %xmm6, %xmm7 - bl _vpaes_schedule_low_round - vmov q7, q5 @ vmovdqa %xmm5, %xmm7 - - b .Loop_schedule_256 - -@@ -@@ .aes_schedule_mangle_last -@@ -@@ Mangler for last round of key schedule -@@ Mangles q0 -@@ when encrypting, outputs out(q0) ^ 63 -@@ when decrypting, outputs unskew(q0) -@@ -@@ Always called right before return... jumps to cleanup and exits -@@ -.align 4 -.Lschedule_mangle_last: - @ schedule last round key from xmm0 - adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew - tst r3, r3 - bne .Lschedule_mangle_last_dec - - @ encrypting - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1 - adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform - add r2, r2, #32 @ add $32, %rdx - vmov q2, q0 - vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute - vtbl.8 d1, {q2}, d3 - -.Lschedule_mangle_last_dec: - sub r2, r2, #16 @ add $-16, %rdx - veor q0, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0 - bl _vpaes_schedule_transform @ output transform - vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key - - @ cleanup - veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0 - veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 - veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2 - veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3 - veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4 - veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5 - veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6 - veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7 - ldmia sp!, {r3,pc} @ return -.size _vpaes_schedule_core,.-_vpaes_schedule_core - -@@ -@@ .aes_schedule_192_smear -@@ -@@ Smear the short, low side in the 192-bit key schedule. 
-@@ -@@ Inputs: -@@ q7: high side, b a x y -@@ q6: low side, d c 0 0 -@@ -@@ Outputs: -@@ q6: b+c+d b+c 0 0 -@@ q0: b+c+d b+c b a -@@ -.type _vpaes_schedule_192_smear,%function -.align 4 -_vpaes_schedule_192_smear: - vmov.i8 q1, #0 - vdup.32 q0, d15[1] - vshl.i64 q1, q6, #32 @ vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 - vmov d0, d15 @ vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a - veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 - veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 - veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a - vmov q0, q6 @ vmovdqa %xmm6, %xmm0 - vmov d12, d2 @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros - bx lr -.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear - -@@ -@@ .aes_schedule_round -@@ -@@ Runs one main round of the key schedule on q0, q7 -@@ -@@ Specifically, runs subbytes on the high dword of q0 -@@ then rotates it by one byte and xors into the low dword of -@@ q7. -@@ -@@ Adds rcon from low byte of q8, then rotates q8 for -@@ next rcon. -@@ -@@ Smears the dwords of q7 by xoring the low into the -@@ second low, result into third, result into highest. -@@ -@@ Returns results in q7 = q0. -@@ Clobbers q1-q4, r11. -@@ -.type _vpaes_schedule_round,%function -.align 4 -_vpaes_schedule_round: - @ extract rcon from xmm8 - vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4 - vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1 - vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8 - veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 - - @ rotate - vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 - vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0 - - @ fall through... - - @ low round: same as high round, but no rotation and no rcon. -_vpaes_schedule_low_round: - @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12. - @ We pin other values in _vpaes_key_preheat, so load them now. 
- adr r11, .Lk_sb1 - vld1.64 {q14,q15}, [r11] - - @ smear xmm7 - vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1 - veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 - vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4 - - @ subbytes - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i - veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7 - vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k - vtbl.8 d5, {q11}, d3 - veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j - vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i - vtbl.8 d7, {q10}, d1 - veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k - vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j - vtbl.8 d9, {q10}, d3 - veor q7, q7, q12 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7 - vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak - vtbl.8 d7, {q10}, d7 - veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k - vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak - vtbl.8 d5, {q10}, d9 - veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io - veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo - vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou - vtbl.8 d9, {q15}, d7 - vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t - vtbl.8 d3, {q14}, d5 - veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output - - @ add in smeared stuff - veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0 - veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7 - bx lr -.size _vpaes_schedule_round,.-_vpaes_schedule_round - -@@ -@@ .aes_schedule_transform -@@ -@@ Linear-transform q0 according to tables at [r11] -@@ -@@ Requires that q9 = 0x0F0F... 
as in preheat -@@ Output in q0 -@@ Clobbers q1, q2, q14, q15 -@@ -.type _vpaes_schedule_transform,%function -.align 4 -_vpaes_schedule_transform: - vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo - @ vmovdqa 16(%r11), %xmm1 # hi - vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 - vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 - vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d3 - vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 - vtbl.8 d1, {q15}, d1 - veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 - bx lr -.size _vpaes_schedule_transform,.-_vpaes_schedule_transform - -@@ -@@ .aes_schedule_mangle -@@ -@@ Mangles q0 from (basis-transformed) standard version -@@ to our version. -@@ -@@ On encrypt, -@@ xor with 0x63 -@@ multiply by circulant 0,1,1,1 -@@ apply shiftrows transform -@@ -@@ On decrypt, -@@ xor with 0x63 -@@ multiply by "inverse mixcolumns" circulant E,B,D,9 -@@ deskew -@@ apply shiftrows transform -@@ -@@ -@@ Writes out to [r2], and increments or decrements it -@@ Keeps track of round number mod 4 in r8 -@@ Preserves q0 -@@ Clobbers q1-q5 -@@ -.type _vpaes_schedule_mangle,%function -.align 4 -_vpaes_schedule_mangle: - tst r3, r3 - vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later - adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16. - vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5 - bne .Lschedule_mangle_dec - - @ encrypting - @ Write to q2 so we do not overlap table and destination below. 
- veor q2, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4 - add r2, r2, #16 @ add $16, %rdx - vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4 - vtbl.8 d9, {q2}, d11 - vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1 - vtbl.8 d3, {q4}, d11 - vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3 - vtbl.8 d7, {q1}, d11 - veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4 - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3 - - b .Lschedule_mangle_both -.align 4 -.Lschedule_mangle_dec: - @ inverse mix columns - adr r11, .Lk_dksd @ lea .Lk_dksd(%rip),%r11 - vshr.u8 q1, q4, #4 @ vpsrlb $4, %xmm4, %xmm1 # 1 = hi - vand q4, q4, q9 @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo - - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2 - @ vmovdqa 0x10(%r11), %xmm3 - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dksb ahead of time. - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2 - @ vmovdqa 0x30(%r11), %xmm3 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dkse ahead of time. - vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2 - @ vmovdqa 0x50(%r11), %xmm3 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d7, {q15}, d3 - @ Load .Lk_dkse ahead of time. 
- vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2 - @ vmovdqa 0x70(%r11), %xmm4 - @ Write to q13 so we do not overlap table and destination. - veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 - - vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 - vtbl.8 d5, {q14}, d9 - vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 - vtbl.8 d7, {q13}, d11 - vtbl.8 d8, {q15}, d2 @ vpshufb %xmm1, %xmm4, %xmm4 - vtbl.8 d9, {q15}, d3 - vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 - veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 - veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3 - - sub r2, r2, #16 @ add $-16, %rdx - -.Lschedule_mangle_both: - @ Write to q2 so table and destination do not overlap. - vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 - vtbl.8 d5, {q3}, d3 - add r8, r8, #64-16 @ add $-16, %r8 - and r8, r8, #~(1<<6) @ and $0x30, %r8 - vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx) - bx lr -.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle - -.globl vpaes_set_encrypt_key -.hidden vpaes_set_encrypt_key -.type vpaes_set_encrypt_key,%function -.align 4 -vpaes_set_encrypt_key: - stmdb sp!, {r7,r8,r9,r10,r11, lr} - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - lsr r9, r1, #5 @ shr $5,%eax - add r9, r9, #5 @ $5,%eax - str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - - mov r3, #0 @ mov $0,%ecx - mov r8, #0x30 @ mov $0x30,%r8d - bl _vpaes_schedule_core - eor r0, r0, r0 - - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return -.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key - -.globl vpaes_set_decrypt_key -.hidden vpaes_set_decrypt_key -.type vpaes_set_decrypt_key,%function -.align 4 -vpaes_set_decrypt_key: - stmdb sp!, {r7,r8,r9,r10,r11, lr} - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - lsr r9, r1, #5 @ shr $5,%eax - add r9, r9, #5 @ $5,%eax - str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; - lsl r9, r9, #4 @ shl $4,%eax - add r2, r2, #16 @ lea 16(%rdx,%rax),%rdx - add r2, r2, r9 - - mov 
r3, #1 @ mov $1,%ecx - lsr r8, r1, #1 @ shr $1,%r8d - and r8, r8, #32 @ and $32,%r8d - eor r8, r8, #32 @ xor $32,%r8d # nbits==192?0:32 - bl _vpaes_schedule_core - - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return -.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key - -@ Additional constants for converting to bsaes. -.type _vpaes_convert_consts,%object -.align 4 -_vpaes_convert_consts: -@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear -@ transform in the AES S-box. 0x63 is incorporated into the low half of the -@ table. This was computed with the following script: -@ -@ def u64s_to_u128(x, y): -@ return x | (y << 64) -@ def u128_to_u64s(w): -@ return w & ((1<<64)-1), w >> 64 -@ def get_byte(w, i): -@ return (w >> (i*8)) & 0xff -@ def apply_table(table, b): -@ lo = b & 0xf -@ hi = b >> 4 -@ return get_byte(table[0], lo) ^ get_byte(table[1], hi) -@ def opt(b): -@ table = [ -@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808), -@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0), -@ ] -@ return apply_table(table, b) -@ def rot_byte(b, n): -@ return 0xff & ((b << n) | (b >> (8-n))) -@ def skew(x): -@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^ -@ rot_byte(x, 4)) -@ table = [0, 0] -@ for i in range(16): -@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8) -@ table[1] |= skew(opt(i<<4)) << (i*8) -@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0])) -@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1])) -.Lk_opt_then_skew: -.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b -.quad 0x1f30062936192f00, 0xb49bad829db284ab - -@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation -@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344 -@ becomes 0x22334411 and then 0x11443322. 
-.Lk_decrypt_transform: -.quad 0x0704050603000102, 0x0f0c0d0e0b08090a -.size _vpaes_convert_consts,.-_vpaes_convert_consts - -@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes); -.globl vpaes_encrypt_key_to_bsaes -.hidden vpaes_encrypt_key_to_bsaes -.type vpaes_encrypt_key_to_bsaes,%function -.align 4 -vpaes_encrypt_key_to_bsaes: - stmdb sp!, {r11, lr} - - @ See _vpaes_schedule_core for the key schedule logic. In particular, - @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper), - @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last - @ contain the transformations not in the bsaes representation. This - @ function inverts those transforms. - @ - @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key - @ representation, which does not match the other aes_nohw_* - @ implementations. The ARM aes_nohw_* stores each 32-bit word - @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the - @ cost of extra REV and VREV32 operations in little-endian ARM. - - vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform - adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16. - add r3, r2, 0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression) - - vld1.64 {q12}, [r2] - vmov.i8 q10, #0x5b @ .Lk_s63 from vpaes-x86_64 - adr r11, .Lk_opt @ Must be aligned to 8 mod 16. - vmov.i8 q11, #0x63 @ .LK_s63 without .Lk_ipt applied - - @ vpaes stores one fewer round count than bsaes, but the number of keys - @ is the same. - ldr r2, [r1,#240] - add r2, r2, #1 - str r2, [r0,#240] - - @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt). - @ Invert this with .Lk_opt. - vld1.64 {q0}, [r1]! - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied, - @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63, - @ multiplies by the circulant 0,1,1,1, then applies ShiftRows. 
-.Loop_enc_key_to_bsaes: - vld1.64 {q0}, [r1]! - - @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle - @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30. - @ We use r3 rather than r8 to avoid a callee-saved register. - vld1.64 {q1}, [r3] - vtbl.8 d4, {q0}, d2 - vtbl.8 d5, {q0}, d3 - add r3, r3, #16 - and r3, r3, #~(1<<6) - vmov q0, q2 - - @ Handle the last key differently. - subs r2, r2, #1 - beq .Loop_enc_key_to_bsaes_last - - @ Multiply by the circulant. This is its own inverse. - vtbl.8 d2, {q0}, d24 - vtbl.8 d3, {q0}, d25 - vmov q0, q1 - vtbl.8 d4, {q1}, d24 - vtbl.8 d5, {q1}, d25 - veor q0, q0, q2 - vtbl.8 d2, {q2}, d24 - vtbl.8 d3, {q2}, d25 - veor q0, q0, q1 - - @ XOR and finish. - veor q0, q0, q10 - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - b .Loop_enc_key_to_bsaes - -.Loop_enc_key_to_bsaes_last: - @ The final key does not have a basis transform (note - @ .Lschedule_mangle_last inverts the original transform). It only XORs - @ 0x63 and applies ShiftRows. The latter was already inverted in the - @ loop. Note that, because we act on the original representation, we use - @ q11, not q10. - veor q0, q0, q11 - vrev32.8 q0, q0 - vst1.64 {q0}, [r0] - - @ Wipe registers which contained key material. - veor q0, q0, q0 - veor q1, q1, q1 - veor q2, q2, q2 - - ldmia sp!, {r11, pc} @ return -.size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes - -@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes); -.globl vpaes_decrypt_key_to_bsaes -.hidden vpaes_decrypt_key_to_bsaes -.type vpaes_decrypt_key_to_bsaes,%function -.align 4 -vpaes_decrypt_key_to_bsaes: - stmdb sp!, {r11, lr} - - @ See _vpaes_schedule_core for the key schedule logic. Note vpaes - @ computes the decryption key schedule in reverse. Additionally, - @ aes-x86_64.pl shares some transformations, so we must only partially - @ invert vpaes's transformations. 
In general, vpaes computes in a - @ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of - @ MixColumns, ShiftRows, and the affine part of the AES S-box (which is - @ split into a linear skew and XOR of 0x63). We undo all but MixColumns. - @ - @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key - @ representation, which does not match the other aes_nohw_* - @ implementations. The ARM aes_nohw_* stores each 32-bit word - @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the - @ cost of extra REV and VREV32 operations in little-endian ARM. - - adr r2, .Lk_decrypt_transform - adr r3, .Lk_sr+0x30 - adr r11, .Lk_opt_then_skew @ Input to _vpaes_schedule_transform. - vld1.64 {q12}, [r2] @ Reuse q12 from encryption. - vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform - - @ vpaes stores one fewer round count than bsaes, but the number of keys - @ is the same. - ldr r2, [r1,#240] - add r2, r2, #1 - str r2, [r0,#240] - - @ Undo the basis change and reapply the S-box affine transform. See - @ .Lschedule_mangle_last. - vld1.64 {q0}, [r1]! - bl _vpaes_schedule_transform - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ See _vpaes_schedule_mangle for the transform on the middle keys. Note - @ it simultaneously inverts MixColumns and the S-box affine transform. - @ See .Lk_dksd through .Lk_dks9. -.Loop_dec_key_to_bsaes: - vld1.64 {q0}, [r1]! - - @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going - @ forwards cancels inverting for which direction we cycle r3. We use r3 - @ rather than r8 to avoid a callee-saved register. - vld1.64 {q1}, [r3] - vtbl.8 d4, {q0}, d2 - vtbl.8 d5, {q0}, d3 - add r3, r3, #64-16 - and r3, r3, #~(1<<6) - vmov q0, q2 - - @ Handle the last key differently. - subs r2, r2, #1 - beq .Loop_dec_key_to_bsaes_last - - @ Undo the basis change and reapply the S-box affine transform. - bl _vpaes_schedule_transform - - @ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. 
We - @ combine the two operations in .Lk_decrypt_transform. - @ - @ TODO(davidben): Where does the rotation come from? - vtbl.8 d2, {q0}, d24 - vtbl.8 d3, {q0}, d25 - - vst1.64 {q1}, [r0]! - b .Loop_dec_key_to_bsaes - -.Loop_dec_key_to_bsaes_last: - @ The final key only inverts ShiftRows (already done in the loop). See - @ .Lschedule_am_decrypting. Its basis is not transformed. - vrev32.8 q0, q0 - vst1.64 {q0}, [r0]! - - @ Wipe registers which contained key material. - veor q0, q0, q0 - veor q1, q1, q1 - veor q2, q2, q2 - - ldmia sp!, {r11, pc} @ return -.size vpaes_decrypt_key_to_bsaes,.-vpaes_decrypt_key_to_bsaes -.globl vpaes_ctr32_encrypt_blocks -.hidden vpaes_ctr32_encrypt_blocks -.type vpaes_ctr32_encrypt_blocks,%function -.align 4 -vpaes_ctr32_encrypt_blocks: - mov ip, sp - stmdb sp!, {r7,r8,r9,r10,r11, lr} - @ This function uses q4-q7 (d8-d15), which are callee-saved. - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - cmp r2, #0 - @ r8 is passed on the stack. - ldr r8, [ip] - beq .Lctr32_done - - @ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3. - mov r9, r3 - mov r3, r2 - mov r2, r9 - - @ Load the IV and counter portion. - ldr r7, [r8, #12] - vld1.8 {q7}, [r8] - - bl _vpaes_preheat - rev r7, r7 @ The counter is big-endian. - -.Lctr32_loop: - vmov q0, q7 - vld1.8 {q6}, [r0]! @ .Load input ahead of time - bl _vpaes_encrypt_core - veor q0, q0, q6 @ XOR input and result - vst1.8 {q0}, [r1]! - subs r3, r3, #1 - @ Update the counter. 
- add r7, r7, #1 - rev r9, r7 - vmov.32 d15[1], r9 - bne .Lctr32_loop - -.Lctr32_done: - vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return -.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-arm/crypto/test/trampoline-armv4.S b/packager/third_party/boringssl/linux-arm/crypto/test/trampoline-armv4.S deleted file mode 100644 index 5c788b3569..0000000000 --- a/packager/third_party/boringssl/linux-arm/crypto/test/trampoline-armv4.S +++ /dev/null @@ -1,380 +0,0 @@ -// This file is generated from a similarly-named Perl script in the BoringSSL -// source tree. Do not edit by hand. - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif - -#if !defined(OPENSSL_NO_ASM) -#if defined(__arm__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.syntax unified - -.arch armv7-a -.fpu vfp - -.text - -@ abi_test_trampoline loads callee-saved registers from |state|, calls |func| -@ with |argv|, then saves the callee-saved registers into |state|. It returns -@ the result of |func|. The |unwind| argument is unused. -@ uint32_t abi_test_trampoline(void (*func)(...), CallerState *state, -@ const uint32_t *argv, size_t argc, -@ int unwind); -.type abi_test_trampoline, %function -.globl abi_test_trampoline -.hidden abi_test_trampoline -.align 4 -abi_test_trampoline: -.Labi_test_trampoline_begin: - @ Save parameters and all callee-saved registers. For convenience, we - @ save r9 on iOS even though it's volatile. - vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - stmdb sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} - - @ Reserve stack space for six (10-4) stack parameters, plus an extra 4 - @ bytes to keep it 8-byte-aligned (see AAPCS, section 5.3). 
- sub sp, sp, #28 - - @ Every register in AAPCS is either non-volatile or a parameter (except - @ r9 on iOS), so this code, by the actual call, loses all its scratch - @ registers. First fill in stack parameters while there are registers - @ to spare. - cmp r3, #4 - bls .Lstack_args_done - mov r4, sp @ r4 is the output pointer. - add r5, r2, r3, lsl #2 @ Set r5 to the end of argv. - add r2, r2, #16 @ Skip four arguments. -.Lstack_args_loop: - ldr r6, [r2], #4 - cmp r2, r5 - str r6, [r4], #4 - bne .Lstack_args_loop - -.Lstack_args_done: - @ Load registers from |r1|. - vldmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} -#if defined(__APPLE__) - @ r9 is not volatile on iOS. - ldmia r1!, {r4,r5,r6,r7,r8,r10-r11} -#else - ldmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} -#endif - - @ Load register parameters. This uses up our remaining registers, so we - @ repurpose lr as scratch space. - ldr r3, [sp, #40] @ Reload argc. - ldr lr, [sp, #36] @ .Load argv into lr. - cmp r3, #3 - bhi .Larg_r3 - beq .Larg_r2 - cmp r3, #1 - bhi .Larg_r1 - beq .Larg_r0 - b .Largs_done - -.Larg_r3: - ldr r3, [lr, #12] @ argv[3] -.Larg_r2: - ldr r2, [lr, #8] @ argv[2] -.Larg_r1: - ldr r1, [lr, #4] @ argv[1] -.Larg_r0: - ldr r0, [lr] @ argv[0] -.Largs_done: - - @ With every other register in use, load the function pointer into lr - @ and call the function. - ldr lr, [sp, #28] - blx lr - - @ r1-r3 are free for use again. The trampoline only supports - @ single-return functions. Pass r4-r11 to the caller. - ldr r1, [sp, #32] - vstmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} -#if defined(__APPLE__) - @ r9 is not volatile on iOS. - stmia r1!, {r4,r5,r6,r7,r8,r10-r11} -#else - stmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} -#endif - - @ Unwind the stack and restore registers. - add sp, sp, #44 @ 44 = 28+16 - ldmia sp!, {r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Skip r0-r3 (see +16 above). 
- vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} - - bx lr -.size abi_test_trampoline,.-abi_test_trampoline -.type abi_test_clobber_r0, %function -.globl abi_test_clobber_r0 -.hidden abi_test_clobber_r0 -.align 4 -abi_test_clobber_r0: - mov r0, #0 - bx lr -.size abi_test_clobber_r0,.-abi_test_clobber_r0 -.type abi_test_clobber_r1, %function -.globl abi_test_clobber_r1 -.hidden abi_test_clobber_r1 -.align 4 -abi_test_clobber_r1: - mov r1, #0 - bx lr -.size abi_test_clobber_r1,.-abi_test_clobber_r1 -.type abi_test_clobber_r2, %function -.globl abi_test_clobber_r2 -.hidden abi_test_clobber_r2 -.align 4 -abi_test_clobber_r2: - mov r2, #0 - bx lr -.size abi_test_clobber_r2,.-abi_test_clobber_r2 -.type abi_test_clobber_r3, %function -.globl abi_test_clobber_r3 -.hidden abi_test_clobber_r3 -.align 4 -abi_test_clobber_r3: - mov r3, #0 - bx lr -.size abi_test_clobber_r3,.-abi_test_clobber_r3 -.type abi_test_clobber_r4, %function -.globl abi_test_clobber_r4 -.hidden abi_test_clobber_r4 -.align 4 -abi_test_clobber_r4: - mov r4, #0 - bx lr -.size abi_test_clobber_r4,.-abi_test_clobber_r4 -.type abi_test_clobber_r5, %function -.globl abi_test_clobber_r5 -.hidden abi_test_clobber_r5 -.align 4 -abi_test_clobber_r5: - mov r5, #0 - bx lr -.size abi_test_clobber_r5,.-abi_test_clobber_r5 -.type abi_test_clobber_r6, %function -.globl abi_test_clobber_r6 -.hidden abi_test_clobber_r6 -.align 4 -abi_test_clobber_r6: - mov r6, #0 - bx lr -.size abi_test_clobber_r6,.-abi_test_clobber_r6 -.type abi_test_clobber_r7, %function -.globl abi_test_clobber_r7 -.hidden abi_test_clobber_r7 -.align 4 -abi_test_clobber_r7: - mov r7, #0 - bx lr -.size abi_test_clobber_r7,.-abi_test_clobber_r7 -.type abi_test_clobber_r8, %function -.globl abi_test_clobber_r8 -.hidden abi_test_clobber_r8 -.align 4 -abi_test_clobber_r8: - mov r8, #0 - bx lr -.size abi_test_clobber_r8,.-abi_test_clobber_r8 -.type abi_test_clobber_r9, %function -.globl abi_test_clobber_r9 -.hidden abi_test_clobber_r9 -.align 4 
-abi_test_clobber_r9: - mov r9, #0 - bx lr -.size abi_test_clobber_r9,.-abi_test_clobber_r9 -.type abi_test_clobber_r10, %function -.globl abi_test_clobber_r10 -.hidden abi_test_clobber_r10 -.align 4 -abi_test_clobber_r10: - mov r10, #0 - bx lr -.size abi_test_clobber_r10,.-abi_test_clobber_r10 -.type abi_test_clobber_r11, %function -.globl abi_test_clobber_r11 -.hidden abi_test_clobber_r11 -.align 4 -abi_test_clobber_r11: - mov r11, #0 - bx lr -.size abi_test_clobber_r11,.-abi_test_clobber_r11 -.type abi_test_clobber_r12, %function -.globl abi_test_clobber_r12 -.hidden abi_test_clobber_r12 -.align 4 -abi_test_clobber_r12: - mov r12, #0 - bx lr -.size abi_test_clobber_r12,.-abi_test_clobber_r12 -.type abi_test_clobber_d0, %function -.globl abi_test_clobber_d0 -.hidden abi_test_clobber_d0 -.align 4 -abi_test_clobber_d0: - mov r0, #0 - vmov s0, r0 - vmov s1, r0 - bx lr -.size abi_test_clobber_d0,.-abi_test_clobber_d0 -.type abi_test_clobber_d1, %function -.globl abi_test_clobber_d1 -.hidden abi_test_clobber_d1 -.align 4 -abi_test_clobber_d1: - mov r0, #0 - vmov s2, r0 - vmov s3, r0 - bx lr -.size abi_test_clobber_d1,.-abi_test_clobber_d1 -.type abi_test_clobber_d2, %function -.globl abi_test_clobber_d2 -.hidden abi_test_clobber_d2 -.align 4 -abi_test_clobber_d2: - mov r0, #0 - vmov s4, r0 - vmov s5, r0 - bx lr -.size abi_test_clobber_d2,.-abi_test_clobber_d2 -.type abi_test_clobber_d3, %function -.globl abi_test_clobber_d3 -.hidden abi_test_clobber_d3 -.align 4 -abi_test_clobber_d3: - mov r0, #0 - vmov s6, r0 - vmov s7, r0 - bx lr -.size abi_test_clobber_d3,.-abi_test_clobber_d3 -.type abi_test_clobber_d4, %function -.globl abi_test_clobber_d4 -.hidden abi_test_clobber_d4 -.align 4 -abi_test_clobber_d4: - mov r0, #0 - vmov s8, r0 - vmov s9, r0 - bx lr -.size abi_test_clobber_d4,.-abi_test_clobber_d4 -.type abi_test_clobber_d5, %function -.globl abi_test_clobber_d5 -.hidden abi_test_clobber_d5 -.align 4 -abi_test_clobber_d5: - mov r0, #0 - vmov s10, r0 - vmov s11, r0 
- bx lr -.size abi_test_clobber_d5,.-abi_test_clobber_d5 -.type abi_test_clobber_d6, %function -.globl abi_test_clobber_d6 -.hidden abi_test_clobber_d6 -.align 4 -abi_test_clobber_d6: - mov r0, #0 - vmov s12, r0 - vmov s13, r0 - bx lr -.size abi_test_clobber_d6,.-abi_test_clobber_d6 -.type abi_test_clobber_d7, %function -.globl abi_test_clobber_d7 -.hidden abi_test_clobber_d7 -.align 4 -abi_test_clobber_d7: - mov r0, #0 - vmov s14, r0 - vmov s15, r0 - bx lr -.size abi_test_clobber_d7,.-abi_test_clobber_d7 -.type abi_test_clobber_d8, %function -.globl abi_test_clobber_d8 -.hidden abi_test_clobber_d8 -.align 4 -abi_test_clobber_d8: - mov r0, #0 - vmov s16, r0 - vmov s17, r0 - bx lr -.size abi_test_clobber_d8,.-abi_test_clobber_d8 -.type abi_test_clobber_d9, %function -.globl abi_test_clobber_d9 -.hidden abi_test_clobber_d9 -.align 4 -abi_test_clobber_d9: - mov r0, #0 - vmov s18, r0 - vmov s19, r0 - bx lr -.size abi_test_clobber_d9,.-abi_test_clobber_d9 -.type abi_test_clobber_d10, %function -.globl abi_test_clobber_d10 -.hidden abi_test_clobber_d10 -.align 4 -abi_test_clobber_d10: - mov r0, #0 - vmov s20, r0 - vmov s21, r0 - bx lr -.size abi_test_clobber_d10,.-abi_test_clobber_d10 -.type abi_test_clobber_d11, %function -.globl abi_test_clobber_d11 -.hidden abi_test_clobber_d11 -.align 4 -abi_test_clobber_d11: - mov r0, #0 - vmov s22, r0 - vmov s23, r0 - bx lr -.size abi_test_clobber_d11,.-abi_test_clobber_d11 -.type abi_test_clobber_d12, %function -.globl abi_test_clobber_d12 -.hidden abi_test_clobber_d12 -.align 4 -abi_test_clobber_d12: - mov r0, #0 - vmov s24, r0 - vmov s25, r0 - bx lr -.size abi_test_clobber_d12,.-abi_test_clobber_d12 -.type abi_test_clobber_d13, %function -.globl abi_test_clobber_d13 -.hidden abi_test_clobber_d13 -.align 4 -abi_test_clobber_d13: - mov r0, #0 - vmov s26, r0 - vmov s27, r0 - bx lr -.size abi_test_clobber_d13,.-abi_test_clobber_d13 -.type abi_test_clobber_d14, %function -.globl abi_test_clobber_d14 -.hidden abi_test_clobber_d14 
-.align 4 -abi_test_clobber_d14: - mov r0, #0 - vmov s28, r0 - vmov s29, r0 - bx lr -.size abi_test_clobber_d14,.-abi_test_clobber_d14 -.type abi_test_clobber_d15, %function -.globl abi_test_clobber_d15 -.hidden abi_test_clobber_d15 -.align 4 -abi_test_clobber_d15: - mov r0, #0 - vmov s30, r0 - vmov s31, r0 - bx lr -.size abi_test_clobber_d15,.-abi_test_clobber_d15 -#endif -#endif // !OPENSSL_NO_ASM -.section .note.GNU-stack,"",%progbits diff --git a/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S b/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S deleted file mode 100644 index 86b06fc2ef..0000000000 --- a/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S +++ /dev/null @@ -1,3670 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) -.machine "any" - -.abiversion 2 -.text - -.align 7 -.Lrcon: -.byte 0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01 -.byte 0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b -.byte 0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d -.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.Lconsts: - mflr 0 - bcl 20,31,$+4 - mflr 6 - addi 6,6,-0x48 - mtlr 0 - blr -.long 0 -.byte 0,12,0x14,0,0,0,0,0 -.byte 65,69,83,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 - -.globl aes_hw_set_encrypt_key -.type aes_hw_set_encrypt_key,@function -.align 5 -aes_hw_set_encrypt_key: -.localentry aes_hw_set_encrypt_key,0 - -.Lset_encrypt_key: - mflr 11 - std 
11,16(1) - - li 6,-1 - cmpldi 3,0 - beq- .Lenc_key_abort - cmpldi 5,0 - beq- .Lenc_key_abort - li 6,-2 - cmpwi 4,128 - blt- .Lenc_key_abort - cmpwi 4,256 - bgt- .Lenc_key_abort - andi. 0,4,0x3f - bne- .Lenc_key_abort - - lis 0,0xfff0 - li 12,-1 - or 0,0,0 - - bl .Lconsts - mtlr 11 - - neg 9,3 - lvx 1,0,3 - addi 3,3,15 - lvsr 3,0,9 - li 8,0x20 - cmpwi 4,192 - lvx 2,0,3 - vspltisb 5,0x0f - lvx 4,0,6 - vxor 3,3,5 - lvx 5,8,6 - addi 6,6,0x10 - vperm 1,1,2,3 - li 7,8 - vxor 0,0,0 - mtctr 7 - - lvsl 8,0,5 - vspltisb 9,-1 - lvx 10,0,5 - vperm 9,9,0,8 - - blt .Loop128 - addi 3,3,8 - beq .L192 - addi 3,3,8 - b .L256 - -.align 4 -.Loop128: - vperm 3,1,1,5 - vsldoi 6,0,1,12 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - .long 0x10632509 - stvx 7,0,5 - addi 5,5,16 - - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vadduwm 4,4,4 - vxor 1,1,3 - bdnz .Loop128 - - lvx 4,0,6 - - vperm 3,1,1,5 - vsldoi 6,0,1,12 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - .long 0x10632509 - stvx 7,0,5 - addi 5,5,16 - - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vadduwm 4,4,4 - vxor 1,1,3 - - vperm 3,1,1,5 - vsldoi 6,0,1,12 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - .long 0x10632509 - stvx 7,0,5 - addi 5,5,16 - - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vxor 1,1,3 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - stvx 7,0,5 - - addi 3,5,15 - addi 5,5,0x50 - - li 8,10 - b .Ldone - -.align 4 -.L192: - lvx 6,0,3 - li 7,4 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - stvx 7,0,5 - addi 5,5,16 - vperm 2,2,6,3 - vspltisb 3,8 - mtctr 7 - vsububm 5,5,3 - -.Loop192: - vperm 3,2,2,5 - vsldoi 6,0,1,12 - .long 0x10632509 - - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - - vsldoi 7,0,2,8 - vspltw 6,1,3 - vxor 6,6,2 - vsldoi 2,0,2,12 - vadduwm 4,4,4 - vxor 2,2,6 - vxor 1,1,3 - vxor 2,2,3 - vsldoi 7,7,1,8 - - vperm 3,2,2,5 - vsldoi 6,0,1,12 - vperm 11,7,7,8 - vsel 
7,10,11,9 - vor 10,11,11 - .long 0x10632509 - stvx 7,0,5 - addi 5,5,16 - - vsldoi 7,1,2,8 - vxor 1,1,6 - vsldoi 6,0,6,12 - vperm 11,7,7,8 - vsel 7,10,11,9 - vor 10,11,11 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - stvx 7,0,5 - addi 5,5,16 - - vspltw 6,1,3 - vxor 6,6,2 - vsldoi 2,0,2,12 - vadduwm 4,4,4 - vxor 2,2,6 - vxor 1,1,3 - vxor 2,2,3 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - stvx 7,0,5 - addi 3,5,15 - addi 5,5,16 - bdnz .Loop192 - - li 8,12 - addi 5,5,0x20 - b .Ldone - -.align 4 -.L256: - lvx 6,0,3 - li 7,7 - li 8,14 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - stvx 7,0,5 - addi 5,5,16 - vperm 2,2,6,3 - mtctr 7 - -.Loop256: - vperm 3,2,2,5 - vsldoi 6,0,1,12 - vperm 11,2,2,8 - vsel 7,10,11,9 - vor 10,11,11 - .long 0x10632509 - stvx 7,0,5 - addi 5,5,16 - - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vsldoi 6,0,6,12 - vxor 1,1,6 - vadduwm 4,4,4 - vxor 1,1,3 - vperm 11,1,1,8 - vsel 7,10,11,9 - vor 10,11,11 - stvx 7,0,5 - addi 3,5,15 - addi 5,5,16 - bdz .Ldone - - vspltw 3,1,3 - vsldoi 6,0,2,12 - .long 0x106305C8 - - vxor 2,2,6 - vsldoi 6,0,6,12 - vxor 2,2,6 - vsldoi 6,0,6,12 - vxor 2,2,6 - - vxor 2,2,3 - b .Loop256 - -.align 4 -.Ldone: - lvx 2,0,3 - vsel 2,10,2,9 - stvx 2,0,3 - li 6,0 - or 12,12,12 - stw 8,0(5) - -.Lenc_key_abort: - mr 3,6 - blr -.long 0 -.byte 0,12,0x14,1,0,0,3,0 -.long 0 -.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key - -.globl aes_hw_set_decrypt_key -.type aes_hw_set_decrypt_key,@function -.align 5 -aes_hw_set_decrypt_key: -.localentry aes_hw_set_decrypt_key,0 - - stdu 1,-64(1) - mflr 10 - std 10,80(1) - bl .Lset_encrypt_key - mtlr 10 - - cmpwi 3,0 - bne- .Ldec_key_abort - - slwi 7,8,4 - subi 3,5,240 - srwi 8,8,1 - add 5,3,7 - mtctr 8 - -.Ldeckey: - lwz 0, 0(3) - lwz 6, 4(3) - lwz 7, 8(3) - lwz 8, 12(3) - addi 3,3,16 - lwz 9, 0(5) - lwz 10,4(5) - lwz 11,8(5) - lwz 12,12(5) - stw 0, 0(5) - stw 6, 4(5) - stw 7, 8(5) - stw 8, 12(5) - subi 5,5,16 - stw 9, -16(3) - stw 10,-12(3) - stw 11,-8(3) - stw 12,-4(3) - bdnz .Ldeckey 
- - xor 3,3,3 -.Ldec_key_abort: - addi 1,1,64 - blr -.long 0 -.byte 0,12,4,1,0x80,0,3,0 -.long 0 -.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key -.globl aes_hw_encrypt -.type aes_hw_encrypt,@function -.align 5 -aes_hw_encrypt: -.localentry aes_hw_encrypt,0 - - lwz 6,240(5) - lis 0,0xfc00 - li 12,-1 - li 7,15 - or 0,0,0 - - lvx 0,0,3 - neg 11,4 - lvx 1,7,3 - lvsl 2,0,3 - vspltisb 4,0x0f - lvsr 3,0,11 - vxor 2,2,4 - li 7,16 - vperm 0,0,1,2 - lvx 1,0,5 - lvsr 5,0,5 - srwi 6,6,1 - lvx 2,7,5 - addi 7,7,16 - subi 6,6,1 - vperm 1,2,1,5 - - vxor 0,0,1 - lvx 1,7,5 - addi 7,7,16 - mtctr 6 - -.Loop_enc: - vperm 2,1,2,5 - .long 0x10001508 - lvx 2,7,5 - addi 7,7,16 - vperm 1,2,1,5 - .long 0x10000D08 - lvx 1,7,5 - addi 7,7,16 - bdnz .Loop_enc - - vperm 2,1,2,5 - .long 0x10001508 - lvx 2,7,5 - vperm 1,2,1,5 - .long 0x10000D09 - - vspltisb 2,-1 - vxor 1,1,1 - li 7,15 - vperm 2,2,1,3 - vxor 3,3,4 - lvx 1,0,4 - vperm 0,0,0,3 - vsel 1,1,0,2 - lvx 4,7,4 - stvx 1,0,4 - vsel 0,0,4,2 - stvx 0,7,4 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,3,0 -.long 0 -.size aes_hw_encrypt,.-aes_hw_encrypt -.globl aes_hw_decrypt -.type aes_hw_decrypt,@function -.align 5 -aes_hw_decrypt: -.localentry aes_hw_decrypt,0 - - lwz 6,240(5) - lis 0,0xfc00 - li 12,-1 - li 7,15 - or 0,0,0 - - lvx 0,0,3 - neg 11,4 - lvx 1,7,3 - lvsl 2,0,3 - vspltisb 4,0x0f - lvsr 3,0,11 - vxor 2,2,4 - li 7,16 - vperm 0,0,1,2 - lvx 1,0,5 - lvsr 5,0,5 - srwi 6,6,1 - lvx 2,7,5 - addi 7,7,16 - subi 6,6,1 - vperm 1,2,1,5 - - vxor 0,0,1 - lvx 1,7,5 - addi 7,7,16 - mtctr 6 - -.Loop_dec: - vperm 2,1,2,5 - .long 0x10001548 - lvx 2,7,5 - addi 7,7,16 - vperm 1,2,1,5 - .long 0x10000D48 - lvx 1,7,5 - addi 7,7,16 - bdnz .Loop_dec - - vperm 2,1,2,5 - .long 0x10001548 - lvx 2,7,5 - vperm 1,2,1,5 - .long 0x10000D49 - - vspltisb 2,-1 - vxor 1,1,1 - li 7,15 - vperm 2,2,1,3 - vxor 3,3,4 - lvx 1,0,4 - vperm 0,0,0,3 - vsel 1,1,0,2 - lvx 4,7,4 - stvx 1,0,4 - vsel 0,0,4,2 - stvx 0,7,4 - - or 12,12,12 - blr -.long 0 -.byte 
0,12,0x14,0,0,0,3,0 -.long 0 -.size aes_hw_decrypt,.-aes_hw_decrypt -.globl aes_hw_cbc_encrypt -.type aes_hw_cbc_encrypt,@function -.align 5 -aes_hw_cbc_encrypt: -.localentry aes_hw_cbc_encrypt,0 - - cmpldi 5,16 - .long 0x4dc00020 - - cmpwi 8,0 - lis 0,0xffe0 - li 12,-1 - or 0,0,0 - - li 10,15 - vxor 0,0,0 - vspltisb 3,0x0f - - lvx 4,0,7 - lvsl 6,0,7 - lvx 5,10,7 - vxor 6,6,3 - vperm 4,4,5,6 - - neg 11,3 - lvsr 10,0,6 - lwz 9,240(6) - - lvsr 6,0,11 - lvx 5,0,3 - addi 3,3,15 - vxor 6,6,3 - - lvsl 8,0,4 - vspltisb 9,-1 - lvx 7,0,4 - vperm 9,9,0,8 - vxor 8,8,3 - - srwi 9,9,1 - li 10,16 - subi 9,9,1 - beq .Lcbc_dec - -.Lcbc_enc: - vor 2,5,5 - lvx 5,0,3 - addi 3,3,16 - mtctr 9 - subi 5,5,16 - - lvx 0,0,6 - vperm 2,2,5,6 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - vxor 2,2,0 - lvx 0,10,6 - addi 10,10,16 - vxor 2,2,4 - -.Loop_cbc_enc: - vperm 1,0,1,10 - .long 0x10420D08 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - .long 0x10420508 - lvx 0,10,6 - addi 10,10,16 - bdnz .Loop_cbc_enc - - vperm 1,0,1,10 - .long 0x10420D08 - lvx 1,10,6 - li 10,16 - vperm 0,1,0,10 - .long 0x10820509 - cmpldi 5,16 - - vperm 3,4,4,8 - vsel 2,7,3,9 - vor 7,3,3 - stvx 2,0,4 - addi 4,4,16 - bge .Lcbc_enc - - b .Lcbc_done - -.align 4 -.Lcbc_dec: - cmpldi 5,128 - bge _aesp8_cbc_decrypt8x - vor 3,5,5 - lvx 5,0,3 - addi 3,3,16 - mtctr 9 - subi 5,5,16 - - lvx 0,0,6 - vperm 3,3,5,6 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - vxor 2,3,0 - lvx 0,10,6 - addi 10,10,16 - -.Loop_cbc_dec: - vperm 1,0,1,10 - .long 0x10420D48 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - .long 0x10420548 - lvx 0,10,6 - addi 10,10,16 - bdnz .Loop_cbc_dec - - vperm 1,0,1,10 - .long 0x10420D48 - lvx 1,10,6 - li 10,16 - vperm 0,1,0,10 - .long 0x10420549 - cmpldi 5,16 - - vxor 2,2,4 - vor 4,3,3 - vperm 3,2,2,8 - vsel 2,7,3,9 - vor 7,3,3 - stvx 2,0,4 - addi 4,4,16 - bge .Lcbc_dec - -.Lcbc_done: - addi 4,4,-1 - lvx 2,0,4 - vsel 2,7,2,9 - stvx 2,0,4 - - neg 8,7 - li 10,15 - vxor 0,0,0 - vspltisb 9,-1 - vspltisb 3,0x0f - lvsr 
8,0,8 - vperm 9,9,0,8 - vxor 8,8,3 - lvx 7,0,7 - vperm 4,4,4,8 - vsel 2,7,4,9 - lvx 5,10,7 - stvx 2,0,7 - vsel 2,4,5,9 - stvx 2,10,7 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,6,0 -.long 0 -.align 5 -_aesp8_cbc_decrypt8x: - stdu 1,-448(1) - li 10,207 - li 11,223 - stvx 20,10,1 - addi 10,10,32 - stvx 21,11,1 - addi 11,11,32 - stvx 22,10,1 - addi 10,10,32 - stvx 23,11,1 - addi 11,11,32 - stvx 24,10,1 - addi 10,10,32 - stvx 25,11,1 - addi 11,11,32 - stvx 26,10,1 - addi 10,10,32 - stvx 27,11,1 - addi 11,11,32 - stvx 28,10,1 - addi 10,10,32 - stvx 29,11,1 - addi 11,11,32 - stvx 30,10,1 - stvx 31,11,1 - li 0,-1 - stw 12,396(1) - li 8,0x10 - std 26,400(1) - li 26,0x20 - std 27,408(1) - li 27,0x30 - std 28,416(1) - li 28,0x40 - std 29,424(1) - li 29,0x50 - std 30,432(1) - li 30,0x60 - std 31,440(1) - li 31,0x70 - or 0,0,0 - - subi 9,9,3 - subi 5,5,128 - - lvx 23,0,6 - lvx 30,8,6 - addi 6,6,0x20 - lvx 31,0,6 - vperm 23,30,23,10 - addi 11,1,79 - mtctr 9 - -.Load_cbc_dec_key: - vperm 24,31,30,10 - lvx 30,8,6 - addi 6,6,0x20 - stvx 24,0,11 - vperm 25,30,31,10 - lvx 31,0,6 - stvx 25,8,11 - addi 11,11,0x20 - bdnz .Load_cbc_dec_key - - lvx 26,8,6 - vperm 24,31,30,10 - lvx 27,26,6 - stvx 24,0,11 - vperm 25,26,31,10 - lvx 28,27,6 - stvx 25,8,11 - addi 11,1,79 - vperm 26,27,26,10 - lvx 29,28,6 - vperm 27,28,27,10 - lvx 30,29,6 - vperm 28,29,28,10 - lvx 31,30,6 - vperm 29,30,29,10 - lvx 14,31,6 - vperm 30,31,30,10 - lvx 24,0,11 - vperm 31,14,31,10 - lvx 25,8,11 - - - - subi 3,3,15 - - li 10,8 - .long 0x7C001E99 - lvsl 6,0,10 - vspltisb 3,0x0f - .long 0x7C281E99 - vxor 6,6,3 - .long 0x7C5A1E99 - vperm 0,0,0,6 - .long 0x7C7B1E99 - vperm 1,1,1,6 - .long 0x7D5C1E99 - vperm 2,2,2,6 - vxor 14,0,23 - .long 0x7D7D1E99 - vperm 3,3,3,6 - vxor 15,1,23 - .long 0x7D9E1E99 - vperm 10,10,10,6 - vxor 16,2,23 - .long 0x7DBF1E99 - addi 3,3,0x80 - vperm 11,11,11,6 - vxor 17,3,23 - vperm 12,12,12,6 - vxor 18,10,23 - vperm 13,13,13,6 - vxor 19,11,23 - vxor 20,12,23 - vxor 21,13,23 - - mtctr 9 
- b .Loop_cbc_dec8x -.align 5 -.Loop_cbc_dec8x: - .long 0x11CEC548 - .long 0x11EFC548 - .long 0x1210C548 - .long 0x1231C548 - .long 0x1252C548 - .long 0x1273C548 - .long 0x1294C548 - .long 0x12B5C548 - lvx 24,26,11 - addi 11,11,0x20 - - .long 0x11CECD48 - .long 0x11EFCD48 - .long 0x1210CD48 - .long 0x1231CD48 - .long 0x1252CD48 - .long 0x1273CD48 - .long 0x1294CD48 - .long 0x12B5CD48 - lvx 25,8,11 - bdnz .Loop_cbc_dec8x - - subic 5,5,128 - .long 0x11CEC548 - .long 0x11EFC548 - .long 0x1210C548 - .long 0x1231C548 - .long 0x1252C548 - .long 0x1273C548 - .long 0x1294C548 - .long 0x12B5C548 - - subfe. 0,0,0 - .long 0x11CECD48 - .long 0x11EFCD48 - .long 0x1210CD48 - .long 0x1231CD48 - .long 0x1252CD48 - .long 0x1273CD48 - .long 0x1294CD48 - .long 0x12B5CD48 - - and 0,0,5 - .long 0x11CED548 - .long 0x11EFD548 - .long 0x1210D548 - .long 0x1231D548 - .long 0x1252D548 - .long 0x1273D548 - .long 0x1294D548 - .long 0x12B5D548 - - add 3,3,0 - - - - .long 0x11CEDD48 - .long 0x11EFDD48 - .long 0x1210DD48 - .long 0x1231DD48 - .long 0x1252DD48 - .long 0x1273DD48 - .long 0x1294DD48 - .long 0x12B5DD48 - - addi 11,1,79 - .long 0x11CEE548 - .long 0x11EFE548 - .long 0x1210E548 - .long 0x1231E548 - .long 0x1252E548 - .long 0x1273E548 - .long 0x1294E548 - .long 0x12B5E548 - lvx 24,0,11 - - .long 0x11CEED48 - .long 0x11EFED48 - .long 0x1210ED48 - .long 0x1231ED48 - .long 0x1252ED48 - .long 0x1273ED48 - .long 0x1294ED48 - .long 0x12B5ED48 - lvx 25,8,11 - - .long 0x11CEF548 - vxor 4,4,31 - .long 0x11EFF548 - vxor 0,0,31 - .long 0x1210F548 - vxor 1,1,31 - .long 0x1231F548 - vxor 2,2,31 - .long 0x1252F548 - vxor 3,3,31 - .long 0x1273F548 - vxor 10,10,31 - .long 0x1294F548 - vxor 11,11,31 - .long 0x12B5F548 - vxor 12,12,31 - - .long 0x11CE2549 - .long 0x11EF0549 - .long 0x7C001E99 - .long 0x12100D49 - .long 0x7C281E99 - .long 0x12311549 - vperm 0,0,0,6 - .long 0x7C5A1E99 - .long 0x12521D49 - vperm 1,1,1,6 - .long 0x7C7B1E99 - .long 0x12735549 - vperm 2,2,2,6 - .long 0x7D5C1E99 - .long 
0x12945D49 - vperm 3,3,3,6 - .long 0x7D7D1E99 - .long 0x12B56549 - vperm 10,10,10,6 - .long 0x7D9E1E99 - vor 4,13,13 - vperm 11,11,11,6 - .long 0x7DBF1E99 - addi 3,3,0x80 - - vperm 14,14,14,6 - vperm 15,15,15,6 - .long 0x7DC02799 - vperm 12,12,12,6 - vxor 14,0,23 - vperm 16,16,16,6 - .long 0x7DE82799 - vperm 13,13,13,6 - vxor 15,1,23 - vperm 17,17,17,6 - .long 0x7E1A2799 - vxor 16,2,23 - vperm 18,18,18,6 - .long 0x7E3B2799 - vxor 17,3,23 - vperm 19,19,19,6 - .long 0x7E5C2799 - vxor 18,10,23 - vperm 20,20,20,6 - .long 0x7E7D2799 - vxor 19,11,23 - vperm 21,21,21,6 - .long 0x7E9E2799 - vxor 20,12,23 - .long 0x7EBF2799 - addi 4,4,0x80 - vxor 21,13,23 - - mtctr 9 - beq .Loop_cbc_dec8x - - addic. 5,5,128 - beq .Lcbc_dec8x_done - nop - nop - -.Loop_cbc_dec8x_tail: - .long 0x11EFC548 - .long 0x1210C548 - .long 0x1231C548 - .long 0x1252C548 - .long 0x1273C548 - .long 0x1294C548 - .long 0x12B5C548 - lvx 24,26,11 - addi 11,11,0x20 - - .long 0x11EFCD48 - .long 0x1210CD48 - .long 0x1231CD48 - .long 0x1252CD48 - .long 0x1273CD48 - .long 0x1294CD48 - .long 0x12B5CD48 - lvx 25,8,11 - bdnz .Loop_cbc_dec8x_tail - - .long 0x11EFC548 - .long 0x1210C548 - .long 0x1231C548 - .long 0x1252C548 - .long 0x1273C548 - .long 0x1294C548 - .long 0x12B5C548 - - .long 0x11EFCD48 - .long 0x1210CD48 - .long 0x1231CD48 - .long 0x1252CD48 - .long 0x1273CD48 - .long 0x1294CD48 - .long 0x12B5CD48 - - .long 0x11EFD548 - .long 0x1210D548 - .long 0x1231D548 - .long 0x1252D548 - .long 0x1273D548 - .long 0x1294D548 - .long 0x12B5D548 - - .long 0x11EFDD48 - .long 0x1210DD48 - .long 0x1231DD48 - .long 0x1252DD48 - .long 0x1273DD48 - .long 0x1294DD48 - .long 0x12B5DD48 - - .long 0x11EFE548 - .long 0x1210E548 - .long 0x1231E548 - .long 0x1252E548 - .long 0x1273E548 - .long 0x1294E548 - .long 0x12B5E548 - - .long 0x11EFED48 - .long 0x1210ED48 - .long 0x1231ED48 - .long 0x1252ED48 - .long 0x1273ED48 - .long 0x1294ED48 - .long 0x12B5ED48 - - .long 0x11EFF548 - vxor 4,4,31 - .long 0x1210F548 - vxor 1,1,31 - .long 
0x1231F548 - vxor 2,2,31 - .long 0x1252F548 - vxor 3,3,31 - .long 0x1273F548 - vxor 10,10,31 - .long 0x1294F548 - vxor 11,11,31 - .long 0x12B5F548 - vxor 12,12,31 - - cmplwi 5,32 - blt .Lcbc_dec8x_one - nop - beq .Lcbc_dec8x_two - cmplwi 5,64 - blt .Lcbc_dec8x_three - nop - beq .Lcbc_dec8x_four - cmplwi 5,96 - blt .Lcbc_dec8x_five - nop - beq .Lcbc_dec8x_six - -.Lcbc_dec8x_seven: - .long 0x11EF2549 - .long 0x12100D49 - .long 0x12311549 - .long 0x12521D49 - .long 0x12735549 - .long 0x12945D49 - .long 0x12B56549 - vor 4,13,13 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - vperm 19,19,19,6 - .long 0x7E5B2799 - vperm 20,20,20,6 - .long 0x7E7C2799 - vperm 21,21,21,6 - .long 0x7E9D2799 - .long 0x7EBE2799 - addi 4,4,0x70 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_six: - .long 0x12102549 - .long 0x12311549 - .long 0x12521D49 - .long 0x12735549 - .long 0x12945D49 - .long 0x12B56549 - vor 4,13,13 - - vperm 16,16,16,6 - vperm 17,17,17,6 - .long 0x7E002799 - vperm 18,18,18,6 - .long 0x7E282799 - vperm 19,19,19,6 - .long 0x7E5A2799 - vperm 20,20,20,6 - .long 0x7E7B2799 - vperm 21,21,21,6 - .long 0x7E9C2799 - .long 0x7EBD2799 - addi 4,4,0x60 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_five: - .long 0x12312549 - .long 0x12521D49 - .long 0x12735549 - .long 0x12945D49 - .long 0x12B56549 - vor 4,13,13 - - vperm 17,17,17,6 - vperm 18,18,18,6 - .long 0x7E202799 - vperm 19,19,19,6 - .long 0x7E482799 - vperm 20,20,20,6 - .long 0x7E7A2799 - vperm 21,21,21,6 - .long 0x7E9B2799 - .long 0x7EBC2799 - addi 4,4,0x50 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_four: - .long 0x12522549 - .long 0x12735549 - .long 0x12945D49 - .long 0x12B56549 - vor 4,13,13 - - vperm 18,18,18,6 - vperm 19,19,19,6 - .long 0x7E402799 - vperm 20,20,20,6 - .long 0x7E682799 - vperm 21,21,21,6 - .long 0x7E9A2799 - .long 0x7EBB2799 - addi 4,4,0x40 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_three: - .long 0x12732549 - .long 
0x12945D49 - .long 0x12B56549 - vor 4,13,13 - - vperm 19,19,19,6 - vperm 20,20,20,6 - .long 0x7E602799 - vperm 21,21,21,6 - .long 0x7E882799 - .long 0x7EBA2799 - addi 4,4,0x30 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_two: - .long 0x12942549 - .long 0x12B56549 - vor 4,13,13 - - vperm 20,20,20,6 - vperm 21,21,21,6 - .long 0x7E802799 - .long 0x7EA82799 - addi 4,4,0x20 - b .Lcbc_dec8x_done - -.align 5 -.Lcbc_dec8x_one: - .long 0x12B52549 - vor 4,13,13 - - vperm 21,21,21,6 - .long 0x7EA02799 - addi 4,4,0x10 - -.Lcbc_dec8x_done: - vperm 4,4,4,6 - .long 0x7C803F99 - - li 10,79 - li 11,95 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - - or 12,12,12 - lvx 20,10,1 - addi 10,10,32 - lvx 21,11,1 - addi 11,11,32 - lvx 22,10,1 - addi 10,10,32 - lvx 23,11,1 - addi 11,11,32 - lvx 24,10,1 - addi 10,10,32 - lvx 25,11,1 - addi 11,11,32 - lvx 26,10,1 - addi 10,10,32 - lvx 27,11,1 - addi 11,11,32 - lvx 28,10,1 - addi 10,10,32 - lvx 29,11,1 - addi 11,11,32 - lvx 30,10,1 - lvx 31,11,1 - ld 26,400(1) - ld 27,408(1) - ld 28,416(1) - ld 29,424(1) - ld 30,432(1) - ld 31,440(1) - addi 1,1,448 - blr -.long 0 -.byte 0,12,0x04,0,0x80,6,6,0 -.long 0 -.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt -.globl aes_hw_ctr32_encrypt_blocks -.type aes_hw_ctr32_encrypt_blocks,@function -.align 5 -aes_hw_ctr32_encrypt_blocks: -.localentry aes_hw_ctr32_encrypt_blocks,0 - - cmpldi 5,1 - .long 0x4dc00020 - - lis 0,0xfff0 - li 12,-1 - or 0,0,0 - - li 10,15 - vxor 0,0,0 - vspltisb 3,0x0f - - lvx 4,0,7 - lvsl 6,0,7 - lvx 5,10,7 - vspltisb 11,1 - vxor 6,6,3 - vperm 4,4,5,6 - vsldoi 11,0,11,1 - - neg 11,3 - lvsr 10,0,6 - lwz 9,240(6) - - lvsr 6,0,11 - lvx 5,0,3 - addi 3,3,15 - vxor 6,6,3 - - srwi 9,9,1 - li 10,16 - subi 9,9,1 - - cmpldi 5,8 - bge _aesp8_ctr32_encrypt8x - - lvsl 8,0,4 - vspltisb 9,-1 - lvx 7,0,4 - vperm 
9,9,0,8 - vxor 8,8,3 - - lvx 0,0,6 - mtctr 9 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - vxor 2,4,0 - lvx 0,10,6 - addi 10,10,16 - b .Loop_ctr32_enc - -.align 5 -.Loop_ctr32_enc: - vperm 1,0,1,10 - .long 0x10420D08 - lvx 1,10,6 - addi 10,10,16 - vperm 0,1,0,10 - .long 0x10420508 - lvx 0,10,6 - addi 10,10,16 - bdnz .Loop_ctr32_enc - - vadduwm 4,4,11 - vor 3,5,5 - lvx 5,0,3 - addi 3,3,16 - subic. 5,5,1 - - vperm 1,0,1,10 - .long 0x10420D08 - lvx 1,10,6 - vperm 3,3,5,6 - li 10,16 - vperm 1,1,0,10 - lvx 0,0,6 - vxor 3,3,1 - .long 0x10421D09 - - lvx 1,10,6 - addi 10,10,16 - vperm 2,2,2,8 - vsel 3,7,2,9 - mtctr 9 - vperm 0,1,0,10 - vor 7,2,2 - vxor 2,4,0 - lvx 0,10,6 - addi 10,10,16 - stvx 3,0,4 - addi 4,4,16 - bne .Loop_ctr32_enc - - addi 4,4,-1 - lvx 2,0,4 - vsel 2,7,2,9 - stvx 2,0,4 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,6,0 -.long 0 -.align 5 -_aesp8_ctr32_encrypt8x: - stdu 1,-448(1) - li 10,207 - li 11,223 - stvx 20,10,1 - addi 10,10,32 - stvx 21,11,1 - addi 11,11,32 - stvx 22,10,1 - addi 10,10,32 - stvx 23,11,1 - addi 11,11,32 - stvx 24,10,1 - addi 10,10,32 - stvx 25,11,1 - addi 11,11,32 - stvx 26,10,1 - addi 10,10,32 - stvx 27,11,1 - addi 11,11,32 - stvx 28,10,1 - addi 10,10,32 - stvx 29,11,1 - addi 11,11,32 - stvx 30,10,1 - stvx 31,11,1 - li 0,-1 - stw 12,396(1) - li 8,0x10 - std 26,400(1) - li 26,0x20 - std 27,408(1) - li 27,0x30 - std 28,416(1) - li 28,0x40 - std 29,424(1) - li 29,0x50 - std 30,432(1) - li 30,0x60 - std 31,440(1) - li 31,0x70 - or 0,0,0 - - subi 9,9,3 - - lvx 23,0,6 - lvx 30,8,6 - addi 6,6,0x20 - lvx 31,0,6 - vperm 23,30,23,10 - addi 11,1,79 - mtctr 9 - -.Load_ctr32_enc_key: - vperm 24,31,30,10 - lvx 30,8,6 - addi 6,6,0x20 - stvx 24,0,11 - vperm 25,30,31,10 - lvx 31,0,6 - stvx 25,8,11 - addi 11,11,0x20 - bdnz .Load_ctr32_enc_key - - lvx 26,8,6 - vperm 24,31,30,10 - lvx 27,26,6 - stvx 24,0,11 - vperm 25,26,31,10 - lvx 28,27,6 - stvx 25,8,11 - addi 11,1,79 - vperm 26,27,26,10 - lvx 29,28,6 - vperm 27,28,27,10 - lvx 30,29,6 - 
vperm 28,29,28,10 - lvx 31,30,6 - vperm 29,30,29,10 - lvx 15,31,6 - vperm 30,31,30,10 - lvx 24,0,11 - vperm 31,15,31,10 - lvx 25,8,11 - - vadduwm 7,11,11 - subi 3,3,15 - sldi 5,5,4 - - vadduwm 16,4,11 - vadduwm 17,4,7 - vxor 15,4,23 - li 10,8 - vadduwm 18,16,7 - vxor 16,16,23 - lvsl 6,0,10 - vadduwm 19,17,7 - vxor 17,17,23 - vspltisb 3,0x0f - vadduwm 20,18,7 - vxor 18,18,23 - vxor 6,6,3 - vadduwm 21,19,7 - vxor 19,19,23 - vadduwm 22,20,7 - vxor 20,20,23 - vadduwm 4,21,7 - vxor 21,21,23 - vxor 22,22,23 - - mtctr 9 - b .Loop_ctr32_enc8x -.align 5 -.Loop_ctr32_enc8x: - .long 0x11EFC508 - .long 0x1210C508 - .long 0x1231C508 - .long 0x1252C508 - .long 0x1273C508 - .long 0x1294C508 - .long 0x12B5C508 - .long 0x12D6C508 -.Loop_ctr32_enc8x_middle: - lvx 24,26,11 - addi 11,11,0x20 - - .long 0x11EFCD08 - .long 0x1210CD08 - .long 0x1231CD08 - .long 0x1252CD08 - .long 0x1273CD08 - .long 0x1294CD08 - .long 0x12B5CD08 - .long 0x12D6CD08 - lvx 25,8,11 - bdnz .Loop_ctr32_enc8x - - subic 11,5,256 - .long 0x11EFC508 - .long 0x1210C508 - .long 0x1231C508 - .long 0x1252C508 - .long 0x1273C508 - .long 0x1294C508 - .long 0x12B5C508 - .long 0x12D6C508 - - subfe 0,0,0 - .long 0x11EFCD08 - .long 0x1210CD08 - .long 0x1231CD08 - .long 0x1252CD08 - .long 0x1273CD08 - .long 0x1294CD08 - .long 0x12B5CD08 - .long 0x12D6CD08 - - and 0,0,11 - addi 11,1,79 - .long 0x11EFD508 - .long 0x1210D508 - .long 0x1231D508 - .long 0x1252D508 - .long 0x1273D508 - .long 0x1294D508 - .long 0x12B5D508 - .long 0x12D6D508 - lvx 24,0,11 - - subic 5,5,129 - .long 0x11EFDD08 - addi 5,5,1 - .long 0x1210DD08 - .long 0x1231DD08 - .long 0x1252DD08 - .long 0x1273DD08 - .long 0x1294DD08 - .long 0x12B5DD08 - .long 0x12D6DD08 - lvx 25,8,11 - - .long 0x11EFE508 - .long 0x7C001E99 - .long 0x1210E508 - .long 0x7C281E99 - .long 0x1231E508 - .long 0x7C5A1E99 - .long 0x1252E508 - .long 0x7C7B1E99 - .long 0x1273E508 - .long 0x7D5C1E99 - .long 0x1294E508 - .long 0x7D9D1E99 - .long 0x12B5E508 - .long 0x7DBE1E99 - .long 0x12D6E508 - 
.long 0x7DDF1E99 - addi 3,3,0x80 - - .long 0x11EFED08 - vperm 0,0,0,6 - .long 0x1210ED08 - vperm 1,1,1,6 - .long 0x1231ED08 - vperm 2,2,2,6 - .long 0x1252ED08 - vperm 3,3,3,6 - .long 0x1273ED08 - vperm 10,10,10,6 - .long 0x1294ED08 - vperm 12,12,12,6 - .long 0x12B5ED08 - vperm 13,13,13,6 - .long 0x12D6ED08 - vperm 14,14,14,6 - - add 3,3,0 - - - - subfe. 0,0,0 - .long 0x11EFF508 - vxor 0,0,31 - .long 0x1210F508 - vxor 1,1,31 - .long 0x1231F508 - vxor 2,2,31 - .long 0x1252F508 - vxor 3,3,31 - .long 0x1273F508 - vxor 10,10,31 - .long 0x1294F508 - vxor 12,12,31 - .long 0x12B5F508 - vxor 13,13,31 - .long 0x12D6F508 - vxor 14,14,31 - - bne .Lctr32_enc8x_break - - .long 0x100F0509 - .long 0x10300D09 - vadduwm 16,4,11 - .long 0x10511509 - vadduwm 17,4,7 - vxor 15,4,23 - .long 0x10721D09 - vadduwm 18,16,7 - vxor 16,16,23 - .long 0x11535509 - vadduwm 19,17,7 - vxor 17,17,23 - .long 0x11946509 - vadduwm 20,18,7 - vxor 18,18,23 - .long 0x11B56D09 - vadduwm 21,19,7 - vxor 19,19,23 - .long 0x11D67509 - vadduwm 22,20,7 - vxor 20,20,23 - vperm 0,0,0,6 - vadduwm 4,21,7 - vxor 21,21,23 - vperm 1,1,1,6 - vxor 22,22,23 - mtctr 9 - - .long 0x11EFC508 - .long 0x7C002799 - vperm 2,2,2,6 - .long 0x1210C508 - .long 0x7C282799 - vperm 3,3,3,6 - .long 0x1231C508 - .long 0x7C5A2799 - vperm 10,10,10,6 - .long 0x1252C508 - .long 0x7C7B2799 - vperm 12,12,12,6 - .long 0x1273C508 - .long 0x7D5C2799 - vperm 13,13,13,6 - .long 0x1294C508 - .long 0x7D9D2799 - vperm 14,14,14,6 - .long 0x12B5C508 - .long 0x7DBE2799 - .long 0x12D6C508 - .long 0x7DDF2799 - addi 4,4,0x80 - - b .Loop_ctr32_enc8x_middle - -.align 5 -.Lctr32_enc8x_break: - cmpwi 5,-0x60 - blt .Lctr32_enc8x_one - nop - beq .Lctr32_enc8x_two - cmpwi 5,-0x40 - blt .Lctr32_enc8x_three - nop - beq .Lctr32_enc8x_four - cmpwi 5,-0x20 - blt .Lctr32_enc8x_five - nop - beq .Lctr32_enc8x_six - cmpwi 5,0x00 - blt .Lctr32_enc8x_seven - -.Lctr32_enc8x_eight: - .long 0x11EF0509 - .long 0x12100D09 - .long 0x12311509 - .long 0x12521D09 - .long 0x12735509 - 
.long 0x12946509 - .long 0x12B56D09 - .long 0x12D67509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - vperm 19,19,19,6 - .long 0x7E5B2799 - vperm 20,20,20,6 - .long 0x7E7C2799 - vperm 21,21,21,6 - .long 0x7E9D2799 - vperm 22,22,22,6 - .long 0x7EBE2799 - .long 0x7EDF2799 - addi 4,4,0x80 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_seven: - .long 0x11EF0D09 - .long 0x12101509 - .long 0x12311D09 - .long 0x12525509 - .long 0x12736509 - .long 0x12946D09 - .long 0x12B57509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - vperm 19,19,19,6 - .long 0x7E5B2799 - vperm 20,20,20,6 - .long 0x7E7C2799 - vperm 21,21,21,6 - .long 0x7E9D2799 - .long 0x7EBE2799 - addi 4,4,0x70 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_six: - .long 0x11EF1509 - .long 0x12101D09 - .long 0x12315509 - .long 0x12526509 - .long 0x12736D09 - .long 0x12947509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - vperm 19,19,19,6 - .long 0x7E5B2799 - vperm 20,20,20,6 - .long 0x7E7C2799 - .long 0x7E9D2799 - addi 4,4,0x60 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_five: - .long 0x11EF1D09 - .long 0x12105509 - .long 0x12316509 - .long 0x12526D09 - .long 0x12737509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - vperm 19,19,19,6 - .long 0x7E5B2799 - .long 0x7E7C2799 - addi 4,4,0x50 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_four: - .long 0x11EF5509 - .long 0x12106509 - .long 0x12316D09 - .long 0x12527509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - vperm 18,18,18,6 - .long 0x7E3A2799 - .long 0x7E5B2799 - addi 4,4,0x40 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_three: - .long 
0x11EF6509 - .long 0x12106D09 - .long 0x12317509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - vperm 17,17,17,6 - .long 0x7E082799 - .long 0x7E3A2799 - addi 4,4,0x30 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_two: - .long 0x11EF6D09 - .long 0x12107509 - - vperm 15,15,15,6 - vperm 16,16,16,6 - .long 0x7DE02799 - .long 0x7E082799 - addi 4,4,0x20 - b .Lctr32_enc8x_done - -.align 5 -.Lctr32_enc8x_one: - .long 0x11EF7509 - - vperm 15,15,15,6 - .long 0x7DE02799 - addi 4,4,0x10 - -.Lctr32_enc8x_done: - li 10,79 - li 11,95 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - stvx 6,10,1 - addi 10,10,32 - stvx 6,11,1 - addi 11,11,32 - - or 12,12,12 - lvx 20,10,1 - addi 10,10,32 - lvx 21,11,1 - addi 11,11,32 - lvx 22,10,1 - addi 10,10,32 - lvx 23,11,1 - addi 11,11,32 - lvx 24,10,1 - addi 10,10,32 - lvx 25,11,1 - addi 11,11,32 - lvx 26,10,1 - addi 10,10,32 - lvx 27,11,1 - addi 11,11,32 - lvx 28,10,1 - addi 10,10,32 - lvx 29,11,1 - addi 11,11,32 - lvx 30,10,1 - lvx 31,11,1 - ld 26,400(1) - ld 27,408(1) - ld 28,416(1) - ld 29,424(1) - ld 30,432(1) - ld 31,440(1) - addi 1,1,448 - blr -.long 0 -.byte 0,12,0x04,0,0x80,6,6,0 -.long 0 -.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks -.globl aes_hw_xts_encrypt -.type aes_hw_xts_encrypt,@function -.align 5 -aes_hw_xts_encrypt: -.localentry aes_hw_xts_encrypt,0 - - mr 10,3 - li 3,-1 - cmpldi 5,16 - .long 0x4dc00020 - - lis 0,0xfff0 - li 12,-1 - li 11,0 - or 0,0,0 - - vspltisb 9,0x07 - lvsl 6,11,11 - vspltisb 11,0x0f - vxor 6,6,9 - - li 3,15 - lvx 8,0,8 - lvsl 5,0,8 - lvx 4,3,8 - vxor 5,5,11 - vperm 8,8,4,5 - - neg 11,10 - lvsr 5,0,11 - lvx 2,0,10 - addi 10,10,15 - vxor 5,5,11 - - cmpldi 7,0 - beq .Lxts_enc_no_key2 - - lvsr 7,0,7 - lwz 9,240(7) - srwi 9,9,1 - subi 9,9,1 - li 3,16 - - lvx 0,0,7 - lvx 1,3,7 - addi 3,3,16 - vperm 0,1,0,7 - vxor 8,8,0 - lvx 0,3,7 - addi 3,3,16 
- mtctr 9 - -.Ltweak_xts_enc: - vperm 1,0,1,7 - .long 0x11080D08 - lvx 1,3,7 - addi 3,3,16 - vperm 0,1,0,7 - .long 0x11080508 - lvx 0,3,7 - addi 3,3,16 - bdnz .Ltweak_xts_enc - - vperm 1,0,1,7 - .long 0x11080D08 - lvx 1,3,7 - vperm 0,1,0,7 - .long 0x11080509 - - li 8,0 - b .Lxts_enc - -.Lxts_enc_no_key2: - li 3,-16 - and 5,5,3 - - -.Lxts_enc: - lvx 4,0,10 - addi 10,10,16 - - lvsr 7,0,6 - lwz 9,240(6) - srwi 9,9,1 - subi 9,9,1 - li 3,16 - - vslb 10,9,9 - vor 10,10,9 - vspltisb 11,1 - vsldoi 10,10,11,15 - - cmpldi 5,96 - bge _aesp8_xts_encrypt6x - - andi. 7,5,15 - subic 0,5,32 - subi 7,7,16 - subfe 0,0,0 - and 0,0,7 - add 10,10,0 - - lvx 0,0,6 - lvx 1,3,6 - addi 3,3,16 - vperm 2,2,4,5 - vperm 0,1,0,7 - vxor 2,2,8 - vxor 2,2,0 - lvx 0,3,6 - addi 3,3,16 - mtctr 9 - b .Loop_xts_enc - -.align 5 -.Loop_xts_enc: - vperm 1,0,1,7 - .long 0x10420D08 - lvx 1,3,6 - addi 3,3,16 - vperm 0,1,0,7 - .long 0x10420508 - lvx 0,3,6 - addi 3,3,16 - bdnz .Loop_xts_enc - - vperm 1,0,1,7 - .long 0x10420D08 - lvx 1,3,6 - li 3,16 - vperm 0,1,0,7 - vxor 0,0,8 - .long 0x10620509 - - vperm 11,3,3,6 - - .long 0x7D602799 - - addi 4,4,16 - - subic. 
5,5,16 - beq .Lxts_enc_done - - vor 2,4,4 - lvx 4,0,10 - addi 10,10,16 - lvx 0,0,6 - lvx 1,3,6 - addi 3,3,16 - - subic 0,5,32 - subfe 0,0,0 - and 0,0,7 - add 10,10,0 - - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 8,8,11 - - vperm 2,2,4,5 - vperm 0,1,0,7 - vxor 2,2,8 - vxor 3,3,0 - vxor 2,2,0 - lvx 0,3,6 - addi 3,3,16 - - mtctr 9 - cmpldi 5,16 - bge .Loop_xts_enc - - vxor 3,3,8 - lvsr 5,0,5 - vxor 4,4,4 - vspltisb 11,-1 - vperm 4,4,11,5 - vsel 2,2,3,4 - - subi 11,4,17 - subi 4,4,16 - mtctr 5 - li 5,16 -.Loop_xts_enc_steal: - lbzu 0,1(11) - stb 0,16(11) - bdnz .Loop_xts_enc_steal - - mtctr 9 - b .Loop_xts_enc - -.Lxts_enc_done: - cmpldi 8,0 - beq .Lxts_enc_ret - - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 8,8,11 - - vperm 8,8,8,6 - .long 0x7D004799 - -.Lxts_enc_ret: - or 12,12,12 - li 3,0 - blr -.long 0 -.byte 0,12,0x04,0,0x80,6,6,0 -.long 0 -.size aes_hw_xts_encrypt,.-aes_hw_xts_encrypt - -.globl aes_hw_xts_decrypt -.type aes_hw_xts_decrypt,@function -.align 5 -aes_hw_xts_decrypt: -.localentry aes_hw_xts_decrypt,0 - - mr 10,3 - li 3,-1 - cmpldi 5,16 - .long 0x4dc00020 - - lis 0,0xfff8 - li 12,-1 - li 11,0 - or 0,0,0 - - andi. 0,5,15 - neg 0,0 - andi. 0,0,16 - sub 5,5,0 - - vspltisb 9,0x07 - lvsl 6,11,11 - vspltisb 11,0x0f - vxor 6,6,9 - - li 3,15 - lvx 8,0,8 - lvsl 5,0,8 - lvx 4,3,8 - vxor 5,5,11 - vperm 8,8,4,5 - - neg 11,10 - lvsr 5,0,11 - lvx 2,0,10 - addi 10,10,15 - vxor 5,5,11 - - cmpldi 7,0 - beq .Lxts_dec_no_key2 - - lvsr 7,0,7 - lwz 9,240(7) - srwi 9,9,1 - subi 9,9,1 - li 3,16 - - lvx 0,0,7 - lvx 1,3,7 - addi 3,3,16 - vperm 0,1,0,7 - vxor 8,8,0 - lvx 0,3,7 - addi 3,3,16 - mtctr 9 - -.Ltweak_xts_dec: - vperm 1,0,1,7 - .long 0x11080D08 - lvx 1,3,7 - addi 3,3,16 - vperm 0,1,0,7 - .long 0x11080508 - lvx 0,3,7 - addi 3,3,16 - bdnz .Ltweak_xts_dec - - vperm 1,0,1,7 - .long 0x11080D08 - lvx 1,3,7 - vperm 0,1,0,7 - .long 0x11080509 - - li 8,0 - b .Lxts_dec - -.Lxts_dec_no_key2: - neg 3,5 - andi. 
3,3,15 - add 5,5,3 - - -.Lxts_dec: - lvx 4,0,10 - addi 10,10,16 - - lvsr 7,0,6 - lwz 9,240(6) - srwi 9,9,1 - subi 9,9,1 - li 3,16 - - vslb 10,9,9 - vor 10,10,9 - vspltisb 11,1 - vsldoi 10,10,11,15 - - cmpldi 5,96 - bge _aesp8_xts_decrypt6x - - lvx 0,0,6 - lvx 1,3,6 - addi 3,3,16 - vperm 2,2,4,5 - vperm 0,1,0,7 - vxor 2,2,8 - vxor 2,2,0 - lvx 0,3,6 - addi 3,3,16 - mtctr 9 - - cmpldi 5,16 - blt .Ltail_xts_dec - - -.align 5 -.Loop_xts_dec: - vperm 1,0,1,7 - .long 0x10420D48 - lvx 1,3,6 - addi 3,3,16 - vperm 0,1,0,7 - .long 0x10420548 - lvx 0,3,6 - addi 3,3,16 - bdnz .Loop_xts_dec - - vperm 1,0,1,7 - .long 0x10420D48 - lvx 1,3,6 - li 3,16 - vperm 0,1,0,7 - vxor 0,0,8 - .long 0x10620549 - - vperm 11,3,3,6 - - .long 0x7D602799 - - addi 4,4,16 - - subic. 5,5,16 - beq .Lxts_dec_done - - vor 2,4,4 - lvx 4,0,10 - addi 10,10,16 - lvx 0,0,6 - lvx 1,3,6 - addi 3,3,16 - - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 8,8,11 - - vperm 2,2,4,5 - vperm 0,1,0,7 - vxor 2,2,8 - vxor 2,2,0 - lvx 0,3,6 - addi 3,3,16 - - mtctr 9 - cmpldi 5,16 - bge .Loop_xts_dec - -.Ltail_xts_dec: - vsrab 11,8,9 - vaddubm 12,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 12,12,11 - - subi 10,10,16 - add 10,10,5 - - vxor 2,2,8 - vxor 2,2,12 - -.Loop_xts_dec_short: - vperm 1,0,1,7 - .long 0x10420D48 - lvx 1,3,6 - addi 3,3,16 - vperm 0,1,0,7 - .long 0x10420548 - lvx 0,3,6 - addi 3,3,16 - bdnz .Loop_xts_dec_short - - vperm 1,0,1,7 - .long 0x10420D48 - lvx 1,3,6 - li 3,16 - vperm 0,1,0,7 - vxor 0,0,12 - .long 0x10620549 - - vperm 11,3,3,6 - - .long 0x7D602799 - - - vor 2,4,4 - lvx 4,0,10 - - lvx 0,0,6 - lvx 1,3,6 - addi 3,3,16 - vperm 2,2,4,5 - vperm 0,1,0,7 - - lvsr 5,0,5 - vxor 4,4,4 - vspltisb 11,-1 - vperm 4,4,11,5 - vsel 2,2,3,4 - - vxor 0,0,8 - vxor 2,2,0 - lvx 0,3,6 - addi 3,3,16 - - subi 11,4,1 - mtctr 5 - li 5,16 -.Loop_xts_dec_steal: - lbzu 0,1(11) - stb 0,16(11) - bdnz .Loop_xts_dec_steal - - mtctr 9 - b .Loop_xts_dec - -.Lxts_dec_done: - cmpldi 8,0 - beq .Lxts_dec_ret - 
- vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 8,8,11 - - vperm 8,8,8,6 - .long 0x7D004799 - -.Lxts_dec_ret: - or 12,12,12 - li 3,0 - blr -.long 0 -.byte 0,12,0x04,0,0x80,6,6,0 -.long 0 -.size aes_hw_xts_decrypt,.-aes_hw_xts_decrypt -.align 5 -_aesp8_xts_encrypt6x: - stdu 1,-448(1) - mflr 11 - li 7,207 - li 3,223 - std 11,464(1) - stvx 20,7,1 - addi 7,7,32 - stvx 21,3,1 - addi 3,3,32 - stvx 22,7,1 - addi 7,7,32 - stvx 23,3,1 - addi 3,3,32 - stvx 24,7,1 - addi 7,7,32 - stvx 25,3,1 - addi 3,3,32 - stvx 26,7,1 - addi 7,7,32 - stvx 27,3,1 - addi 3,3,32 - stvx 28,7,1 - addi 7,7,32 - stvx 29,3,1 - addi 3,3,32 - stvx 30,7,1 - stvx 31,3,1 - li 0,-1 - stw 12,396(1) - li 3,0x10 - std 26,400(1) - li 26,0x20 - std 27,408(1) - li 27,0x30 - std 28,416(1) - li 28,0x40 - std 29,424(1) - li 29,0x50 - std 30,432(1) - li 30,0x60 - std 31,440(1) - li 31,0x70 - or 0,0,0 - - subi 9,9,3 - - lvx 23,0,6 - lvx 30,3,6 - addi 6,6,0x20 - lvx 31,0,6 - vperm 23,30,23,7 - addi 7,1,79 - mtctr 9 - -.Load_xts_enc_key: - vperm 24,31,30,7 - lvx 30,3,6 - addi 6,6,0x20 - stvx 24,0,7 - vperm 25,30,31,7 - lvx 31,0,6 - stvx 25,3,7 - addi 7,7,0x20 - bdnz .Load_xts_enc_key - - lvx 26,3,6 - vperm 24,31,30,7 - lvx 27,26,6 - stvx 24,0,7 - vperm 25,26,31,7 - lvx 28,27,6 - stvx 25,3,7 - addi 7,1,79 - vperm 26,27,26,7 - lvx 29,28,6 - vperm 27,28,27,7 - lvx 30,29,6 - vperm 28,29,28,7 - lvx 31,30,6 - vperm 29,30,29,7 - lvx 22,31,6 - vperm 30,31,30,7 - lvx 24,0,7 - vperm 31,22,31,7 - lvx 25,3,7 - - vperm 0,2,4,5 - subi 10,10,31 - vxor 17,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 7,0,17 - vxor 8,8,11 - - .long 0x7C235699 - vxor 18,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 1,1,1,6 - vand 11,11,10 - vxor 12,1,18 - vxor 8,8,11 - - .long 0x7C5A5699 - andi. 
31,5,15 - vxor 19,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 2,2,2,6 - vand 11,11,10 - vxor 13,2,19 - vxor 8,8,11 - - .long 0x7C7B5699 - sub 5,5,31 - vxor 20,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 3,3,3,6 - vand 11,11,10 - vxor 14,3,20 - vxor 8,8,11 - - .long 0x7C9C5699 - subi 5,5,0x60 - vxor 21,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 4,4,4,6 - vand 11,11,10 - vxor 15,4,21 - vxor 8,8,11 - - .long 0x7CBD5699 - addi 10,10,0x60 - vxor 22,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 5,5,5,6 - vand 11,11,10 - vxor 16,5,22 - vxor 8,8,11 - - vxor 31,31,23 - mtctr 9 - b .Loop_xts_enc6x - -.align 5 -.Loop_xts_enc6x: - .long 0x10E7C508 - .long 0x118CC508 - .long 0x11ADC508 - .long 0x11CEC508 - .long 0x11EFC508 - .long 0x1210C508 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD08 - .long 0x118CCD08 - .long 0x11ADCD08 - .long 0x11CECD08 - .long 0x11EFCD08 - .long 0x1210CD08 - lvx 25,3,7 - bdnz .Loop_xts_enc6x - - subic 5,5,96 - vxor 0,17,31 - .long 0x10E7C508 - .long 0x118CC508 - vsrab 11,8,9 - vxor 17,8,23 - vaddubm 8,8,8 - .long 0x11ADC508 - .long 0x11CEC508 - vsldoi 11,11,11,15 - .long 0x11EFC508 - .long 0x1210C508 - - subfe. 
0,0,0 - vand 11,11,10 - .long 0x10E7CD08 - .long 0x118CCD08 - vxor 8,8,11 - .long 0x11ADCD08 - .long 0x11CECD08 - vxor 1,18,31 - vsrab 11,8,9 - vxor 18,8,23 - .long 0x11EFCD08 - .long 0x1210CD08 - - and 0,0,5 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x10E7D508 - .long 0x118CD508 - vand 11,11,10 - .long 0x11ADD508 - .long 0x11CED508 - vxor 8,8,11 - .long 0x11EFD508 - .long 0x1210D508 - - add 10,10,0 - - - - vxor 2,19,31 - vsrab 11,8,9 - vxor 19,8,23 - vaddubm 8,8,8 - .long 0x10E7DD08 - .long 0x118CDD08 - vsldoi 11,11,11,15 - .long 0x11ADDD08 - .long 0x11CEDD08 - vand 11,11,10 - .long 0x11EFDD08 - .long 0x1210DD08 - - addi 7,1,79 - vxor 8,8,11 - .long 0x10E7E508 - .long 0x118CE508 - vxor 3,20,31 - vsrab 11,8,9 - vxor 20,8,23 - .long 0x11ADE508 - .long 0x11CEE508 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x11EFE508 - .long 0x1210E508 - lvx 24,0,7 - vand 11,11,10 - - .long 0x10E7ED08 - .long 0x118CED08 - vxor 8,8,11 - .long 0x11ADED08 - .long 0x11CEED08 - vxor 4,21,31 - vsrab 11,8,9 - vxor 21,8,23 - .long 0x11EFED08 - .long 0x1210ED08 - lvx 25,3,7 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - - .long 0x10E7F508 - .long 0x118CF508 - vand 11,11,10 - .long 0x11ADF508 - .long 0x11CEF508 - vxor 8,8,11 - .long 0x11EFF508 - .long 0x1210F508 - vxor 5,22,31 - vsrab 11,8,9 - vxor 22,8,23 - - .long 0x10E70509 - .long 0x7C005699 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x118C0D09 - .long 0x7C235699 - .long 0x11AD1509 - vperm 0,0,0,6 - .long 0x7C5A5699 - vand 11,11,10 - .long 0x11CE1D09 - vperm 1,1,1,6 - .long 0x7C7B5699 - .long 0x11EF2509 - vperm 2,2,2,6 - .long 0x7C9C5699 - vxor 8,8,11 - .long 0x11702D09 - - vperm 3,3,3,6 - .long 0x7CBD5699 - addi 10,10,0x60 - vperm 4,4,4,6 - vperm 5,5,5,6 - - vperm 7,7,7,6 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,17 - vperm 13,13,13,6 - .long 0x7D832799 - vxor 12,1,18 - vperm 14,14,14,6 - .long 0x7DBA2799 - vxor 13,2,19 - vperm 15,15,15,6 - .long 0x7DDB2799 - vxor 14,3,20 - vperm 16,11,11,6 - .long 0x7DFC2799 - vxor 15,4,21 - .long 
0x7E1D2799 - - vxor 16,5,22 - addi 4,4,0x60 - - mtctr 9 - beq .Loop_xts_enc6x - - addic. 5,5,0x60 - beq .Lxts_enc6x_zero - cmpwi 5,0x20 - blt .Lxts_enc6x_one - nop - beq .Lxts_enc6x_two - cmpwi 5,0x40 - blt .Lxts_enc6x_three - nop - beq .Lxts_enc6x_four - -.Lxts_enc6x_five: - vxor 7,1,17 - vxor 12,2,18 - vxor 13,3,19 - vxor 14,4,20 - vxor 15,5,21 - - bl _aesp8_xts_enc5x - - vperm 7,7,7,6 - vor 17,22,22 - vperm 12,12,12,6 - .long 0x7CE02799 - vperm 13,13,13,6 - .long 0x7D832799 - vperm 14,14,14,6 - .long 0x7DBA2799 - vxor 11,15,22 - vperm 15,15,15,6 - .long 0x7DDB2799 - .long 0x7DFC2799 - addi 4,4,0x50 - bne .Lxts_enc6x_steal - b .Lxts_enc6x_done - -.align 4 -.Lxts_enc6x_four: - vxor 7,2,17 - vxor 12,3,18 - vxor 13,4,19 - vxor 14,5,20 - vxor 15,15,15 - - bl _aesp8_xts_enc5x - - vperm 7,7,7,6 - vor 17,21,21 - vperm 12,12,12,6 - .long 0x7CE02799 - vperm 13,13,13,6 - .long 0x7D832799 - vxor 11,14,21 - vperm 14,14,14,6 - .long 0x7DBA2799 - .long 0x7DDB2799 - addi 4,4,0x40 - bne .Lxts_enc6x_steal - b .Lxts_enc6x_done - -.align 4 -.Lxts_enc6x_three: - vxor 7,3,17 - vxor 12,4,18 - vxor 13,5,19 - vxor 14,14,14 - vxor 15,15,15 - - bl _aesp8_xts_enc5x - - vperm 7,7,7,6 - vor 17,20,20 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 11,13,20 - vperm 13,13,13,6 - .long 0x7D832799 - .long 0x7DBA2799 - addi 4,4,0x30 - bne .Lxts_enc6x_steal - b .Lxts_enc6x_done - -.align 4 -.Lxts_enc6x_two: - vxor 7,4,17 - vxor 12,5,18 - vxor 13,13,13 - vxor 14,14,14 - vxor 15,15,15 - - bl _aesp8_xts_enc5x - - vperm 7,7,7,6 - vor 17,19,19 - vxor 11,12,19 - vperm 12,12,12,6 - .long 0x7CE02799 - .long 0x7D832799 - addi 4,4,0x20 - bne .Lxts_enc6x_steal - b .Lxts_enc6x_done - -.align 4 -.Lxts_enc6x_one: - vxor 7,5,17 - nop -.Loop_xts_enc1x: - .long 0x10E7C508 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD08 - lvx 25,3,7 - bdnz .Loop_xts_enc1x - - add 10,10,31 - cmpwi 31,0 - .long 0x10E7C508 - - subi 10,10,16 - .long 0x10E7CD08 - - lvsr 5,0,31 - .long 0x10E7D508 - - .long 0x7C005699 - .long 0x10E7DD08 - 
- addi 7,1,79 - .long 0x10E7E508 - lvx 24,0,7 - - .long 0x10E7ED08 - lvx 25,3,7 - vxor 17,17,31 - - vperm 0,0,0,6 - .long 0x10E7F508 - - vperm 0,0,0,5 - .long 0x10E78D09 - - vor 17,18,18 - vxor 11,7,18 - vperm 7,7,7,6 - .long 0x7CE02799 - addi 4,4,0x10 - bne .Lxts_enc6x_steal - b .Lxts_enc6x_done - -.align 4 -.Lxts_enc6x_zero: - cmpwi 31,0 - beq .Lxts_enc6x_done - - add 10,10,31 - subi 10,10,16 - .long 0x7C005699 - lvsr 5,0,31 - vperm 0,0,0,6 - vperm 0,0,0,5 - vxor 11,11,17 -.Lxts_enc6x_steal: - vxor 0,0,17 - vxor 7,7,7 - vspltisb 12,-1 - vperm 7,7,12,5 - vsel 7,0,11,7 - - subi 30,4,17 - subi 4,4,16 - mtctr 31 -.Loop_xts_enc6x_steal: - lbzu 0,1(30) - stb 0,16(30) - bdnz .Loop_xts_enc6x_steal - - li 31,0 - mtctr 9 - b .Loop_xts_enc1x - -.align 4 -.Lxts_enc6x_done: - cmpldi 8,0 - beq .Lxts_enc6x_ret - - vxor 8,17,23 - vperm 8,8,8,6 - .long 0x7D004799 - -.Lxts_enc6x_ret: - mtlr 11 - li 10,79 - li 11,95 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - - or 12,12,12 - lvx 20,10,1 - addi 10,10,32 - lvx 21,11,1 - addi 11,11,32 - lvx 22,10,1 - addi 10,10,32 - lvx 23,11,1 - addi 11,11,32 - lvx 24,10,1 - addi 10,10,32 - lvx 25,11,1 - addi 11,11,32 - lvx 26,10,1 - addi 10,10,32 - lvx 27,11,1 - addi 11,11,32 - lvx 28,10,1 - addi 10,10,32 - lvx 29,11,1 - addi 11,11,32 - lvx 30,10,1 - lvx 31,11,1 - ld 26,400(1) - ld 27,408(1) - ld 28,416(1) - ld 29,424(1) - ld 30,432(1) - ld 31,440(1) - addi 1,1,448 - blr -.long 0 -.byte 0,12,0x04,1,0x80,6,6,0 -.long 0 - -.align 5 -_aesp8_xts_enc5x: - .long 0x10E7C508 - .long 0x118CC508 - .long 0x11ADC508 - .long 0x11CEC508 - .long 0x11EFC508 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD08 - .long 0x118CCD08 - .long 0x11ADCD08 - .long 0x11CECD08 - .long 0x11EFCD08 - lvx 25,3,7 - bdnz _aesp8_xts_enc5x - - add 10,10,31 - cmpwi 31,0 - .long 0x10E7C508 - 
.long 0x118CC508 - .long 0x11ADC508 - .long 0x11CEC508 - .long 0x11EFC508 - - subi 10,10,16 - .long 0x10E7CD08 - .long 0x118CCD08 - .long 0x11ADCD08 - .long 0x11CECD08 - .long 0x11EFCD08 - vxor 17,17,31 - - .long 0x10E7D508 - lvsr 5,0,31 - .long 0x118CD508 - .long 0x11ADD508 - .long 0x11CED508 - .long 0x11EFD508 - vxor 1,18,31 - - .long 0x10E7DD08 - .long 0x7C005699 - .long 0x118CDD08 - .long 0x11ADDD08 - .long 0x11CEDD08 - .long 0x11EFDD08 - vxor 2,19,31 - - addi 7,1,79 - .long 0x10E7E508 - .long 0x118CE508 - .long 0x11ADE508 - .long 0x11CEE508 - .long 0x11EFE508 - lvx 24,0,7 - vxor 3,20,31 - - .long 0x10E7ED08 - vperm 0,0,0,6 - .long 0x118CED08 - .long 0x11ADED08 - .long 0x11CEED08 - .long 0x11EFED08 - lvx 25,3,7 - vxor 4,21,31 - - .long 0x10E7F508 - vperm 0,0,0,5 - .long 0x118CF508 - .long 0x11ADF508 - .long 0x11CEF508 - .long 0x11EFF508 - - .long 0x10E78D09 - .long 0x118C0D09 - .long 0x11AD1509 - .long 0x11CE1D09 - .long 0x11EF2509 - blr -.long 0 -.byte 0,12,0x14,0,0,0,0,0 - -.align 5 -_aesp8_xts_decrypt6x: - stdu 1,-448(1) - mflr 11 - li 7,207 - li 3,223 - std 11,464(1) - stvx 20,7,1 - addi 7,7,32 - stvx 21,3,1 - addi 3,3,32 - stvx 22,7,1 - addi 7,7,32 - stvx 23,3,1 - addi 3,3,32 - stvx 24,7,1 - addi 7,7,32 - stvx 25,3,1 - addi 3,3,32 - stvx 26,7,1 - addi 7,7,32 - stvx 27,3,1 - addi 3,3,32 - stvx 28,7,1 - addi 7,7,32 - stvx 29,3,1 - addi 3,3,32 - stvx 30,7,1 - stvx 31,3,1 - li 0,-1 - stw 12,396(1) - li 3,0x10 - std 26,400(1) - li 26,0x20 - std 27,408(1) - li 27,0x30 - std 28,416(1) - li 28,0x40 - std 29,424(1) - li 29,0x50 - std 30,432(1) - li 30,0x60 - std 31,440(1) - li 31,0x70 - or 0,0,0 - - subi 9,9,3 - - lvx 23,0,6 - lvx 30,3,6 - addi 6,6,0x20 - lvx 31,0,6 - vperm 23,30,23,7 - addi 7,1,79 - mtctr 9 - -.Load_xts_dec_key: - vperm 24,31,30,7 - lvx 30,3,6 - addi 6,6,0x20 - stvx 24,0,7 - vperm 25,30,31,7 - lvx 31,0,6 - stvx 25,3,7 - addi 7,7,0x20 - bdnz .Load_xts_dec_key - - lvx 26,3,6 - vperm 24,31,30,7 - lvx 27,26,6 - stvx 24,0,7 - vperm 25,26,31,7 - lvx 
28,27,6 - stvx 25,3,7 - addi 7,1,79 - vperm 26,27,26,7 - lvx 29,28,6 - vperm 27,28,27,7 - lvx 30,29,6 - vperm 28,29,28,7 - lvx 31,30,6 - vperm 29,30,29,7 - lvx 22,31,6 - vperm 30,31,30,7 - lvx 24,0,7 - vperm 31,22,31,7 - lvx 25,3,7 - - vperm 0,2,4,5 - subi 10,10,31 - vxor 17,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vand 11,11,10 - vxor 7,0,17 - vxor 8,8,11 - - .long 0x7C235699 - vxor 18,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 1,1,1,6 - vand 11,11,10 - vxor 12,1,18 - vxor 8,8,11 - - .long 0x7C5A5699 - andi. 31,5,15 - vxor 19,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 2,2,2,6 - vand 11,11,10 - vxor 13,2,19 - vxor 8,8,11 - - .long 0x7C7B5699 - sub 5,5,31 - vxor 20,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 3,3,3,6 - vand 11,11,10 - vxor 14,3,20 - vxor 8,8,11 - - .long 0x7C9C5699 - subi 5,5,0x60 - vxor 21,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 4,4,4,6 - vand 11,11,10 - vxor 15,4,21 - vxor 8,8,11 - - .long 0x7CBD5699 - addi 10,10,0x60 - vxor 22,8,23 - vsrab 11,8,9 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - vperm 5,5,5,6 - vand 11,11,10 - vxor 16,5,22 - vxor 8,8,11 - - vxor 31,31,23 - mtctr 9 - b .Loop_xts_dec6x - -.align 5 -.Loop_xts_dec6x: - .long 0x10E7C548 - .long 0x118CC548 - .long 0x11ADC548 - .long 0x11CEC548 - .long 0x11EFC548 - .long 0x1210C548 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD48 - .long 0x118CCD48 - .long 0x11ADCD48 - .long 0x11CECD48 - .long 0x11EFCD48 - .long 0x1210CD48 - lvx 25,3,7 - bdnz .Loop_xts_dec6x - - subic 5,5,96 - vxor 0,17,31 - .long 0x10E7C548 - .long 0x118CC548 - vsrab 11,8,9 - vxor 17,8,23 - vaddubm 8,8,8 - .long 0x11ADC548 - .long 0x11CEC548 - vsldoi 11,11,11,15 - .long 0x11EFC548 - .long 0x1210C548 - - subfe. 
0,0,0 - vand 11,11,10 - .long 0x10E7CD48 - .long 0x118CCD48 - vxor 8,8,11 - .long 0x11ADCD48 - .long 0x11CECD48 - vxor 1,18,31 - vsrab 11,8,9 - vxor 18,8,23 - .long 0x11EFCD48 - .long 0x1210CD48 - - and 0,0,5 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x10E7D548 - .long 0x118CD548 - vand 11,11,10 - .long 0x11ADD548 - .long 0x11CED548 - vxor 8,8,11 - .long 0x11EFD548 - .long 0x1210D548 - - add 10,10,0 - - - - vxor 2,19,31 - vsrab 11,8,9 - vxor 19,8,23 - vaddubm 8,8,8 - .long 0x10E7DD48 - .long 0x118CDD48 - vsldoi 11,11,11,15 - .long 0x11ADDD48 - .long 0x11CEDD48 - vand 11,11,10 - .long 0x11EFDD48 - .long 0x1210DD48 - - addi 7,1,79 - vxor 8,8,11 - .long 0x10E7E548 - .long 0x118CE548 - vxor 3,20,31 - vsrab 11,8,9 - vxor 20,8,23 - .long 0x11ADE548 - .long 0x11CEE548 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x11EFE548 - .long 0x1210E548 - lvx 24,0,7 - vand 11,11,10 - - .long 0x10E7ED48 - .long 0x118CED48 - vxor 8,8,11 - .long 0x11ADED48 - .long 0x11CEED48 - vxor 4,21,31 - vsrab 11,8,9 - vxor 21,8,23 - .long 0x11EFED48 - .long 0x1210ED48 - lvx 25,3,7 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - - .long 0x10E7F548 - .long 0x118CF548 - vand 11,11,10 - .long 0x11ADF548 - .long 0x11CEF548 - vxor 8,8,11 - .long 0x11EFF548 - .long 0x1210F548 - vxor 5,22,31 - vsrab 11,8,9 - vxor 22,8,23 - - .long 0x10E70549 - .long 0x7C005699 - vaddubm 8,8,8 - vsldoi 11,11,11,15 - .long 0x118C0D49 - .long 0x7C235699 - .long 0x11AD1549 - vperm 0,0,0,6 - .long 0x7C5A5699 - vand 11,11,10 - .long 0x11CE1D49 - vperm 1,1,1,6 - .long 0x7C7B5699 - .long 0x11EF2549 - vperm 2,2,2,6 - .long 0x7C9C5699 - vxor 8,8,11 - .long 0x12102D49 - vperm 3,3,3,6 - .long 0x7CBD5699 - addi 10,10,0x60 - vperm 4,4,4,6 - vperm 5,5,5,6 - - vperm 7,7,7,6 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,17 - vperm 13,13,13,6 - .long 0x7D832799 - vxor 12,1,18 - vperm 14,14,14,6 - .long 0x7DBA2799 - vxor 13,2,19 - vperm 15,15,15,6 - .long 0x7DDB2799 - vxor 14,3,20 - vperm 16,16,16,6 - .long 0x7DFC2799 - vxor 15,4,21 - .long 
0x7E1D2799 - vxor 16,5,22 - addi 4,4,0x60 - - mtctr 9 - beq .Loop_xts_dec6x - - addic. 5,5,0x60 - beq .Lxts_dec6x_zero - cmpwi 5,0x20 - blt .Lxts_dec6x_one - nop - beq .Lxts_dec6x_two - cmpwi 5,0x40 - blt .Lxts_dec6x_three - nop - beq .Lxts_dec6x_four - -.Lxts_dec6x_five: - vxor 7,1,17 - vxor 12,2,18 - vxor 13,3,19 - vxor 14,4,20 - vxor 15,5,21 - - bl _aesp8_xts_dec5x - - vperm 7,7,7,6 - vor 17,22,22 - vxor 18,8,23 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,18 - vperm 13,13,13,6 - .long 0x7D832799 - vperm 14,14,14,6 - .long 0x7DBA2799 - vperm 15,15,15,6 - .long 0x7DDB2799 - .long 0x7DFC2799 - addi 4,4,0x50 - bne .Lxts_dec6x_steal - b .Lxts_dec6x_done - -.align 4 -.Lxts_dec6x_four: - vxor 7,2,17 - vxor 12,3,18 - vxor 13,4,19 - vxor 14,5,20 - vxor 15,15,15 - - bl _aesp8_xts_dec5x - - vperm 7,7,7,6 - vor 17,21,21 - vor 18,22,22 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,22 - vperm 13,13,13,6 - .long 0x7D832799 - vperm 14,14,14,6 - .long 0x7DBA2799 - .long 0x7DDB2799 - addi 4,4,0x40 - bne .Lxts_dec6x_steal - b .Lxts_dec6x_done - -.align 4 -.Lxts_dec6x_three: - vxor 7,3,17 - vxor 12,4,18 - vxor 13,5,19 - vxor 14,14,14 - vxor 15,15,15 - - bl _aesp8_xts_dec5x - - vperm 7,7,7,6 - vor 17,20,20 - vor 18,21,21 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,21 - vperm 13,13,13,6 - .long 0x7D832799 - .long 0x7DBA2799 - addi 4,4,0x30 - bne .Lxts_dec6x_steal - b .Lxts_dec6x_done - -.align 4 -.Lxts_dec6x_two: - vxor 7,4,17 - vxor 12,5,18 - vxor 13,13,13 - vxor 14,14,14 - vxor 15,15,15 - - bl _aesp8_xts_dec5x - - vperm 7,7,7,6 - vor 17,19,19 - vor 18,20,20 - vperm 12,12,12,6 - .long 0x7CE02799 - vxor 7,0,20 - .long 0x7D832799 - addi 4,4,0x20 - bne .Lxts_dec6x_steal - b .Lxts_dec6x_done - -.align 4 -.Lxts_dec6x_one: - vxor 7,5,17 - nop -.Loop_xts_dec1x: - .long 0x10E7C548 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD48 - lvx 25,3,7 - bdnz .Loop_xts_dec1x - - subi 0,31,1 - .long 0x10E7C548 - - andi. 
0,0,16 - cmpwi 31,0 - .long 0x10E7CD48 - - sub 10,10,0 - .long 0x10E7D548 - - .long 0x7C005699 - .long 0x10E7DD48 - - addi 7,1,79 - .long 0x10E7E548 - lvx 24,0,7 - - .long 0x10E7ED48 - lvx 25,3,7 - vxor 17,17,31 - - vperm 0,0,0,6 - .long 0x10E7F548 - - mtctr 9 - .long 0x10E78D49 - - vor 17,18,18 - vor 18,19,19 - vperm 7,7,7,6 - .long 0x7CE02799 - addi 4,4,0x10 - vxor 7,0,19 - bne .Lxts_dec6x_steal - b .Lxts_dec6x_done - -.align 4 -.Lxts_dec6x_zero: - cmpwi 31,0 - beq .Lxts_dec6x_done - - .long 0x7C005699 - vperm 0,0,0,6 - vxor 7,0,18 -.Lxts_dec6x_steal: - .long 0x10E7C548 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD48 - lvx 25,3,7 - bdnz .Lxts_dec6x_steal - - add 10,10,31 - .long 0x10E7C548 - - cmpwi 31,0 - .long 0x10E7CD48 - - .long 0x7C005699 - .long 0x10E7D548 - - lvsr 5,0,31 - .long 0x10E7DD48 - - addi 7,1,79 - .long 0x10E7E548 - lvx 24,0,7 - - .long 0x10E7ED48 - lvx 25,3,7 - vxor 18,18,31 - - vperm 0,0,0,6 - .long 0x10E7F548 - - vperm 0,0,0,5 - .long 0x11679549 - - vperm 7,11,11,6 - .long 0x7CE02799 - - - vxor 7,7,7 - vspltisb 12,-1 - vperm 7,7,12,5 - vsel 7,0,11,7 - vxor 7,7,17 - - subi 30,4,1 - mtctr 31 -.Loop_xts_dec6x_steal: - lbzu 0,1(30) - stb 0,16(30) - bdnz .Loop_xts_dec6x_steal - - li 31,0 - mtctr 9 - b .Loop_xts_dec1x - -.align 4 -.Lxts_dec6x_done: - cmpldi 8,0 - beq .Lxts_dec6x_ret - - vxor 8,17,23 - vperm 8,8,8,6 - .long 0x7D004799 - -.Lxts_dec6x_ret: - mtlr 11 - li 10,79 - li 11,95 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - stvx 9,10,1 - addi 10,10,32 - stvx 9,11,1 - addi 11,11,32 - - or 12,12,12 - lvx 20,10,1 - addi 10,10,32 - lvx 21,11,1 - addi 11,11,32 - lvx 22,10,1 - addi 10,10,32 - lvx 23,11,1 - addi 11,11,32 - lvx 24,10,1 - addi 10,10,32 - lvx 25,11,1 - addi 11,11,32 - lvx 26,10,1 - addi 10,10,32 - lvx 27,11,1 - addi 11,11,32 - lvx 28,10,1 - addi 10,10,32 - lvx 29,11,1 - addi 11,11,32 - lvx 30,10,1 - 
lvx 31,11,1 - ld 26,400(1) - ld 27,408(1) - ld 28,416(1) - ld 29,424(1) - ld 30,432(1) - ld 31,440(1) - addi 1,1,448 - blr -.long 0 -.byte 0,12,0x04,1,0x80,6,6,0 -.long 0 - -.align 5 -_aesp8_xts_dec5x: - .long 0x10E7C548 - .long 0x118CC548 - .long 0x11ADC548 - .long 0x11CEC548 - .long 0x11EFC548 - lvx 24,26,7 - addi 7,7,0x20 - - .long 0x10E7CD48 - .long 0x118CCD48 - .long 0x11ADCD48 - .long 0x11CECD48 - .long 0x11EFCD48 - lvx 25,3,7 - bdnz _aesp8_xts_dec5x - - subi 0,31,1 - .long 0x10E7C548 - .long 0x118CC548 - .long 0x11ADC548 - .long 0x11CEC548 - .long 0x11EFC548 - - andi. 0,0,16 - cmpwi 31,0 - .long 0x10E7CD48 - .long 0x118CCD48 - .long 0x11ADCD48 - .long 0x11CECD48 - .long 0x11EFCD48 - vxor 17,17,31 - - sub 10,10,0 - .long 0x10E7D548 - .long 0x118CD548 - .long 0x11ADD548 - .long 0x11CED548 - .long 0x11EFD548 - vxor 1,18,31 - - .long 0x10E7DD48 - .long 0x7C005699 - .long 0x118CDD48 - .long 0x11ADDD48 - .long 0x11CEDD48 - .long 0x11EFDD48 - vxor 2,19,31 - - addi 7,1,79 - .long 0x10E7E548 - .long 0x118CE548 - .long 0x11ADE548 - .long 0x11CEE548 - .long 0x11EFE548 - lvx 24,0,7 - vxor 3,20,31 - - .long 0x10E7ED48 - vperm 0,0,0,6 - .long 0x118CED48 - .long 0x11ADED48 - .long 0x11CEED48 - .long 0x11EFED48 - lvx 25,3,7 - vxor 4,21,31 - - .long 0x10E7F548 - .long 0x118CF548 - .long 0x11ADF548 - .long 0x11CEF548 - .long 0x11EFF548 - - .long 0x10E78D49 - .long 0x118C0D49 - .long 0x11AD1549 - .long 0x11CE1D49 - .long 0x11EF2549 - mtctr 9 - blr -.long 0 -.byte 0,12,0x14,0,0,0,0,0 -#endif // !OPENSSL_NO_ASM && __powerpc64__ -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S b/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S deleted file mode 100644 index 5b909a38d3..0000000000 --- a/packager/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S +++ /dev/null @@ -1,587 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# 
source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) -.machine "any" - -.abiversion 2 -.text - -.globl gcm_init_p8 -.type gcm_init_p8,@function -.align 5 -gcm_init_p8: -.localentry gcm_init_p8,0 - - li 0,-4096 - li 8,0x10 - li 12,-1 - li 9,0x20 - or 0,0,0 - li 10,0x30 - .long 0x7D202699 - - vspltisb 8,-16 - vspltisb 5,1 - vaddubm 8,8,8 - vxor 4,4,4 - vor 8,8,5 - vsldoi 8,8,4,15 - vsldoi 6,4,5,1 - vaddubm 8,8,8 - vspltisb 7,7 - vor 8,8,6 - vspltb 6,9,0 - vsl 9,9,5 - vsrab 6,6,7 - vand 6,6,8 - vxor 3,9,6 - - vsldoi 9,3,3,8 - vsldoi 8,4,8,8 - vsldoi 11,4,9,8 - vsldoi 10,9,4,8 - - .long 0x7D001F99 - .long 0x7D681F99 - li 8,0x40 - .long 0x7D291F99 - li 9,0x50 - .long 0x7D4A1F99 - li 10,0x60 - - .long 0x10035CC8 - .long 0x10234CC8 - .long 0x104354C8 - - .long 0x10E044C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - - vsldoi 6,0,0,8 - .long 0x100044C8 - vxor 6,6,2 - vxor 16,0,6 - - vsldoi 17,16,16,8 - vsldoi 19,4,17,8 - vsldoi 18,17,4,8 - - .long 0x7E681F99 - li 8,0x70 - .long 0x7E291F99 - li 9,0x80 - .long 0x7E4A1F99 - li 10,0x90 - .long 0x10039CC8 - .long 0x11B09CC8 - .long 0x10238CC8 - .long 0x11D08CC8 - .long 0x104394C8 - .long 0x11F094C8 - - .long 0x10E044C8 - .long 0x114D44C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vsldoi 11,14,4,8 - vsldoi 9,4,14,8 - vxor 0,0,5 - vxor 2,2,6 - vxor 13,13,11 - vxor 15,15,9 - - vsldoi 0,0,0,8 - vsldoi 13,13,13,8 - vxor 0,0,7 - vxor 13,13,10 - - vsldoi 6,0,0,8 - vsldoi 9,13,13,8 - .long 0x100044C8 - .long 0x11AD44C8 - vxor 6,6,2 - vxor 9,9,15 - vxor 0,0,6 - vxor 13,13,9 - - vsldoi 9,0,0,8 - vsldoi 17,13,13,8 - vsldoi 11,4,9,8 - vsldoi 10,9,4,8 - vsldoi 19,4,17,8 - vsldoi 18,17,4,8 - - .long 0x7D681F99 - li 8,0xa0 - .long 0x7D291F99 - li 9,0xb0 - .long 0x7D4A1F99 - li 10,0xc0 - .long 0x7E681F99 - .long 0x7E291F99 - 
.long 0x7E4A1F99 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,2,0 -.long 0 -.size gcm_init_p8,.-gcm_init_p8 -.globl gcm_gmult_p8 -.type gcm_gmult_p8,@function -.align 5 -gcm_gmult_p8: -.localentry gcm_gmult_p8,0 - - lis 0,0xfff8 - li 8,0x10 - li 12,-1 - li 9,0x20 - or 0,0,0 - li 10,0x30 - .long 0x7C601E99 - - .long 0x7D682699 - lvsl 12,0,0 - .long 0x7D292699 - vspltisb 5,0x07 - .long 0x7D4A2699 - vxor 12,12,5 - .long 0x7D002699 - vperm 3,3,3,12 - vxor 4,4,4 - - .long 0x10035CC8 - .long 0x10234CC8 - .long 0x104354C8 - - .long 0x10E044C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - - vsldoi 6,0,0,8 - .long 0x100044C8 - vxor 6,6,2 - vxor 0,0,6 - - vperm 0,0,0,12 - .long 0x7C001F99 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,2,0 -.long 0 -.size gcm_gmult_p8,.-gcm_gmult_p8 - -.globl gcm_ghash_p8 -.type gcm_ghash_p8,@function -.align 5 -gcm_ghash_p8: -.localentry gcm_ghash_p8,0 - - li 0,-4096 - li 8,0x10 - li 12,-1 - li 9,0x20 - or 0,0,0 - li 10,0x30 - .long 0x7C001E99 - - .long 0x7D682699 - li 8,0x40 - lvsl 12,0,0 - .long 0x7D292699 - li 9,0x50 - vspltisb 5,0x07 - .long 0x7D4A2699 - li 10,0x60 - vxor 12,12,5 - .long 0x7D002699 - vperm 0,0,0,12 - vxor 4,4,4 - - cmpldi 6,64 - bge .Lgcm_ghash_p8_4x - - .long 0x7C602E99 - addi 5,5,16 - subic. 
6,6,16 - vperm 3,3,3,12 - vxor 3,3,0 - beq .Lshort - - .long 0x7E682699 - li 8,16 - .long 0x7E292699 - add 9,5,6 - .long 0x7E4A2699 - - -.align 5 -.Loop_2x: - .long 0x7E002E99 - vperm 16,16,16,12 - - subic 6,6,32 - .long 0x10039CC8 - .long 0x11B05CC8 - subfe 0,0,0 - .long 0x10238CC8 - .long 0x11D04CC8 - and 0,0,6 - .long 0x104394C8 - .long 0x11F054C8 - add 5,5,0 - - vxor 0,0,13 - vxor 1,1,14 - - .long 0x10E044C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 2,2,15 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - .long 0x7C682E99 - addi 5,5,32 - - vsldoi 6,0,0,8 - .long 0x100044C8 - vperm 3,3,3,12 - vxor 6,6,2 - vxor 3,3,6 - vxor 3,3,0 - cmpld 9,5 - bgt .Loop_2x - - cmplwi 6,0 - bne .Leven - -.Lshort: - .long 0x10035CC8 - .long 0x10234CC8 - .long 0x104354C8 - - .long 0x10E044C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - - vsldoi 6,0,0,8 - .long 0x100044C8 - vxor 6,6,2 - -.Leven: - vxor 0,0,6 - vperm 0,0,0,12 - .long 0x7C001F99 - - or 12,12,12 - blr -.long 0 -.byte 0,12,0x14,0,0,0,4,0 -.long 0 -.align 5 -.gcm_ghash_p8_4x: -.Lgcm_ghash_p8_4x: - stdu 1,-256(1) - li 10,63 - li 11,79 - stvx 20,10,1 - addi 10,10,32 - stvx 21,11,1 - addi 11,11,32 - stvx 22,10,1 - addi 10,10,32 - stvx 23,11,1 - addi 11,11,32 - stvx 24,10,1 - addi 10,10,32 - stvx 25,11,1 - addi 11,11,32 - stvx 26,10,1 - addi 10,10,32 - stvx 27,11,1 - addi 11,11,32 - stvx 28,10,1 - addi 10,10,32 - stvx 29,11,1 - addi 11,11,32 - stvx 30,10,1 - li 10,0x60 - stvx 31,11,1 - li 0,-1 - stw 12,252(1) - or 0,0,0 - - lvsl 5,0,8 - - li 8,0x70 - .long 0x7E292699 - li 9,0x80 - vspltisb 6,8 - - li 10,0x90 - .long 0x7EE82699 - li 8,0xa0 - .long 0x7F092699 - li 9,0xb0 - .long 0x7F2A2699 - li 10,0xc0 - .long 0x7FA82699 - li 8,0x10 - .long 0x7FC92699 - li 9,0x20 - .long 0x7FEA2699 - li 10,0x30 - - vsldoi 7,4,6,8 - vaddubm 18,5,7 - vaddubm 19,6,18 - - srdi 6,6,4 - - .long 0x7C602E99 - .long 0x7E082E99 - subic. 
6,6,8 - .long 0x7EC92E99 - .long 0x7F8A2E99 - addi 5,5,0x40 - vperm 3,3,3,12 - vperm 16,16,16,12 - vperm 22,22,22,12 - vperm 28,28,28,12 - - vxor 2,3,0 - - .long 0x11B0BCC8 - .long 0x11D0C4C8 - .long 0x11F0CCC8 - - vperm 11,17,9,18 - vperm 5,22,28,19 - vperm 10,17,9,19 - vperm 6,22,28,18 - .long 0x12B68CC8 - .long 0x12855CC8 - .long 0x137C4CC8 - .long 0x134654C8 - - vxor 21,21,14 - vxor 20,20,13 - vxor 27,27,21 - vxor 26,26,15 - - blt .Ltail_4x - -.Loop_4x: - .long 0x7C602E99 - .long 0x7E082E99 - subic. 6,6,4 - .long 0x7EC92E99 - .long 0x7F8A2E99 - addi 5,5,0x40 - vperm 16,16,16,12 - vperm 22,22,22,12 - vperm 28,28,28,12 - vperm 3,3,3,12 - - .long 0x1002ECC8 - .long 0x1022F4C8 - .long 0x1042FCC8 - .long 0x11B0BCC8 - .long 0x11D0C4C8 - .long 0x11F0CCC8 - - vxor 0,0,20 - vxor 1,1,27 - vxor 2,2,26 - vperm 5,22,28,19 - vperm 6,22,28,18 - - .long 0x10E044C8 - .long 0x12855CC8 - .long 0x134654C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - - vsldoi 6,0,0,8 - .long 0x12B68CC8 - .long 0x137C4CC8 - .long 0x100044C8 - - vxor 20,20,13 - vxor 26,26,15 - vxor 2,2,3 - vxor 21,21,14 - vxor 2,2,6 - vxor 27,27,21 - vxor 2,2,0 - bge .Loop_4x - -.Ltail_4x: - .long 0x1002ECC8 - .long 0x1022F4C8 - .long 0x1042FCC8 - - vxor 0,0,20 - vxor 1,1,27 - - .long 0x10E044C8 - - vsldoi 5,1,4,8 - vsldoi 6,4,1,8 - vxor 2,2,26 - vxor 0,0,5 - vxor 2,2,6 - - vsldoi 0,0,0,8 - vxor 0,0,7 - - vsldoi 6,0,0,8 - .long 0x100044C8 - vxor 6,6,2 - vxor 0,0,6 - - addic. 
6,6,4 - beq .Ldone_4x - - .long 0x7C602E99 - cmpldi 6,2 - li 6,-4 - blt .Lone - .long 0x7E082E99 - beq .Ltwo - -.Lthree: - .long 0x7EC92E99 - vperm 3,3,3,12 - vperm 16,16,16,12 - vperm 22,22,22,12 - - vxor 2,3,0 - vor 29,23,23 - vor 30, 24, 24 - vor 31,25,25 - - vperm 5,16,22,19 - vperm 6,16,22,18 - .long 0x12B08CC8 - .long 0x13764CC8 - .long 0x12855CC8 - .long 0x134654C8 - - vxor 27,27,21 - b .Ltail_4x - -.align 4 -.Ltwo: - vperm 3,3,3,12 - vperm 16,16,16,12 - - vxor 2,3,0 - vperm 5,4,16,19 - vperm 6,4,16,18 - - vsldoi 29,4,17,8 - vor 30, 17, 17 - vsldoi 31,17,4,8 - - .long 0x12855CC8 - .long 0x13704CC8 - .long 0x134654C8 - - b .Ltail_4x - -.align 4 -.Lone: - vperm 3,3,3,12 - - vsldoi 29,4,9,8 - vor 30, 9, 9 - vsldoi 31,9,4,8 - - vxor 2,3,0 - vxor 20,20,20 - vxor 27,27,27 - vxor 26,26,26 - - b .Ltail_4x - -.Ldone_4x: - vperm 0,0,0,12 - .long 0x7C001F99 - - li 10,63 - li 11,79 - or 12,12,12 - lvx 20,10,1 - addi 10,10,32 - lvx 21,11,1 - addi 11,11,32 - lvx 22,10,1 - addi 10,10,32 - lvx 23,11,1 - addi 11,11,32 - lvx 24,10,1 - addi 10,10,32 - lvx 25,11,1 - addi 11,11,32 - lvx 26,10,1 - addi 10,10,32 - lvx 27,11,1 - addi 11,11,32 - lvx 28,10,1 - addi 10,10,32 - lvx 29,11,1 - addi 11,11,32 - lvx 30,10,1 - lvx 31,11,1 - addi 1,1,256 - blr -.long 0 -.byte 0,12,0x04,0,0x80,0,4,0 -.long 0 -.size gcm_ghash_p8,.-gcm_ghash_p8 - -.byte 71,72,65,83,72,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 2 -.align 2 -#endif // !OPENSSL_NO_ASM && __powerpc64__ -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/chacha/chacha-x86.S b/packager/third_party/boringssl/linux-x86/crypto/chacha/chacha-x86.S deleted file mode 100644 index feceb5d9f8..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/chacha/chacha-x86.S +++ /dev/null @@ -1,975 +0,0 @@ -# This file is generated from a 
similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl ChaCha20_ctr32 -.hidden ChaCha20_ctr32 -.type ChaCha20_ctr32,@function -.align 16 -ChaCha20_ctr32: -.L_ChaCha20_ctr32_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - xorl %eax,%eax - cmpl 28(%esp),%eax - je .L000no_data - call .Lpic_point -.Lpic_point: - popl %eax - leal OPENSSL_ia32cap_P-.Lpic_point(%eax),%ebp - testl $16777216,(%ebp) - jz .L001x86 - testl $512,4(%ebp) - jz .L001x86 - jmp .Lssse3_shortcut -.L001x86: - movl 32(%esp),%esi - movl 36(%esp),%edi - subl $132,%esp - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,80(%esp) - movl %ebx,84(%esp) - movl %ecx,88(%esp) - movl %edx,92(%esp) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - movl %eax,96(%esp) - movl %ebx,100(%esp) - movl %ecx,104(%esp) - movl %edx,108(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - subl $1,%eax - movl %eax,112(%esp) - movl %ebx,116(%esp) - movl %ecx,120(%esp) - movl %edx,124(%esp) - jmp .L002entry -.align 16 -.L003outer_loop: - movl %ebx,156(%esp) - movl %eax,152(%esp) - movl %ecx,160(%esp) -.L002entry: - movl $1634760805,%eax - movl $857760878,4(%esp) - movl $2036477234,8(%esp) - movl $1797285236,12(%esp) - movl 84(%esp),%ebx - movl 88(%esp),%ebp - movl 104(%esp),%ecx - movl 108(%esp),%esi - movl 116(%esp),%edx - movl 120(%esp),%edi - movl %ebx,20(%esp) - movl %ebp,24(%esp) - movl %ecx,40(%esp) - movl %esi,44(%esp) - movl %edx,52(%esp) - movl %edi,56(%esp) - movl 92(%esp),%ebx - movl 124(%esp),%edi - movl 112(%esp),%edx - movl 80(%esp),%ebp - movl 96(%esp),%ecx - movl 100(%esp),%esi - addl $1,%edx - movl %ebx,28(%esp) - movl %edi,60(%esp) - movl %edx,112(%esp) - movl $10,%ebx - jmp .L004loop -.align 16 -.L004loop: - addl %ebp,%eax - movl %ebx,128(%esp) - movl %ebp,%ebx - xorl 
%eax,%edx - roll $16,%edx - addl %edx,%ecx - xorl %ecx,%ebx - movl 52(%esp),%edi - roll $12,%ebx - movl 20(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,(%esp) - roll $8,%edx - movl 4(%esp),%eax - addl %edx,%ecx - movl %edx,48(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - movl %ecx,32(%esp) - roll $16,%edi - movl %ebx,16(%esp) - addl %edi,%esi - movl 40(%esp),%ecx - xorl %esi,%ebp - movl 56(%esp),%edx - roll $12,%ebp - movl 24(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,4(%esp) - roll $8,%edi - movl 8(%esp),%eax - addl %edi,%esi - movl %edi,52(%esp) - xorl %esi,%ebp - addl %ebx,%eax - roll $7,%ebp - xorl %eax,%edx - movl %esi,36(%esp) - roll $16,%edx - movl %ebp,20(%esp) - addl %edx,%ecx - movl 44(%esp),%esi - xorl %ecx,%ebx - movl 60(%esp),%edi - roll $12,%ebx - movl 28(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,8(%esp) - roll $8,%edx - movl 12(%esp),%eax - addl %edx,%ecx - movl %edx,56(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - roll $16,%edi - movl %ebx,24(%esp) - addl %edi,%esi - xorl %esi,%ebp - roll $12,%ebp - movl 20(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,12(%esp) - roll $8,%edi - movl (%esp),%eax - addl %edi,%esi - movl %edi,%edx - xorl %esi,%ebp - addl %ebx,%eax - roll $7,%ebp - xorl %eax,%edx - roll $16,%edx - movl %ebp,28(%esp) - addl %edx,%ecx - xorl %ecx,%ebx - movl 48(%esp),%edi - roll $12,%ebx - movl 24(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,(%esp) - roll $8,%edx - movl 4(%esp),%eax - addl %edx,%ecx - movl %edx,60(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - movl %ecx,40(%esp) - roll $16,%edi - movl %ebx,20(%esp) - addl %edi,%esi - movl 32(%esp),%ecx - xorl %esi,%ebp - movl 52(%esp),%edx - roll $12,%ebp - movl 28(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,4(%esp) - roll $8,%edi - movl 8(%esp),%eax - addl %edi,%esi - movl %edi,48(%esp) - xorl %esi,%ebp - addl %ebx,%eax - roll 
$7,%ebp - xorl %eax,%edx - movl %esi,44(%esp) - roll $16,%edx - movl %ebp,24(%esp) - addl %edx,%ecx - movl 36(%esp),%esi - xorl %ecx,%ebx - movl 56(%esp),%edi - roll $12,%ebx - movl 16(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,8(%esp) - roll $8,%edx - movl 12(%esp),%eax - addl %edx,%ecx - movl %edx,52(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - roll $16,%edi - movl %ebx,28(%esp) - addl %edi,%esi - xorl %esi,%ebp - movl 48(%esp),%edx - roll $12,%ebp - movl 128(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,12(%esp) - roll $8,%edi - movl (%esp),%eax - addl %edi,%esi - movl %edi,56(%esp) - xorl %esi,%ebp - roll $7,%ebp - decl %ebx - jnz .L004loop - movl 160(%esp),%ebx - addl $1634760805,%eax - addl 80(%esp),%ebp - addl 96(%esp),%ecx - addl 100(%esp),%esi - cmpl $64,%ebx - jb .L005tail - movl 156(%esp),%ebx - addl 112(%esp),%edx - addl 120(%esp),%edi - xorl (%ebx),%eax - xorl 16(%ebx),%ebp - movl %eax,(%esp) - movl 152(%esp),%eax - xorl 32(%ebx),%ecx - xorl 36(%ebx),%esi - xorl 48(%ebx),%edx - xorl 56(%ebx),%edi - movl %ebp,16(%eax) - movl %ecx,32(%eax) - movl %esi,36(%eax) - movl %edx,48(%eax) - movl %edi,56(%eax) - movl 4(%esp),%ebp - movl 8(%esp),%ecx - movl 12(%esp),%esi - movl 20(%esp),%edx - movl 24(%esp),%edi - addl $857760878,%ebp - addl $2036477234,%ecx - addl $1797285236,%esi - addl 84(%esp),%edx - addl 88(%esp),%edi - xorl 4(%ebx),%ebp - xorl 8(%ebx),%ecx - xorl 12(%ebx),%esi - xorl 20(%ebx),%edx - xorl 24(%ebx),%edi - movl %ebp,4(%eax) - movl %ecx,8(%eax) - movl %esi,12(%eax) - movl %edx,20(%eax) - movl %edi,24(%eax) - movl 28(%esp),%ebp - movl 40(%esp),%ecx - movl 44(%esp),%esi - movl 52(%esp),%edx - movl 60(%esp),%edi - addl 92(%esp),%ebp - addl 104(%esp),%ecx - addl 108(%esp),%esi - addl 116(%esp),%edx - addl 124(%esp),%edi - xorl 28(%ebx),%ebp - xorl 40(%ebx),%ecx - xorl 44(%ebx),%esi - xorl 52(%ebx),%edx - xorl 60(%ebx),%edi - leal 64(%ebx),%ebx - movl %ebp,28(%eax) - movl (%esp),%ebp - movl 
%ecx,40(%eax) - movl 160(%esp),%ecx - movl %esi,44(%eax) - movl %edx,52(%eax) - movl %edi,60(%eax) - movl %ebp,(%eax) - leal 64(%eax),%eax - subl $64,%ecx - jnz .L003outer_loop - jmp .L006done -.L005tail: - addl 112(%esp),%edx - addl 120(%esp),%edi - movl %eax,(%esp) - movl %ebp,16(%esp) - movl %ecx,32(%esp) - movl %esi,36(%esp) - movl %edx,48(%esp) - movl %edi,56(%esp) - movl 4(%esp),%ebp - movl 8(%esp),%ecx - movl 12(%esp),%esi - movl 20(%esp),%edx - movl 24(%esp),%edi - addl $857760878,%ebp - addl $2036477234,%ecx - addl $1797285236,%esi - addl 84(%esp),%edx - addl 88(%esp),%edi - movl %ebp,4(%esp) - movl %ecx,8(%esp) - movl %esi,12(%esp) - movl %edx,20(%esp) - movl %edi,24(%esp) - movl 28(%esp),%ebp - movl 40(%esp),%ecx - movl 44(%esp),%esi - movl 52(%esp),%edx - movl 60(%esp),%edi - addl 92(%esp),%ebp - addl 104(%esp),%ecx - addl 108(%esp),%esi - addl 116(%esp),%edx - addl 124(%esp),%edi - movl %ebp,28(%esp) - movl 156(%esp),%ebp - movl %ecx,40(%esp) - movl 152(%esp),%ecx - movl %esi,44(%esp) - xorl %esi,%esi - movl %edx,52(%esp) - movl %edi,60(%esp) - xorl %eax,%eax - xorl %edx,%edx -.L007tail_loop: - movb (%esi,%ebp,1),%al - movb (%esp,%esi,1),%dl - leal 1(%esi),%esi - xorb %dl,%al - movb %al,-1(%ecx,%esi,1) - decl %ebx - jnz .L007tail_loop -.L006done: - addl $132,%esp -.L000no_data: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size ChaCha20_ctr32,.-.L_ChaCha20_ctr32_begin -.globl ChaCha20_ssse3 -.hidden ChaCha20_ssse3 -.type ChaCha20_ssse3,@function -.align 16 -ChaCha20_ssse3: -.L_ChaCha20_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -.Lssse3_shortcut: - movl 20(%esp),%edi - movl 24(%esp),%esi - movl 28(%esp),%ecx - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl %esp,%ebp - subl $524,%esp - andl $-64,%esp - movl %ebp,512(%esp) - leal .Lssse3_data-.Lpic_point(%eax),%eax - movdqu (%ebx),%xmm3 - cmpl $256,%ecx - jb .L0081x - movl %edx,516(%esp) - movl %ebx,520(%esp) - subl $256,%ecx - leal 384(%esp),%ebp - movdqu (%edx),%xmm7 - 
pshufd $0,%xmm3,%xmm0 - pshufd $85,%xmm3,%xmm1 - pshufd $170,%xmm3,%xmm2 - pshufd $255,%xmm3,%xmm3 - paddd 48(%eax),%xmm0 - pshufd $0,%xmm7,%xmm4 - pshufd $85,%xmm7,%xmm5 - psubd 64(%eax),%xmm0 - pshufd $170,%xmm7,%xmm6 - pshufd $255,%xmm7,%xmm7 - movdqa %xmm0,64(%ebp) - movdqa %xmm1,80(%ebp) - movdqa %xmm2,96(%ebp) - movdqa %xmm3,112(%ebp) - movdqu 16(%edx),%xmm3 - movdqa %xmm4,-64(%ebp) - movdqa %xmm5,-48(%ebp) - movdqa %xmm6,-32(%ebp) - movdqa %xmm7,-16(%ebp) - movdqa 32(%eax),%xmm7 - leal 128(%esp),%ebx - pshufd $0,%xmm3,%xmm0 - pshufd $85,%xmm3,%xmm1 - pshufd $170,%xmm3,%xmm2 - pshufd $255,%xmm3,%xmm3 - pshufd $0,%xmm7,%xmm4 - pshufd $85,%xmm7,%xmm5 - pshufd $170,%xmm7,%xmm6 - pshufd $255,%xmm7,%xmm7 - movdqa %xmm0,(%ebp) - movdqa %xmm1,16(%ebp) - movdqa %xmm2,32(%ebp) - movdqa %xmm3,48(%ebp) - movdqa %xmm4,-128(%ebp) - movdqa %xmm5,-112(%ebp) - movdqa %xmm6,-96(%ebp) - movdqa %xmm7,-80(%ebp) - leal 128(%esi),%esi - leal 128(%edi),%edi - jmp .L009outer_loop -.align 16 -.L009outer_loop: - movdqa -112(%ebp),%xmm1 - movdqa -96(%ebp),%xmm2 - movdqa -80(%ebp),%xmm3 - movdqa -48(%ebp),%xmm5 - movdqa -32(%ebp),%xmm6 - movdqa -16(%ebp),%xmm7 - movdqa %xmm1,-112(%ebx) - movdqa %xmm2,-96(%ebx) - movdqa %xmm3,-80(%ebx) - movdqa %xmm5,-48(%ebx) - movdqa %xmm6,-32(%ebx) - movdqa %xmm7,-16(%ebx) - movdqa 32(%ebp),%xmm2 - movdqa 48(%ebp),%xmm3 - movdqa 64(%ebp),%xmm4 - movdqa 80(%ebp),%xmm5 - movdqa 96(%ebp),%xmm6 - movdqa 112(%ebp),%xmm7 - paddd 64(%eax),%xmm4 - movdqa %xmm2,32(%ebx) - movdqa %xmm3,48(%ebx) - movdqa %xmm4,64(%ebx) - movdqa %xmm5,80(%ebx) - movdqa %xmm6,96(%ebx) - movdqa %xmm7,112(%ebx) - movdqa %xmm4,64(%ebp) - movdqa -128(%ebp),%xmm0 - movdqa %xmm4,%xmm6 - movdqa -64(%ebp),%xmm3 - movdqa (%ebp),%xmm4 - movdqa 16(%ebp),%xmm5 - movl $10,%edx - nop -.align 16 -.L010loop: - paddd %xmm3,%xmm0 - movdqa %xmm3,%xmm2 - pxor %xmm0,%xmm6 - pshufb (%eax),%xmm6 - paddd %xmm6,%xmm4 - pxor %xmm4,%xmm2 - movdqa -48(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - 
psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -112(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 80(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-128(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,64(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - movdqa %xmm4,(%ebx) - pshufb (%eax),%xmm7 - movdqa %xmm2,-64(%ebx) - paddd %xmm7,%xmm5 - movdqa 32(%ebx),%xmm4 - pxor %xmm5,%xmm3 - movdqa -32(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -96(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 96(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-112(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,80(%ebx) - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - movdqa %xmm5,16(%ebx) - pshufb (%eax),%xmm6 - movdqa %xmm3,-48(%ebx) - paddd %xmm6,%xmm4 - movdqa 48(%ebx),%xmm5 - pxor %xmm4,%xmm2 - movdqa -16(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -80(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 112(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-96(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,96(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - pshufb (%eax),%xmm7 - movdqa %xmm2,-32(%ebx) - paddd %xmm7,%xmm5 - pxor %xmm5,%xmm3 - movdqa -48(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -128(%ebx),%xmm0 - paddd %xmm3,%xmm1 - pxor %xmm1,%xmm7 - movdqa %xmm1,-80(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,%xmm6 - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - pshufb (%eax),%xmm6 - movdqa %xmm3,-16(%ebx) - paddd %xmm6,%xmm4 - pxor 
%xmm4,%xmm2 - movdqa -32(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -112(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 64(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-128(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,112(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - movdqa %xmm4,32(%ebx) - pshufb (%eax),%xmm7 - movdqa %xmm2,-48(%ebx) - paddd %xmm7,%xmm5 - movdqa (%ebx),%xmm4 - pxor %xmm5,%xmm3 - movdqa -16(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -96(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 80(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-112(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,64(%ebx) - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - movdqa %xmm5,48(%ebx) - pshufb (%eax),%xmm6 - movdqa %xmm3,-32(%ebx) - paddd %xmm6,%xmm4 - movdqa 16(%ebx),%xmm5 - pxor %xmm4,%xmm2 - movdqa -64(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -80(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 96(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-96(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,80(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - pshufb (%eax),%xmm7 - movdqa %xmm2,-16(%ebx) - paddd %xmm7,%xmm5 - pxor %xmm5,%xmm3 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -128(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 64(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-80(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,96(%ebx) - pxor %xmm5,%xmm3 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - por %xmm1,%xmm3 - decl %edx - jnz .L010loop - movdqa 
%xmm3,-64(%ebx) - movdqa %xmm4,(%ebx) - movdqa %xmm5,16(%ebx) - movdqa %xmm6,64(%ebx) - movdqa %xmm7,96(%ebx) - movdqa -112(%ebx),%xmm1 - movdqa -96(%ebx),%xmm2 - movdqa -80(%ebx),%xmm3 - paddd -128(%ebp),%xmm0 - paddd -112(%ebp),%xmm1 - paddd -96(%ebp),%xmm2 - paddd -80(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa -64(%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa -48(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa -32(%ebx),%xmm2 - pxor %xmm3,%xmm7 - movdqa -16(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd -64(%ebp),%xmm0 - paddd -48(%ebp),%xmm1 - paddd -32(%ebp),%xmm2 - paddd -16(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa (%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa 16(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa 32(%ebx),%xmm2 - pxor %xmm3,%xmm7 - movdqa 48(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd (%ebp),%xmm0 - paddd 16(%ebp),%xmm1 - paddd 32(%ebp),%xmm2 - paddd 48(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa 
%xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa 64(%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa 80(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa 96(%ebx),%xmm2 - pxor %xmm3,%xmm7 - movdqa 112(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd 64(%ebp),%xmm0 - paddd 80(%ebp),%xmm1 - paddd 96(%ebp),%xmm2 - paddd 112(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 208(%esi),%esi - pxor %xmm0,%xmm4 - pxor %xmm1,%xmm5 - pxor %xmm2,%xmm6 - pxor %xmm3,%xmm7 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 208(%edi),%edi - subl $256,%ecx - jnc .L009outer_loop - addl $256,%ecx - jz .L011done - movl 520(%esp),%ebx - leal -128(%esi),%esi - movl 516(%esp),%edx - leal -128(%edi),%edi - movd 64(%ebp),%xmm2 - movdqu (%ebx),%xmm3 - paddd 96(%eax),%xmm2 - pand 112(%eax),%xmm3 - por %xmm2,%xmm3 -.L0081x: - movdqa 32(%eax),%xmm0 - movdqu (%edx),%xmm1 - movdqu 16(%edx),%xmm2 - movdqa (%eax),%xmm6 - movdqa 16(%eax),%xmm7 - movl %ebp,48(%esp) - movdqa %xmm0,(%esp) - movdqa %xmm1,16(%esp) - movdqa %xmm2,32(%esp) - movdqa %xmm3,48(%esp) - movl $10,%edx - jmp .L012loop1x -.align 16 -.L013outer1x: - movdqa 80(%eax),%xmm3 - movdqa (%esp),%xmm0 - movdqa 16(%esp),%xmm1 - movdqa 32(%esp),%xmm2 - paddd 48(%esp),%xmm3 - movl $10,%edx - movdqa %xmm3,48(%esp) - jmp .L012loop1x -.align 16 -.L012loop1x: 
- paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $57,%xmm1,%xmm1 - pshufd $147,%xmm3,%xmm3 - nop - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $147,%xmm1,%xmm1 - pshufd $57,%xmm3,%xmm3 - decl %edx - jnz .L012loop1x - paddd (%esp),%xmm0 - paddd 16(%esp),%xmm1 - paddd 32(%esp),%xmm2 - paddd 48(%esp),%xmm3 - cmpl $64,%ecx - jb .L014tail - movdqu (%esi),%xmm4 - movdqu 16(%esi),%xmm5 - pxor %xmm4,%xmm0 - movdqu 32(%esi),%xmm4 - pxor %xmm5,%xmm1 - movdqu 48(%esi),%xmm5 - pxor %xmm4,%xmm2 - pxor %xmm5,%xmm3 - leal 64(%esi),%esi - movdqu %xmm0,(%edi) - movdqu %xmm1,16(%edi) - movdqu %xmm2,32(%edi) - movdqu %xmm3,48(%edi) - leal 64(%edi),%edi - subl $64,%ecx - jnz .L013outer1x - jmp .L011done -.L014tail: - movdqa %xmm0,(%esp) - movdqa %xmm1,16(%esp) - movdqa %xmm2,32(%esp) - movdqa %xmm3,48(%esp) - xorl %eax,%eax - xorl %edx,%edx - xorl %ebp,%ebp -.L015tail_loop: - movb (%esp,%ebp,1),%al - movb (%esi,%ebp,1),%dl - leal 1(%ebp),%ebp - xorb %dl,%al - movb %al,-1(%edi,%ebp,1) - decl %ecx - jnz .L015tail_loop -.L011done: - movl 512(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size ChaCha20_ssse3,.-.L_ChaCha20_ssse3_begin -.align 64 -.Lssse3_data: -.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 -.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 -.long 1634760805,857760878,2036477234,1797285236 -.long 
0,1,2,3 -.long 4,4,4,4 -.long 1,0,0,0 -.long 4,0,0,0 -.long 0,-1,-1,-1 -.align 64 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 -.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 -.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 -.byte 114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aes-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aes-586.S deleted file mode 100644 index e43cfea942..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aes-586.S +++ /dev/null @@ -1,3263 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.hidden _x86_AES_encrypt_compact -.type _x86_AES_encrypt_compact,@function -.align 16 -_x86_AES_encrypt_compact: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl -128(%ebp),%edi - movl -96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi -.align 16 -.L000loop: - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movzbl -128(%ebp,%esi,1),%esi - movzbl %ch,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - 
xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ah,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $8,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movzbl -128(%ebp,%ecx,1),%ecx - shll $24,%ecx - xorl %ecx,%edx - movl %esi,%ecx - - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %ecx,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %ecx,%edi - xorl %esi,%ecx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%ecx - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%ecx - andl %edx,%ebp - leal (%edx,%edx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %edx,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %edx,%edi - xorl %esi,%edx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%edx - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%edx - andl %eax,%ebp - leal (%eax,%eax,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %eax,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %eax,%edi - xorl %esi,%eax - rorl $24,%edi - xorl %ebp,%esi - roll $24,%eax - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%eax - andl %ebx,%ebp - leal (%ebx,%ebx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %ebx,%ebp 
- andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %ebx,%edi - xorl %esi,%ebx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%ebx - xorl %edi,%esi - xorl %esi,%ebx - movl 20(%esp),%edi - movl 28(%esp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb .L000loop - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movzbl -128(%ebp,%esi,1),%esi - movzbl %ch,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - - movl 20(%esp),%edi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ah,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $8,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movzbl -128(%ebp,%ecx,1),%ecx - shll $24,%ecx - xorl %ecx,%edx - movl %esi,%ecx - - xorl 16(%edi),%eax - xorl 20(%edi),%ebx - xorl 24(%edi),%ecx - xorl 28(%edi),%edx - ret -.size 
_x86_AES_encrypt_compact,.-_x86_AES_encrypt_compact -.hidden _sse_AES_encrypt_compact -.type _sse_AES_encrypt_compact,@function -.align 16 -_sse_AES_encrypt_compact: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl $454761243,%eax - movl %eax,8(%esp) - movl %eax,12(%esp) - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx -.align 16 -.L001loop: - pshufw $8,%mm0,%mm1 - pshufw $13,%mm4,%mm5 - movd %mm1,%eax - movd %mm5,%ebx - movl %edi,20(%esp) - movzbl %al,%esi - movzbl %ah,%edx - pshufw $13,%mm0,%mm2 - movzbl -128(%ebp,%esi,1),%ecx - movzbl %bl,%edi - movzbl -128(%ebp,%edx,1),%edx - shrl $16,%eax - shll $8,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $16,%esi - pshufw $8,%mm4,%mm6 - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $24,%esi - shrl $16,%ebx - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $8,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - movd %mm2,%eax - movd %ecx,%mm0 - movzbl -128(%ebp,%edi,1),%ecx - movzbl %ah,%edi - shll $16,%ecx - movd %mm6,%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - shll $8,%esi - shrl $16,%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shrl $16,%eax - movd %ecx,%mm1 - movzbl -128(%ebp,%edi,1),%ecx - movzbl %ah,%edi - shll $16,%ecx - andl $255,%eax - orl %esi,%ecx - punpckldq %mm1,%mm0 - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - andl $255,%ebx - movzbl -128(%ebp,%eax,1),%eax - orl %esi,%ecx - shll $16,%eax - movzbl -128(%ebp,%edi,1),%esi - orl %eax,%edx - shll $8,%esi - movzbl 
-128(%ebp,%ebx,1),%ebx - orl %esi,%ecx - orl %ebx,%edx - movl 20(%esp),%edi - movd %ecx,%mm4 - movd %edx,%mm5 - punpckldq %mm5,%mm4 - addl $16,%edi - cmpl 24(%esp),%edi - ja .L002out - movq 8(%esp),%mm2 - pxor %mm3,%mm3 - pxor %mm7,%mm7 - movq %mm0,%mm1 - movq %mm4,%mm5 - pcmpgtb %mm0,%mm3 - pcmpgtb %mm4,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - pshufw $177,%mm0,%mm2 - pshufw $177,%mm4,%mm6 - paddb %mm0,%mm0 - paddb %mm4,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pshufw $177,%mm2,%mm3 - pshufw $177,%mm6,%mm7 - pxor %mm0,%mm1 - pxor %mm4,%mm5 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq %mm3,%mm2 - movq %mm7,%mm6 - pslld $8,%mm3 - pslld $8,%mm7 - psrld $24,%mm2 - psrld $24,%mm6 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq %mm1,%mm3 - movq %mm5,%mm7 - movq (%edi),%mm2 - movq 8(%edi),%mm6 - psrld $8,%mm1 - psrld $8,%mm5 - movl -128(%ebp),%eax - pslld $24,%mm3 - pslld $24,%mm7 - movl -64(%ebp),%ebx - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movl (%ebp),%ecx - pxor %mm3,%mm0 - pxor %mm7,%mm4 - movl 64(%ebp),%edx - pxor %mm2,%mm0 - pxor %mm6,%mm4 - jmp .L001loop -.align 16 -.L002out: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - ret -.size _sse_AES_encrypt_compact,.-_sse_AES_encrypt_compact -.hidden _x86_AES_encrypt -.type _x86_AES_encrypt,@function -.align 16 -_x86_AES_encrypt: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) -.align 16 -.L003loop: - movl %eax,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %bh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,4(%esp) - - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movl (%ebp,%esi,8),%esi - movzbl %ch,%edi - xorl 3(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl 
%eax,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,8(%esp) - - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movl (%ebp,%esi,8),%esi - movzbl %dh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movzbl %bh,%edi - xorl 1(%ebp,%edi,8),%esi - - movl 20(%esp),%edi - movl (%ebp,%edx,8),%edx - movzbl %ah,%eax - xorl 3(%ebp,%eax,8),%edx - movl 4(%esp),%eax - andl $255,%ebx - xorl 2(%ebp,%ebx,8),%edx - movl 8(%esp),%ebx - xorl 1(%ebp,%ecx,8),%edx - movl %esi,%ecx - - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb .L003loop - movl %eax,%esi - andl $255,%esi - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %bh,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %ch,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %dh,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movzbl %bh,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movl 2(%ebp,%edx,8),%edx - andl $255,%edx - movzbl 
%ah,%eax - movl (%ebp,%eax,8),%eax - andl $65280,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movl (%ebp,%ebx,8),%ebx - andl $16711680,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movl 2(%ebp,%ecx,8),%ecx - andl $4278190080,%ecx - xorl %ecx,%edx - movl %esi,%ecx - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - ret -.align 64 -.LAES_Te: -.long 2774754246,2774754246 -.long 2222750968,2222750968 -.long 2574743534,2574743534 -.long 2373680118,2373680118 -.long 234025727,234025727 -.long 3177933782,3177933782 -.long 2976870366,2976870366 -.long 1422247313,1422247313 -.long 1345335392,1345335392 -.long 50397442,50397442 -.long 2842126286,2842126286 -.long 2099981142,2099981142 -.long 436141799,436141799 -.long 1658312629,1658312629 -.long 3870010189,3870010189 -.long 2591454956,2591454956 -.long 1170918031,1170918031 -.long 2642575903,2642575903 -.long 1086966153,1086966153 -.long 2273148410,2273148410 -.long 368769775,368769775 -.long 3948501426,3948501426 -.long 3376891790,3376891790 -.long 200339707,200339707 -.long 3970805057,3970805057 -.long 1742001331,1742001331 -.long 4255294047,4255294047 -.long 3937382213,3937382213 -.long 3214711843,3214711843 -.long 4154762323,4154762323 -.long 2524082916,2524082916 -.long 1539358875,1539358875 -.long 3266819957,3266819957 -.long 486407649,486407649 -.long 2928907069,2928907069 -.long 1780885068,1780885068 -.long 1513502316,1513502316 -.long 1094664062,1094664062 -.long 49805301,49805301 -.long 1338821763,1338821763 -.long 1546925160,1546925160 -.long 4104496465,4104496465 -.long 887481809,887481809 -.long 150073849,150073849 -.long 2473685474,2473685474 -.long 1943591083,1943591083 -.long 1395732834,1395732834 -.long 1058346282,1058346282 -.long 201589768,201589768 -.long 1388824469,1388824469 -.long 1696801606,1696801606 -.long 1589887901,1589887901 -.long 672667696,672667696 -.long 2711000631,2711000631 -.long 251987210,251987210 -.long 3046808111,3046808111 
-.long 151455502,151455502 -.long 907153956,907153956 -.long 2608889883,2608889883 -.long 1038279391,1038279391 -.long 652995533,652995533 -.long 1764173646,1764173646 -.long 3451040383,3451040383 -.long 2675275242,2675275242 -.long 453576978,453576978 -.long 2659418909,2659418909 -.long 1949051992,1949051992 -.long 773462580,773462580 -.long 756751158,756751158 -.long 2993581788,2993581788 -.long 3998898868,3998898868 -.long 4221608027,4221608027 -.long 4132590244,4132590244 -.long 1295727478,1295727478 -.long 1641469623,1641469623 -.long 3467883389,3467883389 -.long 2066295122,2066295122 -.long 1055122397,1055122397 -.long 1898917726,1898917726 -.long 2542044179,2542044179 -.long 4115878822,4115878822 -.long 1758581177,1758581177 -.long 0,0 -.long 753790401,753790401 -.long 1612718144,1612718144 -.long 536673507,536673507 -.long 3367088505,3367088505 -.long 3982187446,3982187446 -.long 3194645204,3194645204 -.long 1187761037,1187761037 -.long 3653156455,3653156455 -.long 1262041458,1262041458 -.long 3729410708,3729410708 -.long 3561770136,3561770136 -.long 3898103984,3898103984 -.long 1255133061,1255133061 -.long 1808847035,1808847035 -.long 720367557,720367557 -.long 3853167183,3853167183 -.long 385612781,385612781 -.long 3309519750,3309519750 -.long 3612167578,3612167578 -.long 1429418854,1429418854 -.long 2491778321,2491778321 -.long 3477423498,3477423498 -.long 284817897,284817897 -.long 100794884,100794884 -.long 2172616702,2172616702 -.long 4031795360,4031795360 -.long 1144798328,1144798328 -.long 3131023141,3131023141 -.long 3819481163,3819481163 -.long 4082192802,4082192802 -.long 4272137053,4272137053 -.long 3225436288,3225436288 -.long 2324664069,2324664069 -.long 2912064063,2912064063 -.long 3164445985,3164445985 -.long 1211644016,1211644016 -.long 83228145,83228145 -.long 3753688163,3753688163 -.long 3249976951,3249976951 -.long 1977277103,1977277103 -.long 1663115586,1663115586 -.long 806359072,806359072 -.long 452984805,452984805 -.long 
250868733,250868733 -.long 1842533055,1842533055 -.long 1288555905,1288555905 -.long 336333848,336333848 -.long 890442534,890442534 -.long 804056259,804056259 -.long 3781124030,3781124030 -.long 2727843637,2727843637 -.long 3427026056,3427026056 -.long 957814574,957814574 -.long 1472513171,1472513171 -.long 4071073621,4071073621 -.long 2189328124,2189328124 -.long 1195195770,1195195770 -.long 2892260552,2892260552 -.long 3881655738,3881655738 -.long 723065138,723065138 -.long 2507371494,2507371494 -.long 2690670784,2690670784 -.long 2558624025,2558624025 -.long 3511635870,3511635870 -.long 2145180835,2145180835 -.long 1713513028,1713513028 -.long 2116692564,2116692564 -.long 2878378043,2878378043 -.long 2206763019,2206763019 -.long 3393603212,3393603212 -.long 703524551,703524551 -.long 3552098411,3552098411 -.long 1007948840,1007948840 -.long 2044649127,2044649127 -.long 3797835452,3797835452 -.long 487262998,487262998 -.long 1994120109,1994120109 -.long 1004593371,1004593371 -.long 1446130276,1446130276 -.long 1312438900,1312438900 -.long 503974420,503974420 -.long 3679013266,3679013266 -.long 168166924,168166924 -.long 1814307912,1814307912 -.long 3831258296,3831258296 -.long 1573044895,1573044895 -.long 1859376061,1859376061 -.long 4021070915,4021070915 -.long 2791465668,2791465668 -.long 2828112185,2828112185 -.long 2761266481,2761266481 -.long 937747667,937747667 -.long 2339994098,2339994098 -.long 854058965,854058965 -.long 1137232011,1137232011 -.long 1496790894,1496790894 -.long 3077402074,3077402074 -.long 2358086913,2358086913 -.long 1691735473,1691735473 -.long 3528347292,3528347292 -.long 3769215305,3769215305 -.long 3027004632,3027004632 -.long 4199962284,4199962284 -.long 133494003,133494003 -.long 636152527,636152527 -.long 2942657994,2942657994 -.long 2390391540,2390391540 -.long 3920539207,3920539207 -.long 403179536,403179536 -.long 3585784431,3585784431 -.long 2289596656,2289596656 -.long 1864705354,1864705354 -.long 1915629148,1915629148 -.long 
605822008,605822008 -.long 4054230615,4054230615 -.long 3350508659,3350508659 -.long 1371981463,1371981463 -.long 602466507,602466507 -.long 2094914977,2094914977 -.long 2624877800,2624877800 -.long 555687742,555687742 -.long 3712699286,3712699286 -.long 3703422305,3703422305 -.long 2257292045,2257292045 -.long 2240449039,2240449039 -.long 2423288032,2423288032 -.long 1111375484,1111375484 -.long 3300242801,3300242801 -.long 2858837708,2858837708 -.long 3628615824,3628615824 -.long 84083462,84083462 -.long 32962295,32962295 -.long 302911004,302911004 -.long 2741068226,2741068226 -.long 1597322602,1597322602 -.long 4183250862,4183250862 -.long 3501832553,3501832553 -.long 2441512471,2441512471 -.long 1489093017,1489093017 -.long 656219450,656219450 -.long 3114180135,3114180135 -.long 954327513,954327513 -.long 335083755,335083755 -.long 3013122091,3013122091 -.long 856756514,856756514 -.long 3144247762,3144247762 -.long 1893325225,1893325225 -.long 2307821063,2307821063 -.long 2811532339,2811532339 -.long 3063651117,3063651117 -.long 572399164,572399164 -.long 2458355477,2458355477 -.long 552200649,552200649 -.long 1238290055,1238290055 -.long 4283782570,4283782570 -.long 2015897680,2015897680 -.long 2061492133,2061492133 -.long 2408352771,2408352771 -.long 4171342169,4171342169 -.long 2156497161,2156497161 -.long 386731290,386731290 -.long 3669999461,3669999461 -.long 837215959,837215959 -.long 3326231172,3326231172 -.long 3093850320,3093850320 -.long 3275833730,3275833730 -.long 2962856233,2962856233 -.long 1999449434,1999449434 -.long 286199582,286199582 -.long 3417354363,3417354363 -.long 4233385128,4233385128 -.long 3602627437,3602627437 -.long 974525996,974525996 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 
9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 
99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 
186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.long 1,2,4,8 -.long 16,32,64,128 -.long 27,54,0,0 -.long 0,0,0,0 -.size _x86_AES_encrypt,.-_x86_AES_encrypt -.globl aes_nohw_encrypt -.hidden aes_nohw_encrypt -.type aes_nohw_encrypt,@function -.align 16 -aes_nohw_encrypt: -.L_aes_nohw_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 28(%esp),%edi - movl %esp,%eax - subl $36,%esp - andl $-64,%esp - leal -127(%edi),%ebx - subl %esp,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esp - addl $4,%esp - movl %eax,28(%esp) - call .L004pic_point -.L004pic_point: - popl %ebp - leal OPENSSL_ia32cap_P-.L004pic_point(%ebp),%eax - leal .LAES_Te-.L004pic_point(%ebp),%ebp - leal 764(%esp),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - btl $25,(%eax) - jnc .L005x86 - movq (%esi),%mm0 - movq 8(%esi),%mm4 - call _sse_AES_encrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 16 -.L005x86: - movl %ebp,24(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - call _x86_AES_encrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_nohw_encrypt,.-.L_aes_nohw_encrypt_begin -.hidden _x86_AES_decrypt_compact -.type _x86_AES_decrypt_compact,@function -.align 16 -_x86_AES_decrypt_compact: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl -128(%ebp),%edi - movl 
-96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi -.align 16 -.L006loop: - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl -128(%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - shrl $24,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%eax - subl %edi,%esi - andl $4278124286,%eax - andl $454761243,%esi - xorl %esi,%eax - movl $2155905152,%edi - andl %eax,%edi - movl %edi,%esi - shrl $7,%edi - leal (%eax,%eax,1),%ebx - subl %edi,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %ecx,%eax - xorl %esi,%ebx - movl 
$2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %ecx,%ebx - roll $8,%ecx - xorl %esi,%ebp - xorl %eax,%ecx - xorl %ebp,%eax - xorl %ebx,%ecx - xorl %ebp,%ebx - roll $24,%eax - xorl %ebp,%ecx - roll $16,%ebx - xorl %eax,%ecx - roll $8,%ebp - xorl %ebx,%ecx - movl 4(%esp),%eax - xorl %ebp,%ecx - movl %ecx,12(%esp) - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebx - subl %edi,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %esi,%ebx - movl $2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %edx,%ebx - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %edx,%ecx - roll $8,%edx - xorl %esi,%ebp - xorl %ebx,%edx - xorl %ebp,%ebx - xorl %ecx,%edx - xorl %ebp,%ecx - roll $24,%ebx - xorl %ebp,%edx - roll $16,%ecx - xorl %ebx,%edx - roll $8,%ebp - xorl %ecx,%edx - movl 8(%esp),%ebx - xorl %ebp,%edx - movl %edx,16(%esp) - movl $2155905152,%edi - andl %eax,%edi - movl %edi,%esi - shrl $7,%edi - leal (%eax,%eax,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%edx - subl %edi,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %eax,%ecx - xorl %esi,%edx - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %eax,%edx - roll $8,%eax - xorl %esi,%ebp - xorl %ecx,%eax - xorl %ebp,%ecx - xorl %edx,%eax - xorl %ebp,%edx - roll $24,%ecx - xorl %ebp,%eax - roll $16,%edx - xorl %ecx,%eax - roll $8,%ebp - xorl %edx,%eax - 
xorl %ebp,%eax - movl $2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%edx - subl %edi,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %ebx,%ecx - xorl %esi,%edx - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %ebx,%edx - roll $8,%ebx - xorl %esi,%ebp - xorl %ecx,%ebx - xorl %ebp,%ecx - xorl %edx,%ebx - xorl %ebp,%edx - roll $24,%ecx - xorl %ebp,%ebx - roll $16,%edx - xorl %ecx,%ebx - roll $8,%ebp - xorl %edx,%ebx - movl 12(%esp),%ecx - xorl %ebp,%ebx - movl 16(%esp),%edx - movl 20(%esp),%edi - movl 28(%esp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb .L006loop - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl 
-128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl -128(%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - shrl $24,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl 4(%esp),%eax - xorl 16(%edi),%eax - xorl 20(%edi),%ebx - xorl 24(%edi),%ecx - xorl 28(%edi),%edx - ret -.size _x86_AES_decrypt_compact,.-_x86_AES_decrypt_compact -.hidden _sse_AES_decrypt_compact -.type _sse_AES_decrypt_compact,@function -.align 16 -_sse_AES_decrypt_compact: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl $454761243,%eax - movl %eax,8(%esp) - movl %eax,12(%esp) - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx -.align 16 -.L007loop: - pshufw $12,%mm0,%mm1 - pshufw $9,%mm4,%mm5 - movd %mm1,%eax - movd %mm5,%ebx - movl %edi,20(%esp) - movzbl %al,%esi - movzbl %ah,%edx - pshufw $6,%mm0,%mm2 - movzbl -128(%ebp,%esi,1),%ecx - movzbl %bl,%edi - movzbl -128(%ebp,%edx,1),%edx - shrl $16,%eax - shll $8,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $16,%esi - pshufw $3,%mm4,%mm6 - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $24,%esi - shrl $16,%ebx - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $8,%esi - movd %mm2,%eax - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - shll $16,%esi - movd %mm6,%ebx - movd %ecx,%mm0 - movzbl -128(%ebp,%edi,1),%ecx - movzbl 
%al,%edi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $16,%esi - shrl $16,%eax - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shrl $16,%ebx - shll $8,%esi - movd %edx,%mm1 - movzbl -128(%ebp,%edi,1),%edx - movzbl %bh,%edi - shll $24,%edx - andl $255,%ebx - orl %esi,%edx - punpckldq %mm1,%mm0 - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $8,%esi - movzbl %ah,%eax - movzbl -128(%ebp,%ebx,1),%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - orl %ebx,%edx - shll $16,%esi - movzbl -128(%ebp,%eax,1),%eax - orl %esi,%edx - shll $24,%eax - orl %eax,%ecx - movl 20(%esp),%edi - movd %edx,%mm4 - movd %ecx,%mm5 - punpckldq %mm5,%mm4 - addl $16,%edi - cmpl 24(%esp),%edi - ja .L008out - movq %mm0,%mm3 - movq %mm4,%mm7 - pshufw $228,%mm0,%mm2 - pshufw $228,%mm4,%mm6 - movq %mm0,%mm1 - movq %mm4,%mm5 - pshufw $177,%mm0,%mm0 - pshufw $177,%mm4,%mm4 - pslld $8,%mm2 - pslld $8,%mm6 - psrld $8,%mm3 - psrld $8,%mm7 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pslld $16,%mm2 - pslld $16,%mm6 - psrld $16,%mm3 - psrld $16,%mm7 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - movq 8(%esp),%mm3 - pxor %mm2,%mm2 - pxor %mm6,%mm6 - pcmpgtb %mm1,%mm2 - pcmpgtb %mm5,%mm6 - pand %mm3,%mm2 - pand %mm3,%mm6 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm2,%mm1 - pxor %mm6,%mm5 - movq %mm1,%mm3 - movq %mm5,%mm7 - movq %mm1,%mm2 - movq %mm5,%mm6 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pslld $24,%mm3 - pslld $24,%mm7 - psrld $8,%mm2 - psrld $8,%mm6 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq 8(%esp),%mm2 - pxor %mm3,%mm3 - pxor %mm7,%mm7 - pcmpgtb %mm1,%mm3 - pcmpgtb %mm5,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm3,%mm1 - pxor %mm7,%mm5 - pshufw $177,%mm1,%mm3 - pshufw $177,%mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor 
%mm3,%mm3 - pxor %mm7,%mm7 - pcmpgtb %mm1,%mm3 - pcmpgtb %mm5,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm3,%mm1 - pxor %mm7,%mm5 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movq %mm1,%mm3 - movq %mm5,%mm7 - pshufw $177,%mm1,%mm2 - pshufw $177,%mm5,%mm6 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pslld $8,%mm1 - pslld $8,%mm5 - psrld $8,%mm3 - psrld $8,%mm7 - movq (%edi),%mm2 - movq 8(%edi),%mm6 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - movl -128(%ebp),%eax - pslld $16,%mm1 - pslld $16,%mm5 - movl -64(%ebp),%ebx - psrld $16,%mm3 - psrld $16,%mm7 - movl (%ebp),%ecx - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movl 64(%ebp),%edx - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - jmp .L007loop -.align 16 -.L008out: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - ret -.size _sse_AES_decrypt_compact,.-_sse_AES_decrypt_compact -.hidden _x86_AES_decrypt -.type _x86_AES_decrypt,@function -.align 16 -_x86_AES_decrypt: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) -.align 16 -.L009loop: - movl %eax,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %dh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %ebx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,4(%esp) - - movl %ebx,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %ah,%edi - xorl 3(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,8(%esp) - - movl %ecx,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %bh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - - movl 20(%esp),%edi - andl 
$255,%edx - movl (%ebp,%edx,8),%edx - movzbl %ch,%ecx - xorl 3(%ebp,%ecx,8),%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - xorl 2(%ebp,%ebx,8),%edx - movl 8(%esp),%ebx - shrl $24,%eax - xorl 1(%ebp,%eax,8),%edx - movl 4(%esp),%eax - - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb .L009loop - leal 2176(%ebp),%ebp - movl -128(%ebp),%edi - movl -96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi - leal -128(%ebp),%ebp - movl %eax,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movzbl (%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl (%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl (%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - shrl $24,%eax - movzbl 
(%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl 4(%esp),%eax - leal -2048(%ebp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - ret -.align 64 -.LAES_Td: -.long 1353184337,1353184337 -.long 1399144830,1399144830 -.long 3282310938,3282310938 -.long 2522752826,2522752826 -.long 3412831035,3412831035 -.long 4047871263,4047871263 -.long 2874735276,2874735276 -.long 2466505547,2466505547 -.long 1442459680,1442459680 -.long 4134368941,4134368941 -.long 2440481928,2440481928 -.long 625738485,625738485 -.long 4242007375,4242007375 -.long 3620416197,3620416197 -.long 2151953702,2151953702 -.long 2409849525,2409849525 -.long 1230680542,1230680542 -.long 1729870373,1729870373 -.long 2551114309,2551114309 -.long 3787521629,3787521629 -.long 41234371,41234371 -.long 317738113,317738113 -.long 2744600205,2744600205 -.long 3338261355,3338261355 -.long 3881799427,3881799427 -.long 2510066197,2510066197 -.long 3950669247,3950669247 -.long 3663286933,3663286933 -.long 763608788,763608788 -.long 3542185048,3542185048 -.long 694804553,694804553 -.long 1154009486,1154009486 -.long 1787413109,1787413109 -.long 2021232372,2021232372 -.long 1799248025,1799248025 -.long 3715217703,3715217703 -.long 3058688446,3058688446 -.long 397248752,397248752 -.long 1722556617,1722556617 -.long 3023752829,3023752829 -.long 407560035,407560035 -.long 2184256229,2184256229 -.long 1613975959,1613975959 -.long 1165972322,1165972322 -.long 3765920945,3765920945 -.long 2226023355,2226023355 -.long 480281086,480281086 -.long 2485848313,2485848313 -.long 1483229296,1483229296 -.long 436028815,436028815 -.long 2272059028,2272059028 -.long 3086515026,3086515026 -.long 601060267,601060267 -.long 3791801202,3791801202 -.long 1468997603,1468997603 -.long 715871590,715871590 -.long 120122290,120122290 -.long 63092015,63092015 -.long 2591802758,2591802758 -.long 2768779219,2768779219 -.long 4068943920,4068943920 -.long 2997206819,2997206819 -.long 
3127509762,3127509762 -.long 1552029421,1552029421 -.long 723308426,723308426 -.long 2461301159,2461301159 -.long 4042393587,4042393587 -.long 2715969870,2715969870 -.long 3455375973,3455375973 -.long 3586000134,3586000134 -.long 526529745,526529745 -.long 2331944644,2331944644 -.long 2639474228,2639474228 -.long 2689987490,2689987490 -.long 853641733,853641733 -.long 1978398372,1978398372 -.long 971801355,971801355 -.long 2867814464,2867814464 -.long 111112542,111112542 -.long 1360031421,1360031421 -.long 4186579262,4186579262 -.long 1023860118,1023860118 -.long 2919579357,2919579357 -.long 1186850381,1186850381 -.long 3045938321,3045938321 -.long 90031217,90031217 -.long 1876166148,1876166148 -.long 4279586912,4279586912 -.long 620468249,620468249 -.long 2548678102,2548678102 -.long 3426959497,3426959497 -.long 2006899047,2006899047 -.long 3175278768,3175278768 -.long 2290845959,2290845959 -.long 945494503,945494503 -.long 3689859193,3689859193 -.long 1191869601,1191869601 -.long 3910091388,3910091388 -.long 3374220536,3374220536 -.long 0,0 -.long 2206629897,2206629897 -.long 1223502642,1223502642 -.long 2893025566,2893025566 -.long 1316117100,1316117100 -.long 4227796733,4227796733 -.long 1446544655,1446544655 -.long 517320253,517320253 -.long 658058550,658058550 -.long 1691946762,1691946762 -.long 564550760,564550760 -.long 3511966619,3511966619 -.long 976107044,976107044 -.long 2976320012,2976320012 -.long 266819475,266819475 -.long 3533106868,3533106868 -.long 2660342555,2660342555 -.long 1338359936,1338359936 -.long 2720062561,2720062561 -.long 1766553434,1766553434 -.long 370807324,370807324 -.long 179999714,179999714 -.long 3844776128,3844776128 -.long 1138762300,1138762300 -.long 488053522,488053522 -.long 185403662,185403662 -.long 2915535858,2915535858 -.long 3114841645,3114841645 -.long 3366526484,3366526484 -.long 2233069911,2233069911 -.long 1275557295,1275557295 -.long 3151862254,3151862254 -.long 4250959779,4250959779 -.long 2670068215,2670068215 
-.long 3170202204,3170202204 -.long 3309004356,3309004356 -.long 880737115,880737115 -.long 1982415755,1982415755 -.long 3703972811,3703972811 -.long 1761406390,1761406390 -.long 1676797112,1676797112 -.long 3403428311,3403428311 -.long 277177154,277177154 -.long 1076008723,1076008723 -.long 538035844,538035844 -.long 2099530373,2099530373 -.long 4164795346,4164795346 -.long 288553390,288553390 -.long 1839278535,1839278535 -.long 1261411869,1261411869 -.long 4080055004,4080055004 -.long 3964831245,3964831245 -.long 3504587127,3504587127 -.long 1813426987,1813426987 -.long 2579067049,2579067049 -.long 4199060497,4199060497 -.long 577038663,577038663 -.long 3297574056,3297574056 -.long 440397984,440397984 -.long 3626794326,3626794326 -.long 4019204898,4019204898 -.long 3343796615,3343796615 -.long 3251714265,3251714265 -.long 4272081548,4272081548 -.long 906744984,906744984 -.long 3481400742,3481400742 -.long 685669029,685669029 -.long 646887386,646887386 -.long 2764025151,2764025151 -.long 3835509292,3835509292 -.long 227702864,227702864 -.long 2613862250,2613862250 -.long 1648787028,1648787028 -.long 3256061430,3256061430 -.long 3904428176,3904428176 -.long 1593260334,1593260334 -.long 4121936770,4121936770 -.long 3196083615,3196083615 -.long 2090061929,2090061929 -.long 2838353263,2838353263 -.long 3004310991,3004310991 -.long 999926984,999926984 -.long 2809993232,2809993232 -.long 1852021992,1852021992 -.long 2075868123,2075868123 -.long 158869197,158869197 -.long 4095236462,4095236462 -.long 28809964,28809964 -.long 2828685187,2828685187 -.long 1701746150,1701746150 -.long 2129067946,2129067946 -.long 147831841,147831841 -.long 3873969647,3873969647 -.long 3650873274,3650873274 -.long 3459673930,3459673930 -.long 3557400554,3557400554 -.long 3598495785,3598495785 -.long 2947720241,2947720241 -.long 824393514,824393514 -.long 815048134,815048134 -.long 3227951669,3227951669 -.long 935087732,935087732 -.long 2798289660,2798289660 -.long 2966458592,2966458592 
-.long 366520115,366520115 -.long 1251476721,1251476721 -.long 4158319681,4158319681 -.long 240176511,240176511 -.long 804688151,804688151 -.long 2379631990,2379631990 -.long 1303441219,1303441219 -.long 1414376140,1414376140 -.long 3741619940,3741619940 -.long 3820343710,3820343710 -.long 461924940,461924940 -.long 3089050817,3089050817 -.long 2136040774,2136040774 -.long 82468509,82468509 -.long 1563790337,1563790337 -.long 1937016826,1937016826 -.long 776014843,776014843 -.long 1511876531,1511876531 -.long 1389550482,1389550482 -.long 861278441,861278441 -.long 323475053,323475053 -.long 2355222426,2355222426 -.long 2047648055,2047648055 -.long 2383738969,2383738969 -.long 2302415851,2302415851 -.long 3995576782,3995576782 -.long 902390199,902390199 -.long 3991215329,3991215329 -.long 1018251130,1018251130 -.long 1507840668,1507840668 -.long 1064563285,1064563285 -.long 2043548696,2043548696 -.long 3208103795,3208103795 -.long 3939366739,3939366739 -.long 1537932639,1537932639 -.long 342834655,342834655 -.long 2262516856,2262516856 -.long 2180231114,2180231114 -.long 1053059257,1053059257 -.long 741614648,741614648 -.long 1598071746,1598071746 -.long 1925389590,1925389590 -.long 203809468,203809468 -.long 2336832552,2336832552 -.long 1100287487,1100287487 -.long 1895934009,1895934009 -.long 3736275976,3736275976 -.long 2632234200,2632234200 -.long 2428589668,2428589668 -.long 1636092795,1636092795 -.long 1890988757,1890988757 -.long 1952214088,1952214088 -.long 1113045200,1113045200 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 
247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 
238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 
200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.size _x86_AES_decrypt,.-_x86_AES_decrypt -.globl aes_nohw_decrypt -.hidden aes_nohw_decrypt -.type aes_nohw_decrypt,@function -.align 16 -aes_nohw_decrypt: -.L_aes_nohw_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 28(%esp),%edi - movl %esp,%eax - subl $36,%esp - andl $-64,%esp - leal -127(%edi),%ebx - subl %esp,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esp - addl $4,%esp - movl %eax,28(%esp) - call .L010pic_point -.L010pic_point: - popl %ebp - leal OPENSSL_ia32cap_P-.L010pic_point(%ebp),%eax - leal .LAES_Td-.L010pic_point(%ebp),%ebp - leal 764(%esp),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - btl $25,(%eax) - jnc .L011x86 - movq (%esi),%mm0 - movq 8(%esi),%mm4 - call _sse_AES_decrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 16 -.L011x86: - movl %ebp,24(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - call _x86_AES_decrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_nohw_decrypt,.-.L_aes_nohw_decrypt_begin -.globl aes_nohw_cbc_encrypt -.hidden aes_nohw_cbc_encrypt -.type aes_nohw_cbc_encrypt,@function -.align 16 -aes_nohw_cbc_encrypt: -.L_aes_nohw_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 28(%esp),%ecx - cmpl $0,%ecx - je .L012drop_out - call .L013pic_point -.L013pic_point: - popl %ebp - leal OPENSSL_ia32cap_P-.L013pic_point(%ebp),%eax - cmpl $0,40(%esp) - leal .LAES_Te-.L013pic_point(%ebp),%ebp - jne .L014picked_te - leal .LAES_Td-.LAES_Te(%ebp),%ebp -.L014picked_te: - pushfl - cld - cmpl $512,%ecx - jb .L015slow_way - testl $15,%ecx - jnz 
.L015slow_way - btl $28,(%eax) - jc .L015slow_way - leal -324(%esp),%esi - andl $-64,%esi - movl %ebp,%eax - leal 2304(%ebp),%ebx - movl %esi,%edx - andl $4095,%eax - andl $4095,%ebx - andl $4095,%edx - cmpl %ebx,%edx - jb .L016tbl_break_out - subl %ebx,%edx - subl %edx,%esi - jmp .L017tbl_ok -.align 4 -.L016tbl_break_out: - subl %eax,%edx - andl $4095,%edx - addl $384,%edx - subl %edx,%esi -.align 4 -.L017tbl_ok: - leal 24(%esp),%edx - xchgl %esi,%esp - addl $4,%esp - movl %ebp,24(%esp) - movl %esi,28(%esp) - movl (%edx),%eax - movl 4(%edx),%ebx - movl 12(%edx),%edi - movl 16(%edx),%esi - movl 20(%edx),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edi,44(%esp) - movl %esi,48(%esp) - movl $0,316(%esp) - movl %edi,%ebx - movl $61,%ecx - subl %ebp,%ebx - movl %edi,%esi - andl $4095,%ebx - leal 76(%esp),%edi - cmpl $2304,%ebx - jb .L018do_copy - cmpl $3852,%ebx - jb .L019skip_copy -.align 4 -.L018do_copy: - movl %edi,44(%esp) -.long 2784229001 -.L019skip_copy: - movl $16,%edi -.align 4 -.L020prefetch_tbl: - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%esi - leal 128(%ebp),%ebp - subl $1,%edi - jnz .L020prefetch_tbl - subl $2048,%ebp - movl 32(%esp),%esi - movl 48(%esp),%edi - cmpl $0,%edx - je .L021fast_decrypt - movl (%edi),%eax - movl 4(%edi),%ebx -.align 16 -.L022fast_enc_loop: - movl 8(%edi),%ecx - movl 12(%edi),%edx - xorl (%esi),%eax - xorl 4(%esi),%ebx - xorl 8(%esi),%ecx - xorl 12(%esi),%edx - movl 44(%esp),%edi - call _x86_AES_encrypt - movl 32(%esp),%esi - movl 36(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - leal 16(%esi),%esi - movl 40(%esp),%ecx - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz .L022fast_enc_loop - movl 48(%esp),%esi - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - cmpl $0,316(%esp) - movl 44(%esp),%edi - 
je .L023skip_ezero - movl $60,%ecx - xorl %eax,%eax -.align 4 -.long 2884892297 -.L023skip_ezero: - movl 28(%esp),%esp - popfl -.L012drop_out: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L021fast_decrypt: - cmpl 36(%esp),%esi - je .L024fast_dec_in_place - movl %edi,52(%esp) -.align 4 -.align 16 -.L025fast_dec_loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl 44(%esp),%edi - call _x86_AES_decrypt - movl 52(%esp),%edi - movl 40(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 36(%esp),%edi - movl 32(%esp),%esi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - movl %esi,52(%esp) - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edi - movl %edi,36(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz .L025fast_dec_loop - movl 52(%esp),%edi - movl 48(%esp),%esi - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - jmp .L026fast_dec_out -.align 16 -.L024fast_dec_in_place: -.L027fast_dec_in_place_loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - leal 60(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 44(%esp),%edi - call _x86_AES_decrypt - movl 48(%esp),%edi - movl 36(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - leal 16(%esi),%esi - movl %esi,36(%esp) - leal 60(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 32(%esp),%esi - movl 40(%esp),%ecx - leal 16(%esi),%esi - movl %esi,32(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz 
.L027fast_dec_in_place_loop -.align 4 -.L026fast_dec_out: - cmpl $0,316(%esp) - movl 44(%esp),%edi - je .L028skip_dzero - movl $60,%ecx - xorl %eax,%eax -.align 4 -.long 2884892297 -.L028skip_dzero: - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L015slow_way: - movl (%eax),%eax - movl 36(%esp),%edi - leal -80(%esp),%esi - andl $-64,%esi - leal -143(%edi),%ebx - subl %esi,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esi - leal 768(%esi),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - leal 24(%esp),%edx - xchgl %esi,%esp - addl $4,%esp - movl %ebp,24(%esp) - movl %esi,28(%esp) - movl %eax,52(%esp) - movl (%edx),%eax - movl 4(%edx),%ebx - movl 16(%edx),%esi - movl 20(%edx),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edi,44(%esp) - movl %esi,48(%esp) - movl %esi,%edi - movl %eax,%esi - cmpl $0,%edx - je .L029slow_decrypt - cmpl $16,%ecx - movl %ebx,%edx - jb .L030slow_enc_tail - btl $25,52(%esp) - jnc .L031slow_enc_x86 - movq (%edi),%mm0 - movq 8(%edi),%mm4 -.align 16 -.L032slow_enc_loop_sse: - pxor (%esi),%mm0 - pxor 8(%esi),%mm4 - movl 44(%esp),%edi - call _sse_AES_encrypt_compact - movl 32(%esp),%esi - movl 36(%esp),%edi - movl 40(%esp),%ecx - movq %mm0,(%edi) - movq %mm4,8(%edi) - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - subl $16,%ecx - cmpl $16,%ecx - movl %ecx,40(%esp) - jae .L032slow_enc_loop_sse - testl $15,%ecx - jnz .L030slow_enc_tail - movl 48(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L031slow_enc_x86: - movl (%edi),%eax - movl 4(%edi),%ebx -.align 4 -.L033slow_enc_loop_x86: - movl 8(%edi),%ecx - movl 12(%edi),%edx - xorl (%esi),%eax - xorl 4(%esi),%ebx - xorl 8(%esi),%ecx - xorl 12(%esi),%edx - movl 44(%esp),%edi - call _x86_AES_encrypt_compact - movl 32(%esp),%esi - movl 
36(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - subl $16,%ecx - cmpl $16,%ecx - movl %ecx,40(%esp) - jae .L033slow_enc_loop_x86 - testl $15,%ecx - jnz .L030slow_enc_tail - movl 48(%esp),%esi - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L030slow_enc_tail: - emms - movl %edx,%edi - movl $16,%ebx - subl %ecx,%ebx - cmpl %esi,%edi - je .L034enc_in_place -.align 4 -.long 2767451785 - jmp .L035enc_skip_in_place -.L034enc_in_place: - leal (%edi,%ecx,1),%edi -.L035enc_skip_in_place: - movl %ebx,%ecx - xorl %eax,%eax -.align 4 -.long 2868115081 - movl 48(%esp),%edi - movl %edx,%esi - movl (%edi),%eax - movl 4(%edi),%ebx - movl $16,40(%esp) - jmp .L033slow_enc_loop_x86 -.align 16 -.L029slow_decrypt: - btl $25,52(%esp) - jnc .L036slow_dec_loop_x86 -.align 4 -.L037slow_dec_loop_sse: - movq (%esi),%mm0 - movq 8(%esi),%mm4 - movl 44(%esp),%edi - call _sse_AES_decrypt_compact - movl 32(%esp),%esi - leal 60(%esp),%eax - movl 36(%esp),%ebx - movl 40(%esp),%ecx - movl 48(%esp),%edi - movq (%esi),%mm1 - movq 8(%esi),%mm5 - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - movq %mm1,(%edi) - movq %mm5,8(%edi) - subl $16,%ecx - jc .L038slow_dec_partial_sse - movq %mm0,(%ebx) - movq %mm4,8(%ebx) - leal 16(%ebx),%ebx - movl %ebx,36(%esp) - leal 16(%esi),%esi - movl %esi,32(%esp) - movl %ecx,40(%esp) - jnz .L037slow_dec_loop_sse - emms - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L038slow_dec_partial_sse: - movq %mm0,(%eax) - movq %mm4,8(%eax) - emms - addl $16,%ecx - movl %ebx,%edi - movl %eax,%esi -.align 4 -.long 2767451785 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - 
pushfl -.align 16 -.L036slow_dec_loop_x86: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - leal 60(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 44(%esp),%edi - call _x86_AES_decrypt_compact - movl 48(%esp),%edi - movl 40(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - subl $16,%esi - jc .L039slow_dec_partial_x86 - movl %esi,40(%esp) - movl 36(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - leal 16(%esi),%esi - movl %esi,36(%esp) - leal 60(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 32(%esp),%esi - leal 16(%esi),%esi - movl %esi,32(%esp) - jnz .L036slow_dec_loop_x86 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 16 -.L039slow_dec_partial_x86: - leal 60(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 32(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - movl 36(%esp),%edi - leal 60(%esp),%esi -.align 4 -.long 2767451785 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_nohw_cbc_encrypt,.-.L_aes_nohw_cbc_encrypt_begin -.hidden _x86_AES_set_encrypt_key -.type _x86_AES_set_encrypt_key,@function -.align 16 -_x86_AES_set_encrypt_key: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 24(%esp),%esi - movl 32(%esp),%edi - testl $-1,%esi - jz .L040badpointer - testl $-1,%edi - jz .L040badpointer - call .L041pic_point -.L041pic_point: - popl %ebp - leal .LAES_Te-.L041pic_point(%ebp),%ebp - leal 2176(%ebp),%ebp - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - 
movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx - movl 28(%esp),%ecx - cmpl $128,%ecx - je .L04210rounds - cmpl $192,%ecx - je .L04312rounds - cmpl $256,%ecx - je .L04414rounds - movl $-2,%eax - jmp .L045exit -.L04210rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - xorl %ecx,%ecx - jmp .L04610shortcut -.align 4 -.L04710loop: - movl (%edi),%eax - movl 12(%edi),%edx -.L04610shortcut: - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,16(%edi) - xorl 4(%edi),%eax - movl %eax,20(%edi) - xorl 8(%edi),%eax - movl %eax,24(%edi) - xorl 12(%edi),%eax - movl %eax,28(%edi) - incl %ecx - addl $16,%edi - cmpl $10,%ecx - jl .L04710loop - movl $10,80(%edi) - xorl %eax,%eax - jmp .L045exit -.L04312rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 16(%esi),%ecx - movl 20(%esi),%edx - movl %ecx,16(%edi) - movl %edx,20(%edi) - xorl %ecx,%ecx - jmp .L04812shortcut -.align 4 -.L04912loop: - movl (%edi),%eax - movl 20(%edi),%edx -.L04812shortcut: - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,24(%edi) - xorl 4(%edi),%eax - movl %eax,28(%edi) - xorl 
8(%edi),%eax - movl %eax,32(%edi) - xorl 12(%edi),%eax - movl %eax,36(%edi) - cmpl $7,%ecx - je .L05012break - incl %ecx - xorl 16(%edi),%eax - movl %eax,40(%edi) - xorl 20(%edi),%eax - movl %eax,44(%edi) - addl $24,%edi - jmp .L04912loop -.L05012break: - movl $12,72(%edi) - xorl %eax,%eax - jmp .L045exit -.L04414rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - movl %eax,16(%edi) - movl %ebx,20(%edi) - movl %ecx,24(%edi) - movl %edx,28(%edi) - xorl %ecx,%ecx - jmp .L05114shortcut -.align 4 -.L05214loop: - movl 28(%edi),%edx -.L05114shortcut: - movl (%edi),%eax - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,32(%edi) - xorl 4(%edi),%eax - movl %eax,36(%edi) - xorl 8(%edi),%eax - movl %eax,40(%edi) - xorl 12(%edi),%eax - movl %eax,44(%edi) - cmpl $6,%ecx - je .L05314break - incl %ecx - movl %eax,%edx - movl 16(%edi),%eax - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - shll $8,%ebx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $16,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $24,%ebx - xorl %ebx,%eax - movl %eax,48(%edi) - xorl 20(%edi),%eax - movl %eax,52(%edi) - xorl 24(%edi),%eax - movl %eax,56(%edi) - xorl 28(%edi),%eax - movl %eax,60(%edi) - addl $32,%edi - jmp .L05214loop -.L05314break: - movl $14,48(%edi) - xorl %eax,%eax - jmp .L045exit -.L040badpointer: - movl $-1,%eax -.L045exit: - popl %edi - 
popl %esi - popl %ebx - popl %ebp - ret -.size _x86_AES_set_encrypt_key,.-_x86_AES_set_encrypt_key -.globl aes_nohw_set_encrypt_key -.hidden aes_nohw_set_encrypt_key -.type aes_nohw_set_encrypt_key,@function -.align 16 -aes_nohw_set_encrypt_key: -.L_aes_nohw_set_encrypt_key_begin: - call _x86_AES_set_encrypt_key - ret -.size aes_nohw_set_encrypt_key,.-.L_aes_nohw_set_encrypt_key_begin -.globl aes_nohw_set_decrypt_key -.hidden aes_nohw_set_decrypt_key -.type aes_nohw_set_decrypt_key,@function -.align 16 -aes_nohw_set_decrypt_key: -.L_aes_nohw_set_decrypt_key_begin: - call _x86_AES_set_encrypt_key - cmpl $0,%eax - je .L054proceed - ret -.L054proceed: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 28(%esp),%esi - movl 240(%esi),%ecx - leal (,%ecx,4),%ecx - leal (%esi,%ecx,4),%edi -.align 4 -.L055invert: - movl (%esi),%eax - movl 4(%esi),%ebx - movl (%edi),%ecx - movl 4(%edi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,(%esi) - movl %edx,4(%esi) - movl 8(%esi),%eax - movl 12(%esi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,8(%edi) - movl %ebx,12(%edi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - addl $16,%esi - subl $16,%edi - cmpl %edi,%esi - jne .L055invert - movl 28(%esp),%edi - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,28(%esp) - movl 16(%edi),%eax -.align 4 -.L056permute: - addl $16,%edi - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %esi,%ebx - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %eax,%ebx - xorl %esi,%ecx - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - xorl %eax,%ecx - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - roll $8,%eax - xorl %esi,%edx - movl 4(%edi),%ebp 
- xorl %ebx,%eax - xorl %edx,%ebx - xorl %ecx,%eax - roll $24,%ebx - xorl %edx,%ecx - xorl %edx,%eax - roll $16,%ecx - xorl %ebx,%eax - roll $8,%edx - xorl %ecx,%eax - movl %ebp,%ebx - xorl %edx,%eax - movl %eax,(%edi) - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %ebx,%ecx - xorl %esi,%edx - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - xorl %ebx,%edx - subl %ebp,%esi - andl $4278124286,%eax - andl $454761243,%esi - roll $8,%ebx - xorl %esi,%eax - movl 8(%edi),%ebp - xorl %ecx,%ebx - xorl %eax,%ecx - xorl %edx,%ebx - roll $24,%ecx - xorl %eax,%edx - xorl %eax,%ebx - roll $16,%edx - xorl %ecx,%ebx - roll $8,%eax - xorl %edx,%ebx - movl %ebp,%ecx - xorl %eax,%ebx - movl %ebx,4(%edi) - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %esi,%edx - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%eax - andl $454761243,%esi - xorl %ecx,%edx - xorl %esi,%eax - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - xorl %ecx,%eax - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - roll $8,%ecx - xorl %esi,%ebx - movl 12(%edi),%ebp - xorl %edx,%ecx - xorl %ebx,%edx - xorl %eax,%ecx - roll $24,%edx - xorl %ebx,%eax - xorl %ebx,%ecx - roll $16,%eax - xorl %edx,%ecx - roll $8,%ebx - xorl %eax,%ecx - movl %ebp,%edx - xorl %ebx,%ecx - movl %ecx,8(%edi) - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - 
andl $4278124286,%eax - andl $454761243,%esi - xorl %esi,%eax - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %edx,%eax - xorl %esi,%ebx - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - xorl %edx,%ebx - subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - roll $8,%edx - xorl %esi,%ecx - movl 16(%edi),%ebp - xorl %eax,%edx - xorl %ecx,%eax - xorl %ebx,%edx - roll $24,%eax - xorl %ecx,%ebx - xorl %ecx,%edx - roll $16,%ebx - xorl %eax,%edx - roll $8,%ecx - xorl %ebx,%edx - movl %ebp,%eax - xorl %ecx,%edx - movl %edx,12(%edi) - cmpl 28(%esp),%edi - jb .L056permute - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_nohw_set_decrypt_key,.-.L_aes_nohw_set_decrypt_key_begin -.byte 65,69,83,32,102,111,114,32,120,56,54,44,32,67,82,89 -.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114 -.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aesni-x86.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aesni-x86.S deleted file mode 100644 index a418869701..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/aesni-x86.S +++ /dev/null @@ -1,2513 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -#ifdef BORINGSSL_DISPATCH_TEST -#endif -.globl aes_hw_encrypt -.hidden aes_hw_encrypt -.type aes_hw_encrypt,@function -.align 16 -aes_hw_encrypt: -.L_aes_hw_encrypt_begin: -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call .L000pic -.L000pic: - popl %ebx - leal BORINGSSL_function_hit+1-.L000pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 4(%esp),%eax - movl 12(%esp),%edx - movups (%eax),%xmm2 - movl 240(%edx),%ecx - movl 8(%esp),%eax - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L001enc1_loop_1: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L001enc1_loop_1 -.byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.size aes_hw_encrypt,.-.L_aes_hw_encrypt_begin -.globl aes_hw_decrypt -.hidden aes_hw_decrypt -.type aes_hw_decrypt,@function -.align 16 -aes_hw_decrypt: -.L_aes_hw_decrypt_begin: - movl 4(%esp),%eax - movl 12(%esp),%edx - movups (%eax),%xmm2 - movl 240(%edx),%ecx - movl 8(%esp),%eax - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L002dec1_loop_2: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L002dec1_loop_2 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.size aes_hw_decrypt,.-.L_aes_hw_decrypt_begin -.hidden _aesni_encrypt2 -.type _aesni_encrypt2,@function -.align 16 -_aesni_encrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L003enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups 
-16(%edx,%ecx,1),%xmm0 - jnz .L003enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - ret -.size _aesni_encrypt2,.-_aesni_encrypt2 -.hidden _aesni_decrypt2 -.type _aesni_decrypt2,@function -.align 16 -_aesni_decrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L004dec2_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L004dec2_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 - ret -.size _aesni_decrypt2,.-_aesni_decrypt2 -.hidden _aesni_encrypt3 -.type _aesni_encrypt3,@function -.align 16 -_aesni_encrypt3: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L005enc3_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L005enc3_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 - ret -.size _aesni_encrypt3,.-_aesni_encrypt3 -.hidden _aesni_decrypt3 -.type _aesni_decrypt3,@function -.align 16 -_aesni_decrypt3: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L006dec3_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 
102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L006dec3_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 - ret -.size _aesni_decrypt3,.-_aesni_decrypt3 -.hidden _aesni_encrypt4 -.type _aesni_encrypt4,@function -.align 16 -_aesni_encrypt4: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - shll $4,%ecx - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L007enc4_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L007enc4_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 - ret -.size _aesni_encrypt4,.-_aesni_encrypt4 -.hidden _aesni_decrypt4 -.type _aesni_decrypt4,@function -.align 16 -_aesni_decrypt4: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - shll $4,%ecx - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L008dec4_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L008dec4_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 
102,15,56,223,224 -.byte 102,15,56,223,232 - ret -.size _aesni_decrypt4,.-_aesni_decrypt4 -.hidden _aesni_encrypt6 -.type _aesni_encrypt6,@function -.align 16 -_aesni_encrypt6: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,220,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,220,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 102,15,56,220,225 - pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L009_aesni_encrypt6_inner -.align 16 -.L010enc6_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.L009_aesni_encrypt6_inner: -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.L_aesni_encrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L010enc6_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 - ret -.size _aesni_encrypt6,.-_aesni_encrypt6 -.hidden _aesni_decrypt6 -.type _aesni_decrypt6,@function -.align 16 -_aesni_decrypt6: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,222,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,222,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 102,15,56,222,225 - pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L011_aesni_decrypt6_inner -.align 16 -.L012dec6_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.L011_aesni_decrypt6_inner: -.byte 102,15,56,222,233 -.byte 
102,15,56,222,241 -.byte 102,15,56,222,249 -.L_aesni_decrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L012dec6_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 - ret -.size _aesni_decrypt6,.-_aesni_decrypt6 -.globl aes_hw_ecb_encrypt -.hidden aes_hw_ecb_encrypt -.type aes_hw_ecb_encrypt,@function -.align 16 -aes_hw_ecb_encrypt: -.L_aes_hw_ecb_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - andl $-16,%eax - jz .L013ecb_ret - movl 240(%edx),%ecx - testl %ebx,%ebx - jz .L014ecb_decrypt - movl %edx,%ebp - movl %ecx,%ebx - cmpl $96,%eax - jb .L015ecb_enc_tail - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi - subl $96,%eax - jmp .L016ecb_enc_loop6_enter -.align 16 -.L017ecb_enc_loop6: - movups %xmm2,(%edi) - movdqu (%esi),%xmm2 - movups %xmm3,16(%edi) - movdqu 16(%esi),%xmm3 - movups %xmm4,32(%edi) - movdqu 32(%esi),%xmm4 - movups %xmm5,48(%edi) - movdqu 48(%esi),%xmm5 - movups %xmm6,64(%edi) - movdqu 64(%esi),%xmm6 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi -.L016ecb_enc_loop6_enter: - call _aesni_encrypt6 - movl %ebp,%edx - movl %ebx,%ecx - subl $96,%eax - jnc .L017ecb_enc_loop6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - movups %xmm7,80(%edi) - leal 96(%edi),%edi 
- addl $96,%eax - jz .L013ecb_ret -.L015ecb_enc_tail: - movups (%esi),%xmm2 - cmpl $32,%eax - jb .L018ecb_enc_one - movups 16(%esi),%xmm3 - je .L019ecb_enc_two - movups 32(%esi),%xmm4 - cmpl $64,%eax - jb .L020ecb_enc_three - movups 48(%esi),%xmm5 - je .L021ecb_enc_four - movups 64(%esi),%xmm6 - xorps %xmm7,%xmm7 - call _aesni_encrypt6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp .L013ecb_ret -.align 16 -.L018ecb_enc_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L022enc1_loop_3: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L022enc1_loop_3 -.byte 102,15,56,221,209 - movups %xmm2,(%edi) - jmp .L013ecb_ret -.align 16 -.L019ecb_enc_two: - call _aesni_encrypt2 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp .L013ecb_ret -.align 16 -.L020ecb_enc_three: - call _aesni_encrypt3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp .L013ecb_ret -.align 16 -.L021ecb_enc_four: - call _aesni_encrypt4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - jmp .L013ecb_ret -.align 16 -.L014ecb_decrypt: - movl %edx,%ebp - movl %ecx,%ebx - cmpl $96,%eax - jb .L023ecb_dec_tail - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi - subl $96,%eax - jmp .L024ecb_dec_loop6_enter -.align 16 -.L025ecb_dec_loop6: - movups %xmm2,(%edi) - movdqu (%esi),%xmm2 - movups %xmm3,16(%edi) - movdqu 16(%esi),%xmm3 - movups %xmm4,32(%edi) - movdqu 32(%esi),%xmm4 - movups %xmm5,48(%edi) - movdqu 48(%esi),%xmm5 - movups %xmm6,64(%edi) - movdqu 64(%esi),%xmm6 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi -.L024ecb_dec_loop6_enter: - call _aesni_decrypt6 - movl %ebp,%edx - movl %ebx,%ecx - subl $96,%eax - jnc 
.L025ecb_dec_loop6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - movups %xmm7,80(%edi) - leal 96(%edi),%edi - addl $96,%eax - jz .L013ecb_ret -.L023ecb_dec_tail: - movups (%esi),%xmm2 - cmpl $32,%eax - jb .L026ecb_dec_one - movups 16(%esi),%xmm3 - je .L027ecb_dec_two - movups 32(%esi),%xmm4 - cmpl $64,%eax - jb .L028ecb_dec_three - movups 48(%esi),%xmm5 - je .L029ecb_dec_four - movups 64(%esi),%xmm6 - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp .L013ecb_ret -.align 16 -.L026ecb_dec_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L030dec1_loop_4: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L030dec1_loop_4 -.byte 102,15,56,223,209 - movups %xmm2,(%edi) - jmp .L013ecb_ret -.align 16 -.L027ecb_dec_two: - call _aesni_decrypt2 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp .L013ecb_ret -.align 16 -.L028ecb_dec_three: - call _aesni_decrypt3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp .L013ecb_ret -.align 16 -.L029ecb_dec_four: - call _aesni_decrypt4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) -.L013ecb_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_ecb_encrypt,.-.L_aes_hw_ecb_encrypt_begin -.globl aes_hw_ccm64_encrypt_blocks -.hidden aes_hw_ccm64_encrypt_blocks -.type aes_hw_ccm64_encrypt_blocks,@function -.align 16 -aes_hw_ccm64_encrypt_blocks: -.L_aes_hw_ccm64_encrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 
36(%esp),%ebx - movl 40(%esp),%ecx - movl %esp,%ebp - subl $60,%esp - andl $-16,%esp - movl %ebp,48(%esp) - movdqu (%ebx),%xmm7 - movdqu (%ecx),%xmm3 - movl 240(%edx),%ecx - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl $67438087,8(%esp) - movl $66051,12(%esp) - movl $1,%ebx - xorl %ebp,%ebp - movl %ebx,16(%esp) - movl %ebp,20(%esp) - movl %ebp,24(%esp) - movl %ebp,28(%esp) - shll $4,%ecx - movl $16,%ebx - leal (%edx),%ebp - movdqa (%esp),%xmm5 - movdqa %xmm7,%xmm2 - leal 32(%edx,%ecx,1),%edx - subl %ecx,%ebx -.byte 102,15,56,0,253 -.L031ccm64_enc_outer: - movups (%ebp),%xmm0 - movl %ebx,%ecx - movups (%esi),%xmm6 - xorps %xmm0,%xmm2 - movups 16(%ebp),%xmm1 - xorps %xmm6,%xmm0 - xorps %xmm0,%xmm3 - movups 32(%ebp),%xmm0 -.L032ccm64_enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L032ccm64_enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - paddq 16(%esp),%xmm7 - decl %eax -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - leal 16(%esi),%esi - xorps %xmm2,%xmm6 - movdqa %xmm7,%xmm2 - movups %xmm6,(%edi) -.byte 102,15,56,0,213 - leal 16(%edi),%edi - jnz .L031ccm64_enc_outer - movl 48(%esp),%esp - movl 40(%esp),%edi - movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_ccm64_encrypt_blocks,.-.L_aes_hw_ccm64_encrypt_blocks_begin -.globl aes_hw_ccm64_decrypt_blocks -.hidden aes_hw_ccm64_decrypt_blocks -.type aes_hw_ccm64_decrypt_blocks,@function -.align 16 -aes_hw_ccm64_decrypt_blocks: -.L_aes_hw_ccm64_decrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl 40(%esp),%ecx - movl %esp,%ebp - 
subl $60,%esp - andl $-16,%esp - movl %ebp,48(%esp) - movdqu (%ebx),%xmm7 - movdqu (%ecx),%xmm3 - movl 240(%edx),%ecx - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl $67438087,8(%esp) - movl $66051,12(%esp) - movl $1,%ebx - xorl %ebp,%ebp - movl %ebx,16(%esp) - movl %ebp,20(%esp) - movl %ebp,24(%esp) - movl %ebp,28(%esp) - movdqa (%esp),%xmm5 - movdqa %xmm7,%xmm2 - movl %edx,%ebp - movl %ecx,%ebx -.byte 102,15,56,0,253 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L033enc1_loop_5: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L033enc1_loop_5 -.byte 102,15,56,221,209 - shll $4,%ebx - movl $16,%ecx - movups (%esi),%xmm6 - paddq 16(%esp),%xmm7 - leal 16(%esi),%esi - subl %ebx,%ecx - leal 32(%ebp,%ebx,1),%edx - movl %ecx,%ebx - jmp .L034ccm64_dec_outer -.align 16 -.L034ccm64_dec_outer: - xorps %xmm2,%xmm6 - movdqa %xmm7,%xmm2 - movups %xmm6,(%edi) - leal 16(%edi),%edi -.byte 102,15,56,0,213 - subl $1,%eax - jz .L035ccm64_dec_break - movups (%ebp),%xmm0 - movl %ebx,%ecx - movups 16(%ebp),%xmm1 - xorps %xmm0,%xmm6 - xorps %xmm0,%xmm2 - xorps %xmm6,%xmm3 - movups 32(%ebp),%xmm0 -.L036ccm64_dec2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L036ccm64_dec2_loop - movups (%esi),%xmm6 - paddq 16(%esp),%xmm7 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - leal 16(%esi),%esi - jmp .L034ccm64_dec_outer -.align 16 -.L035ccm64_dec_break: - movl 240(%ebp),%ecx - movl %ebp,%edx - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm6 - leal 32(%edx),%edx - xorps %xmm6,%xmm3 -.L037enc1_loop_6: -.byte 102,15,56,220,217 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L037enc1_loop_6 -.byte 102,15,56,221,217 - movl 48(%esp),%esp - movl 40(%esp),%edi - movups %xmm3,(%edi) - pxor 
%xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_ccm64_decrypt_blocks,.-.L_aes_hw_ccm64_decrypt_blocks_begin -.globl aes_hw_ctr32_encrypt_blocks -.hidden aes_hw_ctr32_encrypt_blocks -.type aes_hw_ctr32_encrypt_blocks,@function -.align 16 -aes_hw_ctr32_encrypt_blocks: -.L_aes_hw_ctr32_encrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call .L038pic -.L038pic: - popl %ebx - leal BORINGSSL_function_hit+0-.L038pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl %esp,%ebp - subl $88,%esp - andl $-16,%esp - movl %ebp,80(%esp) - cmpl $1,%eax - je .L039ctr32_one_shortcut - movdqu (%ebx),%xmm7 - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl $67438087,8(%esp) - movl $66051,12(%esp) - movl $6,%ecx - xorl %ebp,%ebp - movl %ecx,16(%esp) - movl %ecx,20(%esp) - movl %ecx,24(%esp) - movl %ebp,28(%esp) -.byte 102,15,58,22,251,3 -.byte 102,15,58,34,253,3 - movl 240(%edx),%ecx - bswap %ebx - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movdqa (%esp),%xmm2 -.byte 102,15,58,34,195,0 - leal 3(%ebx),%ebp -.byte 102,15,58,34,205,0 - incl %ebx -.byte 102,15,58,34,195,1 - incl %ebp -.byte 102,15,58,34,205,1 - incl %ebx -.byte 102,15,58,34,195,2 - incl %ebp -.byte 102,15,58,34,205,2 - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - movdqu (%edx),%xmm6 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 - pshufd $192,%xmm0,%xmm2 - pshufd $128,%xmm0,%xmm3 - cmpl $6,%eax - jb .L040ctr32_tail - pxor %xmm6,%xmm7 - shll $4,%ecx - movl $16,%ebx - movdqa %xmm7,32(%esp) - movl %edx,%ebp - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - subl $6,%eax - jmp .L041ctr32_loop6 -.align 16 -.L041ctr32_loop6: - pshufd $64,%xmm0,%xmm4 - 
movdqa 32(%esp),%xmm0 - pshufd $192,%xmm1,%xmm5 - pxor %xmm0,%xmm2 - pshufd $128,%xmm1,%xmm6 - pxor %xmm0,%xmm3 - pshufd $64,%xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 -.byte 102,15,56,220,209 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 -.byte 102,15,56,220,217 - movups 32(%ebp),%xmm0 - movl %ebx,%ecx -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - call .L_aesni_encrypt6_enter - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps %xmm1,%xmm2 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm3 - movups %xmm2,(%edi) - movdqa 16(%esp),%xmm0 - xorps %xmm1,%xmm4 - movdqa 64(%esp),%xmm1 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - paddd %xmm0,%xmm1 - paddd 48(%esp),%xmm0 - movdqa (%esp),%xmm2 - movups 48(%esi),%xmm3 - movups 64(%esi),%xmm4 - xorps %xmm3,%xmm5 - movups 80(%esi),%xmm3 - leal 96(%esi),%esi - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - xorps %xmm4,%xmm6 - movups %xmm5,48(%edi) - xorps %xmm3,%xmm7 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 - movups %xmm6,64(%edi) - pshufd $192,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - pshufd $128,%xmm0,%xmm3 - subl $6,%eax - jnc .L041ctr32_loop6 - addl $6,%eax - jz .L042ctr32_ret - movdqu (%ebp),%xmm7 - movl %ebp,%edx - pxor 32(%esp),%xmm7 - movl 240(%ebp),%ecx -.L040ctr32_tail: - por %xmm7,%xmm2 - cmpl $2,%eax - jb .L043ctr32_one - pshufd $64,%xmm0,%xmm4 - por %xmm7,%xmm3 - je .L044ctr32_two - pshufd $192,%xmm1,%xmm5 - por %xmm7,%xmm4 - cmpl $4,%eax - jb .L045ctr32_three - pshufd $128,%xmm1,%xmm6 - por %xmm7,%xmm5 - je .L046ctr32_four - por %xmm7,%xmm6 - call _aesni_encrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps %xmm1,%xmm2 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm3 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm4 - movups 64(%esi),%xmm1 - xorps %xmm0,%xmm5 - movups %xmm2,(%edi) - xorps %xmm1,%xmm6 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp .L042ctr32_ret 
-.align 16 -.L039ctr32_one_shortcut: - movups (%ebx),%xmm2 - movl 240(%edx),%ecx -.L043ctr32_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L047enc1_loop_7: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L047enc1_loop_7 -.byte 102,15,56,221,209 - movups (%esi),%xmm6 - xorps %xmm2,%xmm6 - movups %xmm6,(%edi) - jmp .L042ctr32_ret -.align 16 -.L044ctr32_two: - call _aesni_encrypt2 - movups (%esi),%xmm5 - movups 16(%esi),%xmm6 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp .L042ctr32_ret -.align 16 -.L045ctr32_three: - call _aesni_encrypt3 - movups (%esi),%xmm5 - movups 16(%esi),%xmm6 - xorps %xmm5,%xmm2 - movups 32(%esi),%xmm7 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - xorps %xmm7,%xmm4 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp .L042ctr32_ret -.align 16 -.L046ctr32_four: - call _aesni_encrypt4 - movups (%esi),%xmm6 - movups 16(%esi),%xmm7 - movups 32(%esi),%xmm1 - xorps %xmm6,%xmm2 - movups 48(%esi),%xmm0 - xorps %xmm7,%xmm3 - movups %xmm2,(%edi) - xorps %xmm1,%xmm4 - movups %xmm3,16(%edi) - xorps %xmm0,%xmm5 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) -.L042ctr32_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movl 80(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin -.globl aes_hw_xts_encrypt -.hidden aes_hw_xts_encrypt -.type aes_hw_xts_encrypt,@function -.align 16 -aes_hw_xts_encrypt: -.L_aes_hw_xts_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 36(%esp),%edx - movl 40(%esp),%esi - movl 240(%edx),%ecx - movups (%esi),%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 
-.L048enc1_loop_8: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L048enc1_loop_8 -.byte 102,15,56,221,209 - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl %esp,%ebp - subl $120,%esp - movl 240(%edx),%ecx - andl $-16,%esp - movl $135,96(%esp) - movl $0,100(%esp) - movl $1,104(%esp) - movl $0,108(%esp) - movl %eax,112(%esp) - movl %ebp,116(%esp) - movdqa %xmm2,%xmm1 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - pcmpgtd %xmm1,%xmm0 - andl $-16,%eax - movl %edx,%ebp - movl %ecx,%ebx - subl $96,%eax - jc .L049xts_enc_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L050xts_enc_loop6 -.align 16 -.L050xts_enc_loop6: - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,16(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,32(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,64(%esp) - paddq %xmm1,%xmm1 - movups (%ebp),%xmm0 - pand %xmm3,%xmm7 - movups (%esi),%xmm2 - pxor %xmm1,%xmm7 - movl %ebx,%ecx - movdqu 16(%esi),%xmm3 - xorps %xmm0,%xmm2 - movdqu 32(%esi),%xmm4 - pxor %xmm0,%xmm3 - movdqu 48(%esi),%xmm5 - pxor %xmm0,%xmm4 - movdqu 64(%esi),%xmm6 - pxor %xmm0,%xmm5 - movdqu 80(%esi),%xmm1 - pxor %xmm0,%xmm6 - leal 96(%esi),%esi - pxor (%esp),%xmm2 - movdqa %xmm7,80(%esp) - pxor %xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 -.byte 102,15,56,220,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 -.byte 102,15,56,220,217 - pxor %xmm0,%xmm7 - movups 
32(%ebp),%xmm0 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - call .L_aesni_encrypt6_enter - movdqa 80(%esp),%xmm1 - pxor %xmm0,%xmm0 - xorps (%esp),%xmm2 - pcmpgtd %xmm1,%xmm0 - xorps 16(%esp),%xmm3 - movups %xmm2,(%edi) - xorps 32(%esp),%xmm4 - movups %xmm3,16(%edi) - xorps 48(%esp),%xmm5 - movups %xmm4,32(%edi) - xorps 64(%esp),%xmm6 - movups %xmm5,48(%edi) - xorps %xmm1,%xmm7 - movups %xmm6,64(%edi) - pshufd $19,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqa 96(%esp),%xmm3 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - subl $96,%eax - jnc .L050xts_enc_loop6 - movl 240(%ebp),%ecx - movl %ebp,%edx - movl %ecx,%ebx -.L049xts_enc_short: - addl $96,%eax - jz .L051xts_enc_done6x - movdqa %xmm1,%xmm5 - cmpl $32,%eax - jb .L052xts_enc_one - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - je .L053xts_enc_two - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - cmpl $64,%eax - jb .L054xts_enc_three - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm7 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - movdqa %xmm5,(%esp) - movdqa %xmm6,16(%esp) - je .L055xts_enc_four - movdqa %xmm7,32(%esp) - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm7 - pxor %xmm1,%xmm7 - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - pxor (%esp),%xmm2 - movdqu 48(%esi),%xmm5 - pxor 16(%esp),%xmm3 - movdqu 64(%esi),%xmm6 - pxor 32(%esp),%xmm4 - leal 80(%esi),%esi - pxor 48(%esp),%xmm5 - movdqa %xmm7,64(%esp) - pxor %xmm7,%xmm6 - call _aesni_encrypt6 - movaps 64(%esp),%xmm1 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps 32(%esp),%xmm4 - movups %xmm2,(%edi) - xorps 48(%esp),%xmm5 - movups %xmm3,16(%edi) 
- xorps %xmm1,%xmm6 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - leal 80(%edi),%edi - jmp .L056xts_enc_done -.align 16 -.L052xts_enc_one: - movups (%esi),%xmm2 - leal 16(%esi),%esi - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L057enc1_loop_9: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L057enc1_loop_9 -.byte 102,15,56,221,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) - leal 16(%edi),%edi - movdqa %xmm5,%xmm1 - jmp .L056xts_enc_done -.align 16 -.L053xts_enc_two: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - leal 32(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - call _aesni_encrypt2 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 32(%edi),%edi - movdqa %xmm6,%xmm1 - jmp .L056xts_enc_done -.align 16 -.L054xts_enc_three: - movaps %xmm1,%xmm7 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - leal 48(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - call _aesni_encrypt3 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - leal 48(%edi),%edi - movdqa %xmm7,%xmm1 - jmp .L056xts_enc_done -.align 16 -.L055xts_enc_four: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - xorps (%esp),%xmm2 - movups 48(%esi),%xmm5 - leal 64(%esi),%esi - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - xorps %xmm6,%xmm5 - call _aesni_encrypt4 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - xorps %xmm6,%xmm5 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - leal 64(%edi),%edi - movdqa %xmm6,%xmm1 - jmp .L056xts_enc_done -.align 16 -.L051xts_enc_done6x: - movl 112(%esp),%eax - andl $15,%eax - jz .L058xts_enc_ret - movdqa %xmm1,%xmm5 - movl %eax,112(%esp) - jmp 
.L059xts_enc_steal -.align 16 -.L056xts_enc_done: - movl 112(%esp),%eax - pxor %xmm0,%xmm0 - andl $15,%eax - jz .L058xts_enc_ret - pcmpgtd %xmm1,%xmm0 - movl %eax,112(%esp) - pshufd $19,%xmm0,%xmm5 - paddq %xmm1,%xmm1 - pand 96(%esp),%xmm5 - pxor %xmm1,%xmm5 -.L059xts_enc_steal: - movzbl (%esi),%ecx - movzbl -16(%edi),%edx - leal 1(%esi),%esi - movb %cl,-16(%edi) - movb %dl,(%edi) - leal 1(%edi),%edi - subl $1,%eax - jnz .L059xts_enc_steal - subl 112(%esp),%edi - movl %ebp,%edx - movl %ebx,%ecx - movups -16(%edi),%xmm2 - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L060enc1_loop_10: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L060enc1_loop_10 -.byte 102,15,56,221,209 - xorps %xmm5,%xmm2 - movups %xmm2,-16(%edi) -.L058xts_enc_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) - movl 116(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_xts_encrypt,.-.L_aes_hw_xts_encrypt_begin -.globl aes_hw_xts_decrypt -.hidden aes_hw_xts_decrypt -.type aes_hw_xts_decrypt,@function -.align 16 -aes_hw_xts_decrypt: -.L_aes_hw_xts_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 36(%esp),%edx - movl 40(%esp),%esi - movl 240(%edx),%ecx - movups (%esi),%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L061enc1_loop_11: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L061enc1_loop_11 -.byte 102,15,56,221,209 - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl %esp,%ebp - subl $120,%esp - andl $-16,%esp - xorl %ebx,%ebx - testl $15,%eax - setnz %bl - shll $4,%ebx - 
subl %ebx,%eax - movl $135,96(%esp) - movl $0,100(%esp) - movl $1,104(%esp) - movl $0,108(%esp) - movl %eax,112(%esp) - movl %ebp,116(%esp) - movl 240(%edx),%ecx - movl %edx,%ebp - movl %ecx,%ebx - movdqa %xmm2,%xmm1 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - pcmpgtd %xmm1,%xmm0 - andl $-16,%eax - subl $96,%eax - jc .L062xts_dec_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L063xts_dec_loop6 -.align 16 -.L063xts_dec_loop6: - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,16(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,32(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,64(%esp) - paddq %xmm1,%xmm1 - movups (%ebp),%xmm0 - pand %xmm3,%xmm7 - movups (%esi),%xmm2 - pxor %xmm1,%xmm7 - movl %ebx,%ecx - movdqu 16(%esi),%xmm3 - xorps %xmm0,%xmm2 - movdqu 32(%esi),%xmm4 - pxor %xmm0,%xmm3 - movdqu 48(%esi),%xmm5 - pxor %xmm0,%xmm4 - movdqu 64(%esi),%xmm6 - pxor %xmm0,%xmm5 - movdqu 80(%esi),%xmm1 - pxor %xmm0,%xmm6 - leal 96(%esi),%esi - pxor (%esp),%xmm2 - movdqa %xmm7,80(%esp) - pxor %xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 -.byte 102,15,56,222,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - call .L_aesni_decrypt6_enter - movdqa 80(%esp),%xmm1 - pxor %xmm0,%xmm0 - xorps (%esp),%xmm2 - pcmpgtd %xmm1,%xmm0 - xorps 16(%esp),%xmm3 - movups %xmm2,(%edi) - 
xorps 32(%esp),%xmm4 - movups %xmm3,16(%edi) - xorps 48(%esp),%xmm5 - movups %xmm4,32(%edi) - xorps 64(%esp),%xmm6 - movups %xmm5,48(%edi) - xorps %xmm1,%xmm7 - movups %xmm6,64(%edi) - pshufd $19,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqa 96(%esp),%xmm3 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - subl $96,%eax - jnc .L063xts_dec_loop6 - movl 240(%ebp),%ecx - movl %ebp,%edx - movl %ecx,%ebx -.L062xts_dec_short: - addl $96,%eax - jz .L064xts_dec_done6x - movdqa %xmm1,%xmm5 - cmpl $32,%eax - jb .L065xts_dec_one - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - je .L066xts_dec_two - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - cmpl $64,%eax - jb .L067xts_dec_three - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm7 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - movdqa %xmm5,(%esp) - movdqa %xmm6,16(%esp) - je .L068xts_dec_four - movdqa %xmm7,32(%esp) - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm7 - pxor %xmm1,%xmm7 - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - pxor (%esp),%xmm2 - movdqu 48(%esi),%xmm5 - pxor 16(%esp),%xmm3 - movdqu 64(%esi),%xmm6 - pxor 32(%esp),%xmm4 - leal 80(%esi),%esi - pxor 48(%esp),%xmm5 - movdqa %xmm7,64(%esp) - pxor %xmm7,%xmm6 - call _aesni_decrypt6 - movaps 64(%esp),%xmm1 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps 32(%esp),%xmm4 - movups %xmm2,(%edi) - xorps 48(%esp),%xmm5 - movups %xmm3,16(%edi) - xorps %xmm1,%xmm6 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - leal 80(%edi),%edi - jmp .L069xts_dec_done -.align 16 -.L065xts_dec_one: - movups (%esi),%xmm2 - leal 16(%esi),%esi - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - 
leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L070dec1_loop_12: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L070dec1_loop_12 -.byte 102,15,56,223,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) - leal 16(%edi),%edi - movdqa %xmm5,%xmm1 - jmp .L069xts_dec_done -.align 16 -.L066xts_dec_two: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - leal 32(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - call _aesni_decrypt2 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 32(%edi),%edi - movdqa %xmm6,%xmm1 - jmp .L069xts_dec_done -.align 16 -.L067xts_dec_three: - movaps %xmm1,%xmm7 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - leal 48(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - call _aesni_decrypt3 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - leal 48(%edi),%edi - movdqa %xmm7,%xmm1 - jmp .L069xts_dec_done -.align 16 -.L068xts_dec_four: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - xorps (%esp),%xmm2 - movups 48(%esi),%xmm5 - leal 64(%esi),%esi - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - xorps %xmm6,%xmm5 - call _aesni_decrypt4 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - xorps %xmm6,%xmm5 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - leal 64(%edi),%edi - movdqa %xmm6,%xmm1 - jmp .L069xts_dec_done -.align 16 -.L064xts_dec_done6x: - movl 112(%esp),%eax - andl $15,%eax - jz .L071xts_dec_ret - movl %eax,112(%esp) - jmp .L072xts_dec_only_one_more -.align 16 -.L069xts_dec_done: - movl 112(%esp),%eax - pxor %xmm0,%xmm0 - andl $15,%eax - jz .L071xts_dec_ret - pcmpgtd %xmm1,%xmm0 - movl %eax,112(%esp) - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd 
%xmm1,%xmm0 - pxor %xmm2,%xmm1 -.L072xts_dec_only_one_more: - pshufd $19,%xmm0,%xmm5 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm5 - pxor %xmm1,%xmm5 - movl %ebp,%edx - movl %ebx,%ecx - movups (%esi),%xmm2 - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L073dec1_loop_13: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L073dec1_loop_13 -.byte 102,15,56,223,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) -.L074xts_dec_steal: - movzbl 16(%esi),%ecx - movzbl (%edi),%edx - leal 1(%esi),%esi - movb %cl,(%edi) - movb %dl,16(%edi) - leal 1(%edi),%edi - subl $1,%eax - jnz .L074xts_dec_steal - subl 112(%esp),%edi - movl %ebp,%edx - movl %ebx,%ecx - movups (%edi),%xmm2 - xorps %xmm6,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L075dec1_loop_14: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L075dec1_loop_14 -.byte 102,15,56,223,209 - xorps %xmm6,%xmm2 - movups %xmm2,(%edi) -.L071xts_dec_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) - movl 116(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_xts_decrypt,.-.L_aes_hw_xts_decrypt_begin -.globl aes_hw_cbc_encrypt -.hidden aes_hw_cbc_encrypt -.type aes_hw_cbc_encrypt,@function -.align 16 -aes_hw_cbc_encrypt: -.L_aes_hw_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl %esp,%ebx - movl 24(%esp),%edi - subl $24,%ebx - movl 28(%esp),%eax - andl $-16,%ebx - movl 32(%esp),%edx - movl 36(%esp),%ebp - testl %eax,%eax - jz .L076cbc_abort - cmpl $0,40(%esp) - xchgl %esp,%ebx - movups (%ebp),%xmm7 - movl 
240(%edx),%ecx - movl %edx,%ebp - movl %ebx,16(%esp) - movl %ecx,%ebx - je .L077cbc_decrypt - movaps %xmm7,%xmm2 - cmpl $16,%eax - jb .L078cbc_enc_tail - subl $16,%eax - jmp .L079cbc_enc_loop -.align 16 -.L079cbc_enc_loop: - movups (%esi),%xmm7 - leal 16(%esi),%esi - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm7 - leal 32(%edx),%edx - xorps %xmm7,%xmm2 -.L080enc1_loop_15: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L080enc1_loop_15 -.byte 102,15,56,221,209 - movl %ebx,%ecx - movl %ebp,%edx - movups %xmm2,(%edi) - leal 16(%edi),%edi - subl $16,%eax - jnc .L079cbc_enc_loop - addl $16,%eax - jnz .L078cbc_enc_tail - movaps %xmm2,%xmm7 - pxor %xmm2,%xmm2 - jmp .L081cbc_ret -.L078cbc_enc_tail: - movl %eax,%ecx -.long 2767451785 - movl $16,%ecx - subl %eax,%ecx - xorl %eax,%eax -.long 2868115081 - leal -16(%edi),%edi - movl %ebx,%ecx - movl %edi,%esi - movl %ebp,%edx - jmp .L079cbc_enc_loop -.align 16 -.L077cbc_decrypt: - cmpl $80,%eax - jbe .L082cbc_dec_tail - movaps %xmm7,(%esp) - subl $80,%eax - jmp .L083cbc_dec_loop6_enter -.align 16 -.L084cbc_dec_loop6: - movaps %xmm0,(%esp) - movups %xmm7,(%edi) - leal 16(%edi),%edi -.L083cbc_dec_loop6_enter: - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - movdqu 80(%esi),%xmm7 - call _aesni_decrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps (%esp),%xmm2 - xorps %xmm1,%xmm3 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm4 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm5 - movups 64(%esi),%xmm1 - xorps %xmm0,%xmm6 - movups 80(%esi),%xmm0 - xorps %xmm1,%xmm7 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 96(%esi),%esi - movups %xmm4,32(%edi) - movl %ebx,%ecx - movups %xmm5,48(%edi) - movl %ebp,%edx - movups %xmm6,64(%edi) - leal 80(%edi),%edi - subl $96,%eax - ja .L084cbc_dec_loop6 - movaps %xmm7,%xmm2 - movaps %xmm0,%xmm7 - addl $80,%eax - jle .L085cbc_dec_clear_tail_collected - movups 
%xmm2,(%edi) - leal 16(%edi),%edi -.L082cbc_dec_tail: - movups (%esi),%xmm2 - movaps %xmm2,%xmm6 - cmpl $16,%eax - jbe .L086cbc_dec_one - movups 16(%esi),%xmm3 - movaps %xmm3,%xmm5 - cmpl $32,%eax - jbe .L087cbc_dec_two - movups 32(%esi),%xmm4 - cmpl $48,%eax - jbe .L088cbc_dec_three - movups 48(%esi),%xmm5 - cmpl $64,%eax - jbe .L089cbc_dec_four - movups 64(%esi),%xmm6 - movaps %xmm7,(%esp) - movups (%esi),%xmm2 - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps (%esp),%xmm2 - xorps %xmm1,%xmm3 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm4 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm5 - movups 64(%esi),%xmm7 - xorps %xmm0,%xmm6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%edi) - pxor %xmm5,%xmm5 - leal 64(%edi),%edi - movaps %xmm6,%xmm2 - pxor %xmm6,%xmm6 - subl $80,%eax - jmp .L090cbc_dec_tail_collected -.align 16 -.L086cbc_dec_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -.L091dec1_loop_16: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz .L091dec1_loop_16 -.byte 102,15,56,223,209 - xorps %xmm7,%xmm2 - movaps %xmm6,%xmm7 - subl $16,%eax - jmp .L090cbc_dec_tail_collected -.align 16 -.L087cbc_dec_two: - call _aesni_decrypt2 - xorps %xmm7,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movaps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - leal 16(%edi),%edi - movaps %xmm5,%xmm7 - subl $32,%eax - jmp .L090cbc_dec_tail_collected -.align 16 -.L088cbc_dec_three: - call _aesni_decrypt3 - xorps %xmm7,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm5,%xmm4 - movups %xmm2,(%edi) - movaps %xmm4,%xmm2 - pxor %xmm4,%xmm4 - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - leal 32(%edi),%edi - movups 32(%esi),%xmm7 - subl $48,%eax - jmp .L090cbc_dec_tail_collected -.align 16 -.L089cbc_dec_four: - call _aesni_decrypt4 - movups 16(%esi),%xmm1 - movups 32(%esi),%xmm0 - xorps %xmm7,%xmm2 - movups 
48(%esi),%xmm7 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - xorps %xmm1,%xmm4 - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - xorps %xmm0,%xmm5 - movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 - leal 48(%edi),%edi - movaps %xmm5,%xmm2 - pxor %xmm5,%xmm5 - subl $64,%eax - jmp .L090cbc_dec_tail_collected -.align 16 -.L085cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 -.L090cbc_dec_tail_collected: - andl $15,%eax - jnz .L092cbc_dec_tail_partial - movups %xmm2,(%edi) - pxor %xmm0,%xmm0 - jmp .L081cbc_ret -.align 16 -.L092cbc_dec_tail_partial: - movaps %xmm2,(%esp) - pxor %xmm0,%xmm0 - movl $16,%ecx - movl %esp,%esi - subl %eax,%ecx -.long 2767451785 - movdqa %xmm2,(%esp) -.L081cbc_ret: - movl 16(%esp),%esp - movl 36(%esp),%ebp - pxor %xmm2,%xmm2 - pxor %xmm1,%xmm1 - movups %xmm7,(%ebp) - pxor %xmm7,%xmm7 -.L076cbc_abort: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size aes_hw_cbc_encrypt,.-.L_aes_hw_cbc_encrypt_begin -.hidden _aesni_set_encrypt_key -.type _aesni_set_encrypt_key,@function -.align 16 -_aesni_set_encrypt_key: - pushl %ebp - pushl %ebx - testl %eax,%eax - jz .L093bad_pointer - testl %edx,%edx - jz .L093bad_pointer - call .L094pic -.L094pic: - popl %ebx - leal .Lkey_const-.L094pic(%ebx),%ebx - leal OPENSSL_ia32cap_P-.Lkey_const(%ebx),%ebp - movups (%eax),%xmm0 - xorps %xmm4,%xmm4 - movl 4(%ebp),%ebp - leal 16(%edx),%edx - andl $268437504,%ebp - cmpl $256,%ecx - je .L09514rounds - cmpl $192,%ecx - je .L09612rounds - cmpl $128,%ecx - jne .L097bad_keybits -.align 16 -.L09810rounds: - cmpl $268435456,%ebp - je .L09910rounds_alt - movl $9,%ecx - movups %xmm0,-16(%edx) -.byte 102,15,58,223,200,1 - call .L100key_128_cold -.byte 102,15,58,223,200,2 - call .L101key_128 -.byte 102,15,58,223,200,4 - call .L101key_128 -.byte 102,15,58,223,200,8 - call .L101key_128 -.byte 102,15,58,223,200,16 - call .L101key_128 -.byte 102,15,58,223,200,32 - call .L101key_128 -.byte 102,15,58,223,200,64 - call .L101key_128 
-.byte 102,15,58,223,200,128 - call .L101key_128 -.byte 102,15,58,223,200,27 - call .L101key_128 -.byte 102,15,58,223,200,54 - call .L101key_128 - movups %xmm0,(%edx) - movl %ecx,80(%edx) - jmp .L102good_key -.align 16 -.L101key_128: - movups %xmm0,(%edx) - leal 16(%edx),%edx -.L100key_128_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - ret -.align 16 -.L09910rounds_alt: - movdqa (%ebx),%xmm5 - movl $8,%ecx - movdqa 32(%ebx),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,-16(%edx) -.L103loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leal 16(%edx),%edx - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%edx) - movdqa %xmm0,%xmm2 - decl %ecx - jnz .L103loop_key128 - movdqa 48(%ebx),%xmm4 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%edx) - movl $9,%ecx - movl %ecx,96(%edx) - jmp .L102good_key -.align 16 -.L09612rounds: - movq 16(%eax),%xmm2 - cmpl $268435456,%ebp - je .L10412rounds_alt - movl $11,%ecx - movups %xmm0,-16(%edx) -.byte 102,15,58,223,202,1 - call .L105key_192a_cold -.byte 102,15,58,223,202,2 - call .L106key_192b -.byte 102,15,58,223,202,4 - call .L107key_192a -.byte 102,15,58,223,202,8 - call .L106key_192b -.byte 102,15,58,223,202,16 - call .L107key_192a -.byte 102,15,58,223,202,32 - call .L106key_192b -.byte 102,15,58,223,202,64 - call .L107key_192a -.byte 102,15,58,223,202,128 - call 
.L106key_192b - movups %xmm0,(%edx) - movl %ecx,48(%edx) - jmp .L102good_key -.align 16 -.L107key_192a: - movups %xmm0,(%edx) - leal 16(%edx),%edx -.align 16 -.L105key_192a_cold: - movaps %xmm2,%xmm5 -.L108key_192b_warm: - shufps $16,%xmm0,%xmm4 - movdqa %xmm2,%xmm3 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - pslldq $4,%xmm3 - xorps %xmm4,%xmm0 - pshufd $85,%xmm1,%xmm1 - pxor %xmm3,%xmm2 - pxor %xmm1,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm3,%xmm2 - ret -.align 16 -.L106key_192b: - movaps %xmm0,%xmm3 - shufps $68,%xmm0,%xmm5 - movups %xmm5,(%edx) - shufps $78,%xmm2,%xmm3 - movups %xmm3,16(%edx) - leal 32(%edx),%edx - jmp .L108key_192b_warm -.align 16 -.L10412rounds_alt: - movdqa 16(%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $8,%ecx - movdqu %xmm0,-16(%edx) -.L109loop_key192: - movq %xmm2,(%edx) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leal 24(%edx),%edx - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%edx) - decl %ecx - jnz .L109loop_key192 - movl $11,%ecx - movl %ecx,32(%edx) - jmp .L102good_key -.align 16 -.L09514rounds: - movups 16(%eax),%xmm2 - leal 16(%edx),%edx - cmpl $268435456,%ebp - je .L11014rounds_alt - movl $13,%ecx - movups %xmm0,-32(%edx) - movups %xmm2,-16(%edx) -.byte 102,15,58,223,202,1 - call .L111key_256a_cold -.byte 102,15,58,223,200,1 - call .L112key_256b -.byte 102,15,58,223,202,2 - call .L113key_256a -.byte 102,15,58,223,200,2 - call .L112key_256b -.byte 102,15,58,223,202,4 - call .L113key_256a -.byte 102,15,58,223,200,4 - call .L112key_256b -.byte 102,15,58,223,202,8 - call .L113key_256a -.byte 102,15,58,223,200,8 - call .L112key_256b -.byte 102,15,58,223,202,16 - call .L113key_256a -.byte 102,15,58,223,200,16 - call .L112key_256b -.byte 102,15,58,223,202,32 - call 
.L113key_256a -.byte 102,15,58,223,200,32 - call .L112key_256b -.byte 102,15,58,223,202,64 - call .L113key_256a - movups %xmm0,(%edx) - movl %ecx,16(%edx) - xorl %eax,%eax - jmp .L102good_key -.align 16 -.L113key_256a: - movups %xmm2,(%edx) - leal 16(%edx),%edx -.L111key_256a_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - ret -.align 16 -.L112key_256b: - movups %xmm0,(%edx) - leal 16(%edx),%edx - shufps $16,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $140,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $170,%xmm1,%xmm1 - xorps %xmm1,%xmm2 - ret -.align 16 -.L11014rounds_alt: - movdqa (%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $7,%ecx - movdqu %xmm0,-32(%edx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,-16(%edx) -.L114loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - decl %ecx - jz .L115done_key256 - pshufd $255,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%edx) - leal 32(%edx),%edx - movdqa %xmm2,%xmm1 - jmp .L114loop_key256 -.L115done_key256: - movl $13,%ecx - movl %ecx,16(%edx) -.L102good_key: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - xorl %eax,%eax - popl %ebx - popl %ebp - ret -.align 4 -.L093bad_pointer: - movl $-1,%eax - popl %ebx - popl %ebp - ret -.align 4 -.L097bad_keybits: - pxor %xmm0,%xmm0 - movl $-2,%eax - popl %ebx - popl %ebp - ret -.size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key -.globl aes_hw_set_encrypt_key -.hidden aes_hw_set_encrypt_key -.type aes_hw_set_encrypt_key,@function -.align 16 -aes_hw_set_encrypt_key: 
-.L_aes_hw_set_encrypt_key_begin: -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call .L116pic -.L116pic: - popl %ebx - leal BORINGSSL_function_hit+3-.L116pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 4(%esp),%eax - movl 8(%esp),%ecx - movl 12(%esp),%edx - call _aesni_set_encrypt_key - ret -.size aes_hw_set_encrypt_key,.-.L_aes_hw_set_encrypt_key_begin -.globl aes_hw_set_decrypt_key -.hidden aes_hw_set_decrypt_key -.type aes_hw_set_decrypt_key,@function -.align 16 -aes_hw_set_decrypt_key: -.L_aes_hw_set_decrypt_key_begin: - movl 4(%esp),%eax - movl 8(%esp),%ecx - movl 12(%esp),%edx - call _aesni_set_encrypt_key - movl 12(%esp),%edx - shll $4,%ecx - testl %eax,%eax - jnz .L117dec_key_ret - leal 16(%edx,%ecx,1),%eax - movups (%edx),%xmm0 - movups (%eax),%xmm1 - movups %xmm0,(%eax) - movups %xmm1,(%edx) - leal 16(%edx),%edx - leal -16(%eax),%eax -.L118dec_key_inverse: - movups (%edx),%xmm0 - movups (%eax),%xmm1 -.byte 102,15,56,219,192 -.byte 102,15,56,219,201 - leal 16(%edx),%edx - leal -16(%eax),%eax - movups %xmm0,16(%eax) - movups %xmm1,-16(%edx) - cmpl %edx,%eax - ja .L118dec_key_inverse - movups (%edx),%xmm0 -.byte 102,15,56,219,192 - movups %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - xorl %eax,%eax -.L117dec_key_ret: - ret -.size aes_hw_set_decrypt_key,.-.L_aes_hw_set_decrypt_key_begin -.align 64 -.Lkey_const: -.long 202313229,202313229,202313229,202313229 -.long 67569157,67569157,67569157,67569157 -.long 1,1,1,1 -.long 27,27,27,27 -.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 -.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 -.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 -.byte 115,108,46,111,114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/bn-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/bn-586.S deleted file mode 100644 index 4a6ccfbfac..0000000000 --- 
a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/bn-586.S +++ /dev/null @@ -1,1544 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl bn_mul_add_words -.hidden bn_mul_add_words -.type bn_mul_add_words,@function -.align 16 -bn_mul_add_words: -.L_bn_mul_add_words_begin: - call .L000PIC_me_up -.L000PIC_me_up: - popl %eax - leal OPENSSL_ia32cap_P-.L000PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc .L001maw_non_sse2 - movl 4(%esp),%eax - movl 8(%esp),%edx - movl 12(%esp),%ecx - movd 16(%esp),%mm0 - pxor %mm1,%mm1 - jmp .L002maw_sse2_entry -.align 16 -.L003maw_sse2_unrolled: - movd (%eax),%mm3 - paddq %mm3,%mm1 - movd (%edx),%mm2 - pmuludq %mm0,%mm2 - movd 4(%edx),%mm4 - pmuludq %mm0,%mm4 - movd 8(%edx),%mm6 - pmuludq %mm0,%mm6 - movd 12(%edx),%mm7 - pmuludq %mm0,%mm7 - paddq %mm2,%mm1 - movd 4(%eax),%mm3 - paddq %mm4,%mm3 - movd 8(%eax),%mm5 - paddq %mm6,%mm5 - movd 12(%eax),%mm4 - paddq %mm4,%mm7 - movd %mm1,(%eax) - movd 16(%edx),%mm2 - pmuludq %mm0,%mm2 - psrlq $32,%mm1 - movd 20(%edx),%mm4 - pmuludq %mm0,%mm4 - paddq %mm3,%mm1 - movd 24(%edx),%mm6 - pmuludq %mm0,%mm6 - movd %mm1,4(%eax) - psrlq $32,%mm1 - movd 28(%edx),%mm3 - addl $32,%edx - pmuludq %mm0,%mm3 - paddq %mm5,%mm1 - movd 16(%eax),%mm5 - paddq %mm5,%mm2 - movd %mm1,8(%eax) - psrlq $32,%mm1 - paddq %mm7,%mm1 - movd 20(%eax),%mm5 - paddq %mm5,%mm4 - movd %mm1,12(%eax) - psrlq $32,%mm1 - paddq %mm2,%mm1 - movd 24(%eax),%mm5 - paddq %mm5,%mm6 - movd %mm1,16(%eax) - psrlq $32,%mm1 - paddq %mm4,%mm1 - movd 28(%eax),%mm5 - paddq %mm5,%mm3 - movd %mm1,20(%eax) - psrlq $32,%mm1 - paddq %mm6,%mm1 - movd %mm1,24(%eax) - psrlq $32,%mm1 - paddq %mm3,%mm1 - movd %mm1,28(%eax) - leal 32(%eax),%eax - psrlq $32,%mm1 - subl $8,%ecx - jz .L004maw_sse2_exit -.L002maw_sse2_entry: - testl $4294967288,%ecx - jnz .L003maw_sse2_unrolled -.align 4 
-.L005maw_sse2_loop: - movd (%edx),%mm2 - movd (%eax),%mm3 - pmuludq %mm0,%mm2 - leal 4(%edx),%edx - paddq %mm3,%mm1 - paddq %mm2,%mm1 - movd %mm1,(%eax) - subl $1,%ecx - psrlq $32,%mm1 - leal 4(%eax),%eax - jnz .L005maw_sse2_loop -.L004maw_sse2_exit: - movd %mm1,%eax - emms - ret -.align 16 -.L001maw_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - xorl %esi,%esi - movl 20(%esp),%edi - movl 28(%esp),%ecx - movl 24(%esp),%ebx - andl $4294967288,%ecx - movl 32(%esp),%ebp - pushl %ecx - jz .L006maw_finish -.align 16 -.L007maw_loop: - - movl (%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl (%edi),%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - - movl 4(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 4(%edi),%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - - movl 8(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 8(%edi),%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - - movl 12(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 12(%edi),%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - - movl 16(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 16(%edi),%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - - movl 20(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 20(%edi),%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - - movl 24(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 24(%edi),%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi - - movl 28(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 28(%edi),%eax - adcl $0,%edx - movl %eax,28(%edi) - movl %edx,%esi - - subl $8,%ecx - leal 32(%ebx),%ebx - leal 32(%edi),%edi - jnz .L007maw_loop -.L006maw_finish: - movl 32(%esp),%ecx - andl $7,%ecx - jnz .L008maw_finish2 - jmp .L009maw_end -.L008maw_finish2: - - movl (%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl (%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,(%edi) - movl 
%edx,%esi - jz .L009maw_end - - movl 4(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 4(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,4(%edi) - movl %edx,%esi - jz .L009maw_end - - movl 8(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 8(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,8(%edi) - movl %edx,%esi - jz .L009maw_end - - movl 12(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 12(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,12(%edi) - movl %edx,%esi - jz .L009maw_end - - movl 16(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 16(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,16(%edi) - movl %edx,%esi - jz .L009maw_end - - movl 20(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 20(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,20(%edi) - movl %edx,%esi - jz .L009maw_end - - movl 24(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 24(%edi),%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi -.L009maw_end: - movl %esi,%eax - popl %ecx - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_mul_add_words,.-.L_bn_mul_add_words_begin -.globl bn_mul_words -.hidden bn_mul_words -.type bn_mul_words,@function -.align 16 -bn_mul_words: -.L_bn_mul_words_begin: - call .L010PIC_me_up -.L010PIC_me_up: - popl %eax - leal OPENSSL_ia32cap_P-.L010PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc .L011mw_non_sse2 - movl 4(%esp),%eax - movl 8(%esp),%edx - movl 12(%esp),%ecx - movd 16(%esp),%mm0 - pxor %mm1,%mm1 -.align 16 -.L012mw_sse2_loop: - movd (%edx),%mm2 - pmuludq %mm0,%mm2 - leal 4(%edx),%edx - paddq %mm2,%mm1 - movd %mm1,(%eax) - subl $1,%ecx - psrlq $32,%mm1 - leal 4(%eax),%eax - jnz .L012mw_sse2_loop - movd %mm1,%eax - emms - ret -.align 16 -.L011mw_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - xorl %esi,%esi - movl 20(%esp),%edi - movl 24(%esp),%ebx - movl 28(%esp),%ebp - movl 32(%esp),%ecx - andl $4294967288,%ebp - jz .L013mw_finish 
-.L014mw_loop: - - movl (%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - - movl 4(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - - movl 8(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - - movl 12(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - - movl 16(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - - movl 20(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - - movl 24(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi - - movl 28(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,28(%edi) - movl %edx,%esi - - addl $32,%ebx - addl $32,%edi - subl $8,%ebp - jz .L013mw_finish - jmp .L014mw_loop -.L013mw_finish: - movl 28(%esp),%ebp - andl $7,%ebp - jnz .L015mw_finish2 - jmp .L016mw_end -.L015mw_finish2: - - movl (%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 4(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 8(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 12(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 16(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 20(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - decl %ebp - jz .L016mw_end - - movl 24(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi -.L016mw_end: - movl %esi,%eax - popl %edi - popl 
%esi - popl %ebx - popl %ebp - ret -.size bn_mul_words,.-.L_bn_mul_words_begin -.globl bn_sqr_words -.hidden bn_sqr_words -.type bn_sqr_words,@function -.align 16 -bn_sqr_words: -.L_bn_sqr_words_begin: - call .L017PIC_me_up -.L017PIC_me_up: - popl %eax - leal OPENSSL_ia32cap_P-.L017PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc .L018sqr_non_sse2 - movl 4(%esp),%eax - movl 8(%esp),%edx - movl 12(%esp),%ecx -.align 16 -.L019sqr_sse2_loop: - movd (%edx),%mm0 - pmuludq %mm0,%mm0 - leal 4(%edx),%edx - movq %mm0,(%eax) - subl $1,%ecx - leal 8(%eax),%eax - jnz .L019sqr_sse2_loop - emms - ret -.align 16 -.L018sqr_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%ebx - andl $4294967288,%ebx - jz .L020sw_finish -.L021sw_loop: - - movl (%edi),%eax - mull %eax - movl %eax,(%esi) - movl %edx,4(%esi) - - movl 4(%edi),%eax - mull %eax - movl %eax,8(%esi) - movl %edx,12(%esi) - - movl 8(%edi),%eax - mull %eax - movl %eax,16(%esi) - movl %edx,20(%esi) - - movl 12(%edi),%eax - mull %eax - movl %eax,24(%esi) - movl %edx,28(%esi) - - movl 16(%edi),%eax - mull %eax - movl %eax,32(%esi) - movl %edx,36(%esi) - - movl 20(%edi),%eax - mull %eax - movl %eax,40(%esi) - movl %edx,44(%esi) - - movl 24(%edi),%eax - mull %eax - movl %eax,48(%esi) - movl %edx,52(%esi) - - movl 28(%edi),%eax - mull %eax - movl %eax,56(%esi) - movl %edx,60(%esi) - - addl $32,%edi - addl $64,%esi - subl $8,%ebx - jnz .L021sw_loop -.L020sw_finish: - movl 28(%esp),%ebx - andl $7,%ebx - jz .L022sw_end - - movl (%edi),%eax - mull %eax - movl %eax,(%esi) - decl %ebx - movl %edx,4(%esi) - jz .L022sw_end - - movl 4(%edi),%eax - mull %eax - movl %eax,8(%esi) - decl %ebx - movl %edx,12(%esi) - jz .L022sw_end - - movl 8(%edi),%eax - mull %eax - movl %eax,16(%esi) - decl %ebx - movl %edx,20(%esi) - jz .L022sw_end - - movl 12(%edi),%eax - mull %eax - movl %eax,24(%esi) - decl %ebx - movl %edx,28(%esi) - jz .L022sw_end - - movl 16(%edi),%eax - mull %eax - movl 
%eax,32(%esi) - decl %ebx - movl %edx,36(%esi) - jz .L022sw_end - - movl 20(%edi),%eax - mull %eax - movl %eax,40(%esi) - decl %ebx - movl %edx,44(%esi) - jz .L022sw_end - - movl 24(%edi),%eax - mull %eax - movl %eax,48(%esi) - movl %edx,52(%esi) -.L022sw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_sqr_words,.-.L_bn_sqr_words_begin -.globl bn_div_words -.hidden bn_div_words -.type bn_div_words,@function -.align 16 -bn_div_words: -.L_bn_div_words_begin: - movl 4(%esp),%edx - movl 8(%esp),%eax - movl 12(%esp),%ecx - divl %ecx - ret -.size bn_div_words,.-.L_bn_div_words_begin -.globl bn_add_words -.hidden bn_add_words -.type bn_add_words,@function -.align 16 -bn_add_words: -.L_bn_add_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz .L023aw_finish -.L024aw_loop: - - movl (%esi),%ecx - movl (%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - - movl 4(%esi),%ecx - movl 4(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - - movl 8(%esi),%ecx - movl 8(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - - movl 12(%esi),%ecx - movl 12(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - - movl 16(%esi),%ecx - movl 16(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - - movl 20(%esi),%ecx - movl 20(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - - movl 24(%esi),%ecx - movl 24(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - - movl 28(%esi),%ecx - movl 28(%edi),%edx - 
addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz .L024aw_loop -.L023aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz .L025aw_end - - movl (%esi),%ecx - movl (%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz .L025aw_end - - movl 4(%esi),%ecx - movl 4(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz .L025aw_end - - movl 8(%esi),%ecx - movl 8(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz .L025aw_end - - movl 12(%esi),%ecx - movl 12(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz .L025aw_end - - movl 16(%esi),%ecx - movl 16(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz .L025aw_end - - movl 20(%esi),%ecx - movl 20(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz .L025aw_end - - movl 24(%esi),%ecx - movl 24(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) -.L025aw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_add_words,.-.L_bn_add_words_begin -.globl bn_sub_words -.hidden bn_sub_words -.type bn_sub_words,@function -.align 16 -bn_sub_words: -.L_bn_sub_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz .L026aw_finish -.L027aw_loop: - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl 
$0,%eax - movl %ecx,(%ebx) - - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - - movl 28(%esi),%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz .L027aw_loop -.L026aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz .L028aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz .L028aw_end - - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz .L028aw_end - - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz .L028aw_end - - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz .L028aw_end - - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - 
adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz .L028aw_end - - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz .L028aw_end - - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) -.L028aw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_sub_words,.-.L_bn_sub_words_begin -.globl bn_sub_part_words -.hidden bn_sub_part_words -.type bn_sub_part_words,@function -.align 16 -bn_sub_part_words: -.L_bn_sub_part_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz .L029aw_finish -.L030aw_loop: - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - - movl 28(%esi),%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl 
%ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz .L030aw_loop -.L029aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz .L031aw_end - - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx -.L031aw_end: - cmpl $0,36(%esp) - je .L032pw_end - movl 36(%esp),%ebp - cmpl $0,%ebp - je .L032pw_end - jge .L033pw_pos - - movl $0,%edx - subl %ebp,%edx - movl %edx,%ebp - andl $4294967288,%ebp - jz .L034pw_neg_finish -.L035pw_neg_loop: - - movl $0,%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl 
%ecx,(%ebx) - - movl $0,%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - - movl $0,%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - - movl $0,%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - - movl $0,%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - - movl $0,%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - - movl $0,%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - - movl $0,%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz .L035pw_neg_loop -.L034pw_neg_finish: - movl 36(%esp),%edx - movl $0,%ebp - subl %edx,%ebp - andl $7,%ebp - jz .L032pw_end - - movl $0,%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz .L032pw_end - - movl $0,%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz .L032pw_end - - movl $0,%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz .L032pw_end - - movl $0,%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz .L032pw_end - - movl $0,%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz 
.L032pw_end - - movl $0,%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz .L032pw_end - - movl $0,%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - jmp .L032pw_end -.L033pw_pos: - andl $4294967288,%ebp - jz .L036pw_pos_finish -.L037pw_pos_loop: - - movl (%esi),%ecx - subl %eax,%ecx - movl %ecx,(%ebx) - jnc .L038pw_nc0 - - movl 4(%esi),%ecx - subl %eax,%ecx - movl %ecx,4(%ebx) - jnc .L039pw_nc1 - - movl 8(%esi),%ecx - subl %eax,%ecx - movl %ecx,8(%ebx) - jnc .L040pw_nc2 - - movl 12(%esi),%ecx - subl %eax,%ecx - movl %ecx,12(%ebx) - jnc .L041pw_nc3 - - movl 16(%esi),%ecx - subl %eax,%ecx - movl %ecx,16(%ebx) - jnc .L042pw_nc4 - - movl 20(%esi),%ecx - subl %eax,%ecx - movl %ecx,20(%ebx) - jnc .L043pw_nc5 - - movl 24(%esi),%ecx - subl %eax,%ecx - movl %ecx,24(%ebx) - jnc .L044pw_nc6 - - movl 28(%esi),%ecx - subl %eax,%ecx - movl %ecx,28(%ebx) - jnc .L045pw_nc7 - - addl $32,%esi - addl $32,%ebx - subl $8,%ebp - jnz .L037pw_pos_loop -.L036pw_pos_finish: - movl 36(%esp),%ebp - andl $7,%ebp - jz .L032pw_end - - movl (%esi),%ecx - subl %eax,%ecx - movl %ecx,(%ebx) - jnc .L046pw_tail_nc0 - decl %ebp - jz .L032pw_end - - movl 4(%esi),%ecx - subl %eax,%ecx - movl %ecx,4(%ebx) - jnc .L047pw_tail_nc1 - decl %ebp - jz .L032pw_end - - movl 8(%esi),%ecx - subl %eax,%ecx - movl %ecx,8(%ebx) - jnc .L048pw_tail_nc2 - decl %ebp - jz .L032pw_end - - movl 12(%esi),%ecx - subl %eax,%ecx - movl %ecx,12(%ebx) - jnc .L049pw_tail_nc3 - decl %ebp - jz .L032pw_end - - movl 16(%esi),%ecx - subl %eax,%ecx - movl %ecx,16(%ebx) - jnc .L050pw_tail_nc4 - decl %ebp - jz .L032pw_end - - movl 20(%esi),%ecx - subl %eax,%ecx - movl %ecx,20(%ebx) - jnc .L051pw_tail_nc5 - decl %ebp - jz .L032pw_end - - movl 24(%esi),%ecx - subl %eax,%ecx - movl %ecx,24(%ebx) - jnc .L052pw_tail_nc6 - movl $1,%eax - jmp .L032pw_end -.L053pw_nc_loop: - 
movl (%esi),%ecx - movl %ecx,(%ebx) -.L038pw_nc0: - movl 4(%esi),%ecx - movl %ecx,4(%ebx) -.L039pw_nc1: - movl 8(%esi),%ecx - movl %ecx,8(%ebx) -.L040pw_nc2: - movl 12(%esi),%ecx - movl %ecx,12(%ebx) -.L041pw_nc3: - movl 16(%esi),%ecx - movl %ecx,16(%ebx) -.L042pw_nc4: - movl 20(%esi),%ecx - movl %ecx,20(%ebx) -.L043pw_nc5: - movl 24(%esi),%ecx - movl %ecx,24(%ebx) -.L044pw_nc6: - movl 28(%esi),%ecx - movl %ecx,28(%ebx) -.L045pw_nc7: - - addl $32,%esi - addl $32,%ebx - subl $8,%ebp - jnz .L053pw_nc_loop - movl 36(%esp),%ebp - andl $7,%ebp - jz .L054pw_nc_end - movl (%esi),%ecx - movl %ecx,(%ebx) -.L046pw_tail_nc0: - decl %ebp - jz .L054pw_nc_end - movl 4(%esi),%ecx - movl %ecx,4(%ebx) -.L047pw_tail_nc1: - decl %ebp - jz .L054pw_nc_end - movl 8(%esi),%ecx - movl %ecx,8(%ebx) -.L048pw_tail_nc2: - decl %ebp - jz .L054pw_nc_end - movl 12(%esi),%ecx - movl %ecx,12(%ebx) -.L049pw_tail_nc3: - decl %ebp - jz .L054pw_nc_end - movl 16(%esi),%ecx - movl %ecx,16(%ebx) -.L050pw_tail_nc4: - decl %ebp - jz .L054pw_nc_end - movl 20(%esi),%ecx - movl %ecx,20(%ebx) -.L051pw_tail_nc5: - decl %ebp - jz .L054pw_nc_end - movl 24(%esi),%ecx - movl %ecx,24(%ebx) -.L052pw_tail_nc6: -.L054pw_nc_end: - movl $0,%eax -.L032pw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_sub_part_words,.-.L_bn_sub_part_words_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/co-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/co-586.S deleted file mode 100644 index 837b0cb5c7..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/co-586.S +++ /dev/null @@ -1,1266 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl bn_mul_comba8 -.hidden bn_mul_comba8 -.type bn_mul_comba8,@function -.align 16 -bn_mul_comba8: -.L_bn_mul_comba8_begin: - pushl %esi - movl 12(%esp),%esi - pushl %edi - movl 20(%esp),%edi - pushl %ebp - pushl %ebx - xorl %ebx,%ebx - movl (%esi),%eax - xorl %ecx,%ecx - movl (%edi),%edx - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,(%eax) - movl 4(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,4(%eax) - movl 8(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,8(%eax) - movl 12(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,12(%eax) - movl 16(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 12(%edi),%edx - adcl $0,%ebx - - mull %edx - addl 
%eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,16(%eax) - movl 20(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 12(%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 16(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,20(%eax) - movl 24(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 16(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 12(%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 16(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 20(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,24(%eax) - movl 28(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 16(%esi),%eax - adcl %edx,%ebp 
- movl 12(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 20(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 24(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - movl %ecx,28(%eax) - movl 28(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 24(%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 16(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 12(%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 24(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 28(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - movl %ebp,32(%eax) - movl 28(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 24(%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 16(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 16(%esi),%eax - adcl %edx,%ecx - movl 20(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 12(%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 28(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl 
%edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - movl %ebx,36(%eax) - movl 28(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esi),%eax - adcl %edx,%ebp - movl 20(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 16(%esi),%eax - adcl %edx,%ebp - movl 24(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - movl %ecx,40(%eax) - movl 28(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 24(%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esi),%eax - adcl %edx,%ebx - movl 24(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 28(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - movl %ebp,44(%eax) - movl 28(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 24(%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 28(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - movl %ebx,48(%eax) - movl 28(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - movl %ecx,52(%eax) - movl 28(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - adcl $0,%ecx - movl %ebp,56(%eax) - - - movl %ebx,60(%eax) - popl %ebx - popl %ebp - popl %edi - 
popl %esi - ret -.size bn_mul_comba8,.-.L_bn_mul_comba8_begin -.globl bn_mul_comba4 -.hidden bn_mul_comba4 -.type bn_mul_comba4,@function -.align 16 -bn_mul_comba4: -.L_bn_mul_comba4_begin: - pushl %esi - movl 12(%esp),%esi - pushl %edi - movl 20(%esp),%edi - pushl %ebp - pushl %ebx - xorl %ebx,%ebx - movl (%esi),%eax - xorl %ecx,%ecx - movl (%edi),%edx - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,(%eax) - movl 4(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,4(%eax) - movl 8(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,8(%eax) - movl 12(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - movl %ebx,12(%eax) - movl 12(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 12(%edi),%edx - adcl $0,%ebx - - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - movl %ecx,16(%eax) - movl 
12(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - movl %ebp,20(%eax) - movl 12(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - adcl $0,%ebp - movl %ebx,24(%eax) - - - movl %ecx,28(%eax) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.size bn_mul_comba4,.-.L_bn_mul_comba4_begin -.globl bn_sqr_comba8 -.hidden bn_sqr_comba8 -.type bn_sqr_comba8,@function -.align 16 -bn_sqr_comba8: -.L_bn_sqr_comba8_begin: - pushl %esi - pushl %edi - pushl %ebp - pushl %ebx - movl 20(%esp),%edi - movl 24(%esp),%esi - xorl %ebx,%ebx - xorl %ecx,%ecx - movl (%esi),%eax - - xorl %ebp,%ebp - - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,(%edi) - movl 4(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - movl %ecx,4(%edi) - movl (%esi),%edx - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 4(%esi),%eax - adcl $0,%ecx - - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl (%esi),%edx - adcl $0,%ecx - movl %ebp,8(%edi) - movl 12(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 8(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 16(%esi),%eax - adcl $0,%ebp - movl %ebx,12(%edi) - movl (%esi),%edx - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 12(%esi),%eax - adcl $0,%ebx - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl 
$0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl (%esi),%edx - adcl $0,%ebx - movl %ecx,16(%edi) - movl 20(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 16(%esi),%eax - adcl $0,%ecx - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 12(%esi),%eax - adcl $0,%ecx - movl 8(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl %ebp,20(%edi) - movl (%esi),%edx - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 20(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 16(%esi),%eax - adcl $0,%ebp - movl 8(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 12(%esi),%eax - adcl $0,%ebp - - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,24(%edi) - movl 28(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 24(%esi),%eax - adcl $0,%ebx - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 20(%esi),%eax - adcl $0,%ebx - movl 8(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 16(%esi),%eax - adcl $0,%ebx - movl 12(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 28(%esi),%eax - adcl $0,%ebx - movl %ecx,28(%edi) - movl 4(%esi),%edx - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl 
%edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl 8(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 20(%esi),%eax - adcl $0,%ecx - movl 12(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 16(%esi),%eax - adcl $0,%ecx - - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl 8(%esi),%edx - adcl $0,%ecx - movl %ebp,32(%edi) - movl 28(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 24(%esi),%eax - adcl $0,%ebp - movl 12(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 20(%esi),%eax - adcl $0,%ebp - movl 16(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 28(%esi),%eax - adcl $0,%ebp - movl %ebx,36(%edi) - movl 12(%esi),%edx - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 24(%esi),%eax - adcl $0,%ebx - movl 16(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 20(%esi),%eax - adcl $0,%ebx - - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl 16(%esi),%edx - adcl $0,%ebx - movl %ecx,40(%edi) - movl 28(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl 20(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 28(%esi),%eax - adcl $0,%ecx - movl %ebp,44(%edi) - movl 20(%esi),%edx - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 24(%esi),%eax - adcl $0,%ebp - - mull %eax - addl %eax,%ebx 
- adcl %edx,%ecx - movl 24(%esi),%edx - adcl $0,%ebp - movl %ebx,48(%edi) - movl 28(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 28(%esi),%eax - adcl $0,%ebx - movl %ecx,52(%edi) - - - xorl %ecx,%ecx - - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - adcl $0,%ecx - movl %ebp,56(%edi) - - movl %ebx,60(%edi) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin -.globl bn_sqr_comba4 -.hidden bn_sqr_comba4 -.type bn_sqr_comba4,@function -.align 16 -bn_sqr_comba4: -.L_bn_sqr_comba4_begin: - pushl %esi - pushl %edi - pushl %ebp - pushl %ebx - movl 20(%esp),%edi - movl 24(%esp),%esi - xorl %ebx,%ebx - xorl %ecx,%ecx - movl (%esi),%eax - - xorl %ebp,%ebp - - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,(%edi) - movl 4(%esi),%eax - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - movl %ecx,4(%edi) - movl (%esi),%edx - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 4(%esi),%eax - adcl $0,%ecx - - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl (%esi),%edx - adcl $0,%ecx - movl %ebp,8(%edi) - movl 12(%esi),%eax - - - xorl %ebp,%ebp - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 8(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 12(%esi),%eax - adcl $0,%ebp - movl %ebx,12(%edi) - movl 4(%esi),%edx - - - xorl %ebx,%ebx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%edx - adcl $0,%ebx - movl %ecx,16(%edi) - movl 
12(%esi),%eax - - - xorl %ecx,%ecx - - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 12(%esi),%eax - adcl $0,%ecx - movl %ebp,20(%edi) - - - xorl %ebp,%ebp - - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - adcl $0,%ebp - movl %ebx,24(%edi) - - movl %ecx,28(%edi) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S deleted file mode 100644 index 3e5f2d7e54..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S +++ /dev/null @@ -1,294 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl gcm_gmult_ssse3 -.hidden gcm_gmult_ssse3 -.type gcm_gmult_ssse3,@function -.align 16 -gcm_gmult_ssse3: -.L_gcm_gmult_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - movdqu (%edi),%xmm0 - call .L000pic_point -.L000pic_point: - popl %eax - movdqa .Lreverse_bytes-.L000pic_point(%eax),%xmm7 - movdqa .Llow4_mask-.L000pic_point(%eax),%xmm2 -.byte 102,15,56,0,199 - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -.L001loop_row_1: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L001loop_row_1 - 
pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -.L002loop_row_2: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L002loop_row_2 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $6,%eax -.L003loop_row_3: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L003loop_row_3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,0,215 - movdqu %xmm2,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size gcm_gmult_ssse3,.-.L_gcm_gmult_ssse3_begin -.globl gcm_ghash_ssse3 -.hidden gcm_ghash_ssse3 -.type gcm_ghash_ssse3,@function -.align 16 -gcm_ghash_ssse3: -.L_gcm_ghash_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - movl 28(%esp),%edx - movl 32(%esp),%ecx - movdqu (%edi),%xmm0 - call .L004pic_point -.L004pic_point: - popl %ebx - movdqa 
.Lreverse_bytes-.L004pic_point(%ebx),%xmm7 - andl $-16,%ecx -.byte 102,15,56,0,199 - pxor %xmm3,%xmm3 -.L005loop_ghash: - movdqa .Llow4_mask-.L004pic_point(%ebx),%xmm2 - movdqu (%edx),%xmm1 -.byte 102,15,56,0,207 - pxor %xmm1,%xmm0 - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - pxor %xmm2,%xmm2 - movl $5,%eax -.L006loop_row_4: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L006loop_row_4 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -.L007loop_row_5: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L007loop_row_5 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $6,%eax -.L008loop_row_6: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz .L008loop_row_6 - pxor 
%xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movdqa %xmm2,%xmm0 - leal -256(%esi),%esi - leal 16(%edx),%edx - subl $16,%ecx - jnz .L005loop_ghash -.byte 102,15,56,0,199 - movdqu %xmm0,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size gcm_ghash_ssse3,.-.L_gcm_ghash_ssse3_begin -.align 16 -.Lreverse_bytes: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.align 16 -.Llow4_mask: -.long 252645135,252645135,252645135,252645135 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-x86.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-x86.S deleted file mode 100644 index 7016235c0a..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/ghash-x86.S +++ /dev/null @@ -1,1075 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl gcm_gmult_4bit_mmx -.hidden gcm_gmult_4bit_mmx -.type gcm_gmult_4bit_mmx,@function -.align 16 -gcm_gmult_4bit_mmx: -.L_gcm_gmult_4bit_mmx_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - call .L000pic_point -.L000pic_point: - popl %eax - leal .Lrem_4bit-.L000pic_point(%eax),%eax - movzbl 15(%edi),%ebx - xorl %ecx,%ecx - movl %ebx,%edx - movb %dl,%cl - movl $14,%ebp - shlb $4,%cl - andl $240,%edx - movq 8(%esi,%ecx,1),%mm0 - movq (%esi,%ecx,1),%mm1 - movd %mm0,%ebx - jmp .L001mmx_loop -.align 16 -.L001mmx_loop: - psrlq $4,%mm0 - andl $15,%ebx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%edx,1),%mm0 - movb (%edi,%ebp,1),%cl - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - decl %ebp - movd %mm0,%ebx - pxor (%esi,%edx,1),%mm1 - movl %ecx,%edx - pxor %mm2,%mm0 - js .L002mmx_break - shlb $4,%cl - andl $15,%ebx - psrlq $4,%mm0 - andl $240,%edx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%ecx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%ecx,1),%mm1 - pxor %mm2,%mm0 - jmp .L001mmx_loop -.align 16 -.L002mmx_break: - shlb $4,%cl - andl $15,%ebx - psrlq $4,%mm0 - andl $240,%edx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%ecx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%ecx,1),%mm1 - pxor %mm2,%mm0 - psrlq $4,%mm0 - andl $15,%ebx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%edx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%edx,1),%mm1 - pxor %mm2,%mm0 - psrlq $32,%mm0 - movd %mm1,%edx - psrlq $32,%mm1 - movd %mm0,%ecx - movd %mm1,%ebp - bswap %ebx - bswap %edx - bswap %ecx - bswap %ebp - emms - movl %ebx,12(%edi) - movl %edx,4(%edi) - movl %ecx,8(%edi) - movl %ebp,(%edi) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size gcm_gmult_4bit_mmx,.-.L_gcm_gmult_4bit_mmx_begin -.globl gcm_ghash_4bit_mmx -.hidden 
gcm_ghash_4bit_mmx -.type gcm_ghash_4bit_mmx,@function -.align 16 -gcm_ghash_4bit_mmx: -.L_gcm_ghash_4bit_mmx_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%eax - movl 24(%esp),%ebx - movl 28(%esp),%ecx - movl 32(%esp),%edx - movl %esp,%ebp - call .L003pic_point -.L003pic_point: - popl %esi - leal .Lrem_8bit-.L003pic_point(%esi),%esi - subl $544,%esp - andl $-64,%esp - subl $16,%esp - addl %ecx,%edx - movl %eax,544(%esp) - movl %edx,552(%esp) - movl %ebp,556(%esp) - addl $128,%ebx - leal 144(%esp),%edi - leal 400(%esp),%ebp - movl -120(%ebx),%edx - movq -120(%ebx),%mm0 - movq -128(%ebx),%mm3 - shll $4,%edx - movb %dl,(%esp) - movl -104(%ebx),%edx - movq -104(%ebx),%mm2 - movq -112(%ebx),%mm5 - movq %mm0,-128(%edi) - psrlq $4,%mm0 - movq %mm3,(%edi) - movq %mm3,%mm7 - psrlq $4,%mm3 - shll $4,%edx - movb %dl,1(%esp) - movl -88(%ebx),%edx - movq -88(%ebx),%mm1 - psllq $60,%mm7 - movq -96(%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-120(%edi) - psrlq $4,%mm2 - movq %mm5,8(%edi) - movq %mm5,%mm6 - movq %mm0,-128(%ebp) - psrlq $4,%mm5 - movq %mm3,(%ebp) - shll $4,%edx - movb %dl,2(%esp) - movl -72(%ebx),%edx - movq -72(%ebx),%mm0 - psllq $60,%mm6 - movq -80(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-112(%edi) - psrlq $4,%mm1 - movq %mm4,16(%edi) - movq %mm4,%mm7 - movq %mm2,-120(%ebp) - psrlq $4,%mm4 - movq %mm5,8(%ebp) - shll $4,%edx - movb %dl,3(%esp) - movl -56(%ebx),%edx - movq -56(%ebx),%mm2 - psllq $60,%mm7 - movq -64(%ebx),%mm5 - por %mm7,%mm1 - movq %mm0,-104(%edi) - psrlq $4,%mm0 - movq %mm3,24(%edi) - movq %mm3,%mm6 - movq %mm1,-112(%ebp) - psrlq $4,%mm3 - movq %mm4,16(%ebp) - shll $4,%edx - movb %dl,4(%esp) - movl -40(%ebx),%edx - movq -40(%ebx),%mm1 - psllq $60,%mm6 - movq -48(%ebx),%mm4 - por %mm6,%mm0 - movq %mm2,-96(%edi) - psrlq $4,%mm2 - movq %mm5,32(%edi) - movq %mm5,%mm7 - movq %mm0,-104(%ebp) - psrlq $4,%mm5 - movq %mm3,24(%ebp) - shll $4,%edx - movb %dl,5(%esp) - movl -24(%ebx),%edx - movq -24(%ebx),%mm0 - psllq $60,%mm7 - movq 
-32(%ebx),%mm3 - por %mm7,%mm2 - movq %mm1,-88(%edi) - psrlq $4,%mm1 - movq %mm4,40(%edi) - movq %mm4,%mm6 - movq %mm2,-96(%ebp) - psrlq $4,%mm4 - movq %mm5,32(%ebp) - shll $4,%edx - movb %dl,6(%esp) - movl -8(%ebx),%edx - movq -8(%ebx),%mm2 - psllq $60,%mm6 - movq -16(%ebx),%mm5 - por %mm6,%mm1 - movq %mm0,-80(%edi) - psrlq $4,%mm0 - movq %mm3,48(%edi) - movq %mm3,%mm7 - movq %mm1,-88(%ebp) - psrlq $4,%mm3 - movq %mm4,40(%ebp) - shll $4,%edx - movb %dl,7(%esp) - movl 8(%ebx),%edx - movq 8(%ebx),%mm1 - psllq $60,%mm7 - movq (%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-72(%edi) - psrlq $4,%mm2 - movq %mm5,56(%edi) - movq %mm5,%mm6 - movq %mm0,-80(%ebp) - psrlq $4,%mm5 - movq %mm3,48(%ebp) - shll $4,%edx - movb %dl,8(%esp) - movl 24(%ebx),%edx - movq 24(%ebx),%mm0 - psllq $60,%mm6 - movq 16(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-64(%edi) - psrlq $4,%mm1 - movq %mm4,64(%edi) - movq %mm4,%mm7 - movq %mm2,-72(%ebp) - psrlq $4,%mm4 - movq %mm5,56(%ebp) - shll $4,%edx - movb %dl,9(%esp) - movl 40(%ebx),%edx - movq 40(%ebx),%mm2 - psllq $60,%mm7 - movq 32(%ebx),%mm5 - por %mm7,%mm1 - movq %mm0,-56(%edi) - psrlq $4,%mm0 - movq %mm3,72(%edi) - movq %mm3,%mm6 - movq %mm1,-64(%ebp) - psrlq $4,%mm3 - movq %mm4,64(%ebp) - shll $4,%edx - movb %dl,10(%esp) - movl 56(%ebx),%edx - movq 56(%ebx),%mm1 - psllq $60,%mm6 - movq 48(%ebx),%mm4 - por %mm6,%mm0 - movq %mm2,-48(%edi) - psrlq $4,%mm2 - movq %mm5,80(%edi) - movq %mm5,%mm7 - movq %mm0,-56(%ebp) - psrlq $4,%mm5 - movq %mm3,72(%ebp) - shll $4,%edx - movb %dl,11(%esp) - movl 72(%ebx),%edx - movq 72(%ebx),%mm0 - psllq $60,%mm7 - movq 64(%ebx),%mm3 - por %mm7,%mm2 - movq %mm1,-40(%edi) - psrlq $4,%mm1 - movq %mm4,88(%edi) - movq %mm4,%mm6 - movq %mm2,-48(%ebp) - psrlq $4,%mm4 - movq %mm5,80(%ebp) - shll $4,%edx - movb %dl,12(%esp) - movl 88(%ebx),%edx - movq 88(%ebx),%mm2 - psllq $60,%mm6 - movq 80(%ebx),%mm5 - por %mm6,%mm1 - movq %mm0,-32(%edi) - psrlq $4,%mm0 - movq %mm3,96(%edi) - movq %mm3,%mm7 - movq %mm1,-40(%ebp) - psrlq $4,%mm3 
- movq %mm4,88(%ebp) - shll $4,%edx - movb %dl,13(%esp) - movl 104(%ebx),%edx - movq 104(%ebx),%mm1 - psllq $60,%mm7 - movq 96(%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-24(%edi) - psrlq $4,%mm2 - movq %mm5,104(%edi) - movq %mm5,%mm6 - movq %mm0,-32(%ebp) - psrlq $4,%mm5 - movq %mm3,96(%ebp) - shll $4,%edx - movb %dl,14(%esp) - movl 120(%ebx),%edx - movq 120(%ebx),%mm0 - psllq $60,%mm6 - movq 112(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-16(%edi) - psrlq $4,%mm1 - movq %mm4,112(%edi) - movq %mm4,%mm7 - movq %mm2,-24(%ebp) - psrlq $4,%mm4 - movq %mm5,104(%ebp) - shll $4,%edx - movb %dl,15(%esp) - psllq $60,%mm7 - por %mm7,%mm1 - movq %mm0,-8(%edi) - psrlq $4,%mm0 - movq %mm3,120(%edi) - movq %mm3,%mm6 - movq %mm1,-16(%ebp) - psrlq $4,%mm3 - movq %mm4,112(%ebp) - psllq $60,%mm6 - por %mm6,%mm0 - movq %mm0,-8(%ebp) - movq %mm3,120(%ebp) - movq (%eax),%mm6 - movl 8(%eax),%ebx - movl 12(%eax),%edx -.align 16 -.L004outer: - xorl 12(%ecx),%edx - xorl 8(%ecx),%ebx - pxor (%ecx),%mm6 - leal 16(%ecx),%ecx - movl %ebx,536(%esp) - movq %mm6,528(%esp) - movl %ecx,548(%esp) - xorl %eax,%eax - roll $8,%edx - movb %dl,%al - movl %eax,%ebp - andb $15,%al - shrl $4,%ebp - pxor %mm0,%mm0 - roll $8,%edx - pxor %mm1,%mm1 - pxor %mm2,%mm2 - movq 16(%esp,%eax,8),%mm7 - movq 144(%esp,%eax,8),%mm6 - movb %dl,%al - movd %mm7,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - shrl $4,%edi - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 
536(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 532(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq 
$8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 528(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - 
pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 524(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - pxor 144(%esp,%eax,8),%mm6 - xorb (%esp,%ebp,1),%bl - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - movzbl %bl,%ebx - pxor %mm2,%mm2 - psllq $4,%mm1 - movd %mm7,%ecx - psrlq $4,%mm7 - movq %mm6,%mm3 - psrlq $4,%mm6 - shll $4,%ecx - pxor 16(%esp,%edi,8),%mm7 - psllq $60,%mm3 - movzbl %cl,%ecx - pxor %mm3,%mm7 - pxor 144(%esp,%edi,8),%mm6 - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor %mm1,%mm6 - movd %mm7,%edx - pinsrw $3,(%esi,%ecx,2),%mm2 - psllq $12,%mm0 - pxor %mm0,%mm6 - psrlq $32,%mm7 - pxor %mm2,%mm6 - movl 548(%esp),%ecx - movd %mm7,%ebx - movq %mm6,%mm3 - psllw $8,%mm6 - psrlw $8,%mm3 - por %mm3,%mm6 - bswap %edx - pshufw $27,%mm6,%mm6 - bswap %ebx - cmpl 552(%esp),%ecx - jne .L004outer - movl 544(%esp),%eax - movl %edx,12(%eax) - movl %ebx,8(%eax) - movq %mm6,(%eax) - movl 556(%esp),%esp - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size gcm_ghash_4bit_mmx,.-.L_gcm_ghash_4bit_mmx_begin -.globl gcm_init_clmul -.hidden gcm_init_clmul -.type gcm_init_clmul,@function -.align 16 -gcm_init_clmul: -.L_gcm_init_clmul_begin: - movl 
4(%esp),%edx - movl 8(%esp),%eax - call .L005pic -.L005pic: - popl %ecx - leal .Lbswap-.L005pic(%ecx),%ecx - movdqu (%eax),%xmm2 - pshufd $78,%xmm2,%xmm2 - pshufd $255,%xmm2,%xmm4 - movdqa %xmm2,%xmm3 - psllq $1,%xmm2 - pxor %xmm5,%xmm5 - psrlq $63,%xmm3 - pcmpgtd %xmm4,%xmm5 - pslldq $8,%xmm3 - por %xmm3,%xmm2 - pand 16(%ecx),%xmm5 - pxor %xmm5,%xmm2 - movdqa %xmm2,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pshufd $78,%xmm2,%xmm4 - pxor %xmm0,%xmm3 - pxor %xmm2,%xmm4 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm2,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm2,%xmm3 - movdqu %xmm2,(%edx) - pxor %xmm0,%xmm4 - movdqu %xmm0,16(%edx) -.byte 102,15,58,15,227,8 - movdqu %xmm4,32(%edx) - ret -.size gcm_init_clmul,.-.L_gcm_init_clmul_begin -.globl gcm_gmult_clmul -.hidden gcm_gmult_clmul -.type gcm_gmult_clmul,@function -.align 16 -gcm_gmult_clmul: -.L_gcm_gmult_clmul_begin: - movl 4(%esp),%eax - movl 8(%esp),%edx - call .L006pic -.L006pic: - popl %ecx - leal .Lbswap-.L006pic(%ecx),%ecx - movdqu (%eax),%xmm0 - movdqa (%ecx),%xmm5 - movups (%edx),%xmm2 -.byte 102,15,56,0,197 - movups 32(%edx),%xmm4 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - 
psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,197 - movdqu %xmm0,(%eax) - ret -.size gcm_gmult_clmul,.-.L_gcm_gmult_clmul_begin -.globl gcm_ghash_clmul -.hidden gcm_ghash_clmul -.type gcm_ghash_clmul,@function -.align 16 -gcm_ghash_clmul: -.L_gcm_ghash_clmul_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%eax - movl 24(%esp),%edx - movl 28(%esp),%esi - movl 32(%esp),%ebx - call .L007pic -.L007pic: - popl %ecx - leal .Lbswap-.L007pic(%ecx),%ecx - movdqu (%eax),%xmm0 - movdqa (%ecx),%xmm5 - movdqu (%edx),%xmm2 -.byte 102,15,56,0,197 - subl $16,%ebx - jz .L008odd_tail - movdqu (%esi),%xmm3 - movdqu 16(%esi),%xmm6 -.byte 102,15,56,0,221 -.byte 102,15,56,0,245 - movdqu 32(%edx),%xmm5 - pxor %xmm3,%xmm0 - pshufd $78,%xmm6,%xmm3 - movdqa %xmm6,%xmm7 - pxor %xmm6,%xmm3 - leal 32(%esi),%esi -.byte 102,15,58,68,242,0 -.byte 102,15,58,68,250,17 -.byte 102,15,58,68,221,0 - movups 16(%edx),%xmm2 - nop - subl $32,%ebx - jbe .L009even_tail - jmp .L010mod_loop -.align 32 -.L010mod_loop: - pshufd $78,%xmm0,%xmm4 - movdqa %xmm0,%xmm1 - pxor %xmm0,%xmm4 - nop -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,229,16 - movups (%edx),%xmm2 - xorps %xmm6,%xmm0 - movdqa (%ecx),%xmm5 - xorps %xmm7,%xmm1 - movdqu (%esi),%xmm7 - pxor %xmm0,%xmm3 - movdqu 16(%esi),%xmm6 - pxor %xmm1,%xmm3 -.byte 102,15,56,0,253 - pxor %xmm3,%xmm4 - movdqa %xmm4,%xmm3 - psrldq $8,%xmm4 - pslldq $8,%xmm3 - pxor %xmm4,%xmm1 - pxor %xmm3,%xmm0 -.byte 102,15,56,0,245 - pxor %xmm7,%xmm1 - movdqa %xmm6,%xmm7 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 -.byte 102,15,58,68,242,0 - movups 
32(%edx),%xmm5 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - pshufd $78,%xmm7,%xmm3 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm7,%xmm3 - pxor %xmm4,%xmm1 -.byte 102,15,58,68,250,17 - movups 16(%edx),%xmm2 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,58,68,221,0 - leal 32(%esi),%esi - subl $32,%ebx - ja .L010mod_loop -.L009even_tail: - pshufd $78,%xmm0,%xmm4 - movdqa %xmm0,%xmm1 - pxor %xmm0,%xmm4 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,229,16 - movdqa (%ecx),%xmm5 - xorps %xmm6,%xmm0 - xorps %xmm7,%xmm1 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - pxor %xmm3,%xmm4 - movdqa %xmm4,%xmm3 - psrldq $8,%xmm4 - pslldq $8,%xmm3 - pxor %xmm4,%xmm1 - pxor %xmm3,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - testl %ebx,%ebx - jnz .L011done - movups (%edx),%xmm2 -.L008odd_tail: - movdqu (%esi),%xmm3 -.byte 102,15,56,0,221 - pxor %xmm3,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pshufd $78,%xmm2,%xmm4 - pxor %xmm0,%xmm3 - pxor %xmm2,%xmm4 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor 
%xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.L011done: -.byte 102,15,56,0,197 - movdqu %xmm0,(%eax) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin -.align 64 -.Lbswap: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 -.align 64 -.Lrem_8bit: -.value 0,450,900,582,1800,1738,1164,1358 -.value 3600,4050,3476,3158,2328,2266,2716,2910 -.value 7200,7650,8100,7782,6952,6890,6316,6510 -.value 4656,5106,4532,4214,5432,5370,5820,6014 -.value 14400,14722,15300,14854,16200,16010,15564,15630 -.value 13904,14226,13780,13334,12632,12442,13020,13086 -.value 9312,9634,10212,9766,9064,8874,8428,8494 -.value 10864,11186,10740,10294,11640,11450,12028,12094 -.value 28800,28994,29444,29382,30600,30282,29708,30158 -.value 32400,32594,32020,31958,31128,30810,31260,31710 -.value 27808,28002,28452,28390,27560,27242,26668,27118 -.value 25264,25458,24884,24822,26040,25722,26172,26622 -.value 18624,18690,19268,19078,20424,19978,19532,19854 -.value 18128,18194,17748,17558,16856,16410,16988,17310 -.value 21728,21794,22372,22182,21480,21034,20588,20910 -.value 23280,23346,22900,22710,24056,23610,24188,24510 -.value 57600,57538,57988,58182,58888,59338,58764,58446 -.value 61200,61138,60564,60758,59416,59866,60316,59998 -.value 64800,64738,65188,65382,64040,64490,63916,63598 -.value 62256,62194,61620,61814,62520,62970,63420,63102 -.value 55616,55426,56004,56070,56904,57226,56780,56334 -.value 55120,54930,54484,54550,53336,53658,54236,53790 -.value 50528,50338,50916,50982,49768,50090,49644,49198 -.value 52080,51890,51444,51510,52344,52666,53244,52798 -.value 37248,36930,37380,37830,38536,38730,38156,38094 -.value 40848,40530,39956,40406,39064,39258,39708,39646 -.value 36256,35938,36388,36838,35496,35690,35116,35054 -.value 33712,33394,32820,33270,33976,34170,34620,34558 -.value 43456,43010,43588,43910,44744,44810,44364,44174 -.value 42960,42514,42068,42390,41176,41242,41820,41630 -.value 
46560,46114,46692,47014,45800,45866,45420,45230 -.value 48112,47666,47220,47542,48376,48442,49020,48830 -.align 64 -.Lrem_4bit: -.long 0,0,0,471859200,0,943718400,0,610271232 -.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208 -.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008 -.long 0,2441084928,0,2376073216,0,2847932416,0,3051356160 -.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 -.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 -.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 -.byte 0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/md5-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/md5-586.S deleted file mode 100644 index 6de8ff886a..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/md5-586.S +++ /dev/null @@ -1,688 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl md5_block_asm_data_order -.hidden md5_block_asm_data_order -.type md5_block_asm_data_order,@function -.align 16 -md5_block_asm_data_order: -.L_md5_block_asm_data_order_begin: - pushl %esi - pushl %edi - movl 12(%esp),%edi - movl 16(%esp),%esi - movl 20(%esp),%ecx - pushl %ebp - shll $6,%ecx - pushl %ebx - addl %esi,%ecx - subl $64,%ecx - movl (%edi),%eax - pushl %ecx - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx -.L000start: - - - movl %ecx,%edi - movl (%esi),%ebp - - xorl %edx,%edi - andl %ebx,%edi - leal 3614090360(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 4(%esi),%ebp - addl %ebx,%eax - - xorl %ecx,%edi - andl %eax,%edi - leal 3905402710(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 8(%esi),%ebp - addl %eax,%edx - - xorl %ebx,%edi - andl %edx,%edi - leal 606105819(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 12(%esi),%ebp - addl %edx,%ecx - - xorl %eax,%edi - andl %ecx,%edi - leal 3250441966(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 16(%esi),%ebp - addl %ecx,%ebx - - xorl %edx,%edi - andl %ebx,%edi - leal 4118548399(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 20(%esi),%ebp - addl %ebx,%eax - - xorl %ecx,%edi - andl %eax,%edi - leal 1200080426(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 24(%esi),%ebp - addl %eax,%edx - - xorl %ebx,%edi - andl %edx,%edi - leal 2821735955(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 28(%esi),%ebp - addl %edx,%ecx - - xorl %eax,%edi - andl %ecx,%edi - leal 4249261313(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 32(%esi),%ebp - addl %ecx,%ebx - - xorl 
%edx,%edi - andl %ebx,%edi - leal 1770035416(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 36(%esi),%ebp - addl %ebx,%eax - - xorl %ecx,%edi - andl %eax,%edi - leal 2336552879(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 40(%esi),%ebp - addl %eax,%edx - - xorl %ebx,%edi - andl %edx,%edi - leal 4294925233(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 44(%esi),%ebp - addl %edx,%ecx - - xorl %eax,%edi - andl %ecx,%edi - leal 2304563134(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 48(%esi),%ebp - addl %ecx,%ebx - - xorl %edx,%edi - andl %ebx,%edi - leal 1804603682(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 52(%esi),%ebp - addl %ebx,%eax - - xorl %ecx,%edi - andl %eax,%edi - leal 4254626195(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 56(%esi),%ebp - addl %eax,%edx - - xorl %ebx,%edi - andl %edx,%edi - leal 2792965006(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 60(%esi),%ebp - addl %edx,%ecx - - xorl %eax,%edi - andl %ecx,%edi - leal 1236535329(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 4(%esi),%ebp - addl %ecx,%ebx - - - - leal 4129170786(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 24(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - - leal 3225465664(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 44(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - - leal 643717713(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl (%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - - leal 3921069994(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl 
%eax,%edi - movl 20(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - - leal 3593408605(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 40(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - - leal 38016083(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 60(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - - leal 3634488961(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 16(%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - - leal 3889429448(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 36(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - - leal 568446438(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 56(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - - leal 3275163606(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 12(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - - leal 4107603335(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 32(%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - - leal 1163531501(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 52(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - - leal 2850285829(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 8(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - - leal 4243563512(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 28(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - - leal 1735328473(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 48(%esi),%ebp - xorl %eax,%edi - addl 
%edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - - leal 2368359562(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 20(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - - - - xorl %edx,%edi - xorl %ebx,%edi - leal 4294588738(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 32(%esi),%ebp - movl %ebx,%edi - - leal 2272392833(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 44(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - - xorl %ebx,%edi - xorl %edx,%edi - leal 1839030562(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 56(%esi),%ebp - movl %edx,%edi - - leal 4259657740(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 4(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - - xorl %edx,%edi - xorl %ebx,%edi - leal 2763975236(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 16(%esi),%ebp - movl %ebx,%edi - - leal 1272893353(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 28(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - - xorl %ebx,%edi - xorl %edx,%edi - leal 4139469664(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 40(%esi),%ebp - movl %edx,%edi - - leal 3200236656(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 52(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - - xorl %edx,%edi - xorl %ebx,%edi - leal 681279174(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl (%esi),%ebp - movl %ebx,%edi - - leal 3936430074(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 12(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - - xorl %ebx,%edi - xorl %edx,%edi - leal 3572445317(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 24(%esi),%ebp - movl %edx,%edi - - leal 
76029189(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 36(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - - xorl %edx,%edi - xorl %ebx,%edi - leal 3654602809(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 48(%esi),%ebp - movl %ebx,%edi - - leal 3873151461(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 60(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - - xorl %ebx,%edi - xorl %edx,%edi - leal 530742520(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 8(%esi),%ebp - movl %edx,%edi - - leal 3299628645(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl (%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $23,%ebx - addl %ecx,%ebx - - - - xorl %edx,%edi - orl %ebx,%edi - leal 4096336452(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 28(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - - orl %eax,%edi - leal 1126891415(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 56(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - - orl %edx,%edi - leal 2878612391(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 20(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - - orl %ecx,%edi - leal 4237533241(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 48(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - - orl %ebx,%edi - leal 1700485571(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 12(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - - orl %eax,%edi - leal 2399980690(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 40(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - - orl %edx,%edi - leal 4293915773(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 4(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll 
$15,%ecx - xorl %eax,%edi - addl %edx,%ecx - - orl %ecx,%edi - leal 2240044497(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 32(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - - orl %ebx,%edi - leal 1873313359(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 60(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - - orl %eax,%edi - leal 4264355552(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 24(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - - orl %edx,%edi - leal 2734768916(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 52(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - - orl %ecx,%edi - leal 1309151649(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 16(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - - orl %ebx,%edi - leal 4149444226(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 44(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - - orl %eax,%edi - leal 3174756917(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 8(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - - orl %edx,%edi - leal 718787259(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 36(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - - orl %ecx,%edi - leal 3951481745(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 24(%esp),%ebp - addl %edi,%ebx - addl $64,%esi - roll $21,%ebx - movl (%ebp),%edi - addl %ecx,%ebx - addl %edi,%eax - movl 4(%ebp),%edi - addl %edi,%ebx - movl 8(%ebp),%edi - addl %edi,%ecx - movl 12(%ebp),%edi - addl %edi,%edx - movl %eax,(%ebp) - movl %ebx,4(%ebp) - movl (%esp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - cmpl %esi,%edi - jae .L000start - popl %eax - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.size 
md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha1-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha1-586.S deleted file mode 100644 index 4449e38f72..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha1-586.S +++ /dev/null @@ -1,3808 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl sha1_block_data_order -.hidden sha1_block_data_order -.type sha1_block_data_order,@function -.align 16 -sha1_block_data_order: -.L_sha1_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call .L000pic_point -.L000pic_point: - popl %ebp - leal OPENSSL_ia32cap_P-.L000pic_point(%ebp),%esi - leal .LK_XX_XX-.L000pic_point(%ebp),%ebp - movl (%esi),%eax - movl 4(%esi),%edx - testl $512,%edx - jz .L001x86 - movl 8(%esi),%ecx - testl $16777216,%eax - jz .L001x86 - andl $268435456,%edx - andl $1073741824,%eax - orl %edx,%eax - cmpl $1342177280,%eax - je .Lavx_shortcut - jmp .Lssse3_shortcut -.align 16 -.L001x86: - movl 20(%esp),%ebp - movl 24(%esp),%esi - movl 28(%esp),%eax - subl $76,%esp - shll $6,%eax - addl %esi,%eax - movl %eax,104(%esp) - movl 16(%ebp),%edi - jmp .L002loop -.align 16 -.L002loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,(%esp) - movl %ebx,4(%esp) - movl %ecx,8(%esp) - movl %edx,12(%esp) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,16(%esp) - movl %ebx,20(%esp) - movl %ecx,24(%esp) - movl %edx,28(%esp) - movl 32(%esi),%eax - movl 36(%esi),%ebx - movl 40(%esi),%ecx - movl 44(%esi),%edx - bswap %eax - bswap 
%ebx - bswap %ecx - bswap %edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edx,44(%esp) - movl 48(%esi),%eax - movl 52(%esi),%ebx - movl 56(%esi),%ecx - movl 60(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,48(%esp) - movl %ebx,52(%esp) - movl %ecx,56(%esp) - movl %edx,60(%esp) - movl %esi,100(%esp) - movl (%ebp),%eax - movl 4(%ebp),%ebx - movl 8(%ebp),%ecx - movl 12(%ebp),%edx - - movl %ecx,%esi - movl %eax,%ebp - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl (%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 4(%esp),%edx - andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 8(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 12(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - addl %ecx,%ebp - - movl %edi,%ebx - movl %ebp,%ecx - roll $5,%ebp - xorl %esi,%ebx - addl %eax,%ebp - movl 16(%esp),%eax - andl %edx,%ebx - rorl $2,%edx - xorl %esi,%ebx - leal 1518500249(%ebp,%eax,1),%ebp - addl %ebx,%ebp - - movl %edx,%eax - movl %ebp,%ebx - roll $5,%ebp - xorl %edi,%eax - addl %esi,%ebp - movl 20(%esp),%esi - andl %ecx,%eax - rorl $2,%ecx - xorl %edi,%eax - leal 1518500249(%ebp,%esi,1),%ebp - addl %eax,%ebp - - movl %ecx,%esi - movl %ebp,%eax - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl 24(%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 28(%esp),%edx - 
andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 32(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 36(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - addl %ecx,%ebp - - movl %edi,%ebx - movl %ebp,%ecx - roll $5,%ebp - xorl %esi,%ebx - addl %eax,%ebp - movl 40(%esp),%eax - andl %edx,%ebx - rorl $2,%edx - xorl %esi,%ebx - leal 1518500249(%ebp,%eax,1),%ebp - addl %ebx,%ebp - - movl %edx,%eax - movl %ebp,%ebx - roll $5,%ebp - xorl %edi,%eax - addl %esi,%ebp - movl 44(%esp),%esi - andl %ecx,%eax - rorl $2,%ecx - xorl %edi,%eax - leal 1518500249(%ebp,%esi,1),%ebp - addl %eax,%ebp - - movl %ecx,%esi - movl %ebp,%eax - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl 48(%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 52(%esp),%edx - andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 56(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 60(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - movl (%esp),%ebx - addl %ebp,%ecx - - movl %edi,%ebp - xorl 8(%esp),%ebx - xorl %esi,%ebp - xorl 32(%esp),%ebx - andl %edx,%ebp - xorl 52(%esp),%ebx - roll $1,%ebx - xorl %esi,%ebp - addl %ebp,%eax - movl %ecx,%ebp - rorl $2,%edx - movl %ebx,(%esp) - 
roll $5,%ebp - leal 1518500249(%ebx,%eax,1),%ebx - movl 4(%esp),%eax - addl %ebp,%ebx - - movl %edx,%ebp - xorl 12(%esp),%eax - xorl %edi,%ebp - xorl 36(%esp),%eax - andl %ecx,%ebp - xorl 56(%esp),%eax - roll $1,%eax - xorl %edi,%ebp - addl %ebp,%esi - movl %ebx,%ebp - rorl $2,%ecx - movl %eax,4(%esp) - roll $5,%ebp - leal 1518500249(%eax,%esi,1),%eax - movl 8(%esp),%esi - addl %ebp,%eax - - movl %ecx,%ebp - xorl 16(%esp),%esi - xorl %edx,%ebp - xorl 40(%esp),%esi - andl %ebx,%ebp - xorl 60(%esp),%esi - roll $1,%esi - xorl %edx,%ebp - addl %ebp,%edi - movl %eax,%ebp - rorl $2,%ebx - movl %esi,8(%esp) - roll $5,%ebp - leal 1518500249(%esi,%edi,1),%esi - movl 12(%esp),%edi - addl %ebp,%esi - - movl %ebx,%ebp - xorl 20(%esp),%edi - xorl %ecx,%ebp - xorl 44(%esp),%edi - andl %eax,%ebp - xorl (%esp),%edi - roll $1,%edi - xorl %ecx,%ebp - addl %ebp,%edx - movl %esi,%ebp - rorl $2,%eax - movl %edi,12(%esp) - roll $5,%ebp - leal 1518500249(%edi,%edx,1),%edi - movl 16(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 24(%esp),%edx - xorl %eax,%ebp - xorl 48(%esp),%edx - xorl %ebx,%ebp - xorl 4(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,16(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 20(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 28(%esp),%ecx - xorl %esi,%ebp - xorl 52(%esp),%ecx - xorl %eax,%ebp - xorl 8(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,20(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 24(%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 32(%esp),%ebx - xorl %edi,%ebp - xorl 56(%esp),%ebx - xorl %esi,%ebp - xorl 12(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,24(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 28(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 36(%esp),%eax - xorl %edx,%ebp - xorl 60(%esp),%eax - xorl %edi,%ebp - xorl 16(%esp),%eax - roll 
$1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,28(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 32(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl 40(%esp),%esi - xorl %ecx,%ebp - xorl (%esp),%esi - xorl %edx,%ebp - xorl 20(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,32(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 36(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 44(%esp),%edi - xorl %ebx,%ebp - xorl 4(%esp),%edi - xorl %ecx,%ebp - xorl 24(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,36(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl 40(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 48(%esp),%edx - xorl %eax,%ebp - xorl 8(%esp),%edx - xorl %ebx,%ebp - xorl 28(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,40(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 44(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 52(%esp),%ecx - xorl %esi,%ebp - xorl 12(%esp),%ecx - xorl %eax,%ebp - xorl 32(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,44(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 48(%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 56(%esp),%ebx - xorl %edi,%ebp - xorl 16(%esp),%ebx - xorl %esi,%ebp - xorl 36(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,48(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 52(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 60(%esp),%eax - xorl %edx,%ebp - xorl 20(%esp),%eax - xorl %edi,%ebp - xorl 40(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,52(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 56(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl (%esp),%esi - xorl %ecx,%ebp - xorl 24(%esp),%esi - xorl %edx,%ebp - 
xorl 44(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,56(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 60(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 4(%esp),%edi - xorl %ebx,%ebp - xorl 28(%esp),%edi - xorl %ecx,%ebp - xorl 48(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,60(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl (%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 8(%esp),%edx - xorl %eax,%ebp - xorl 32(%esp),%edx - xorl %ebx,%ebp - xorl 52(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 4(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 12(%esp),%ecx - xorl %esi,%ebp - xorl 36(%esp),%ecx - xorl %eax,%ebp - xorl 56(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,4(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 8(%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 16(%esp),%ebx - xorl %edi,%ebp - xorl 40(%esp),%ebx - xorl %esi,%ebp - xorl 60(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,8(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 12(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 20(%esp),%eax - xorl %edx,%ebp - xorl 44(%esp),%eax - xorl %edi,%ebp - xorl (%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,12(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 16(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl 24(%esp),%esi - xorl %ecx,%ebp - xorl 48(%esp),%esi - xorl %edx,%ebp - xorl 4(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,16(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 20(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 28(%esp),%edi - xorl %ebx,%ebp - xorl 52(%esp),%edi - 
xorl %ecx,%ebp - xorl 8(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,20(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl 24(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 32(%esp),%edx - xorl %eax,%ebp - xorl 56(%esp),%edx - xorl %ebx,%ebp - xorl 12(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,24(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 28(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 36(%esp),%ecx - xorl %esi,%ebp - xorl 60(%esp),%ecx - xorl %eax,%ebp - xorl 16(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,28(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 32(%esp),%ebx - addl %ebp,%ecx - - movl %edi,%ebp - xorl 40(%esp),%ebx - xorl %esi,%ebp - xorl (%esp),%ebx - andl %edx,%ebp - xorl 20(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,32(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 36(%esp),%eax - addl %ebp,%ebx - - movl %edx,%ebp - xorl 44(%esp),%eax - xorl %edi,%ebp - xorl 4(%esp),%eax - andl %ecx,%ebp - xorl 24(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,36(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 40(%esp),%esi - addl %ebp,%eax - - movl %ecx,%ebp - xorl 48(%esp),%esi - xorl %edx,%ebp - xorl 8(%esp),%esi - andl %ebx,%ebp - xorl 28(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,40(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 44(%esp),%edi - addl %ebp,%esi - - movl %ebx,%ebp - xorl 52(%esp),%edi - xorl %ecx,%ebp - xorl 12(%esp),%edi - andl %eax,%ebp - xorl 32(%esp),%edi - roll $1,%edi - addl %edx,%ebp - rorl $2,%eax - movl %esi,%edx - roll 
$5,%edx - movl %edi,44(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 48(%esp),%edx - addl %ebp,%edi - - movl %eax,%ebp - xorl 56(%esp),%edx - xorl %ebx,%ebp - xorl 16(%esp),%edx - andl %esi,%ebp - xorl 36(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,48(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 52(%esp),%ecx - addl %ebp,%edx - - movl %esi,%ebp - xorl 60(%esp),%ecx - xorl %eax,%ebp - xorl 20(%esp),%ecx - andl %edi,%ebp - xorl 40(%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,52(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 56(%esp),%ebx - addl %ebp,%ecx - - movl %edi,%ebp - xorl (%esp),%ebx - xorl %esi,%ebp - xorl 24(%esp),%ebx - andl %edx,%ebp - xorl 44(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,56(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 60(%esp),%eax - addl %ebp,%ebx - - movl %edx,%ebp - xorl 4(%esp),%eax - xorl %edi,%ebp - xorl 28(%esp),%eax - andl %ecx,%ebp - xorl 48(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,60(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl (%esp),%esi - addl %ebp,%eax - - movl %ecx,%ebp - xorl 8(%esp),%esi - xorl %edx,%ebp - xorl 32(%esp),%esi - andl %ebx,%ebp - xorl 52(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 4(%esp),%edi - addl %ebp,%esi - - movl %ebx,%ebp - xorl 12(%esp),%edi - xorl %ecx,%ebp - xorl 36(%esp),%edi - andl %eax,%ebp - xorl 56(%esp),%edi - roll $1,%edi - addl %edx,%ebp - 
rorl $2,%eax - movl %esi,%edx - roll $5,%edx - movl %edi,4(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 8(%esp),%edx - addl %ebp,%edi - - movl %eax,%ebp - xorl 16(%esp),%edx - xorl %ebx,%ebp - xorl 40(%esp),%edx - andl %esi,%ebp - xorl 60(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,8(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 12(%esp),%ecx - addl %ebp,%edx - - movl %esi,%ebp - xorl 20(%esp),%ecx - xorl %eax,%ebp - xorl 44(%esp),%ecx - andl %edi,%ebp - xorl (%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,12(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 16(%esp),%ebx - addl %ebp,%ecx - - movl %edi,%ebp - xorl 24(%esp),%ebx - xorl %esi,%ebp - xorl 48(%esp),%ebx - andl %edx,%ebp - xorl 4(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,16(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 20(%esp),%eax - addl %ebp,%ebx - - movl %edx,%ebp - xorl 28(%esp),%eax - xorl %edi,%ebp - xorl 52(%esp),%eax - andl %ecx,%ebp - xorl 8(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,20(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 24(%esp),%esi - addl %ebp,%eax - - movl %ecx,%ebp - xorl 32(%esp),%esi - xorl %edx,%ebp - xorl 56(%esp),%esi - andl %ebx,%ebp - xorl 12(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,24(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 28(%esp),%edi - addl %ebp,%esi - - movl %ebx,%ebp - xorl 36(%esp),%edi - xorl %ecx,%ebp - xorl 60(%esp),%edi - andl %eax,%ebp - xorl 
16(%esp),%edi - roll $1,%edi - addl %edx,%ebp - rorl $2,%eax - movl %esi,%edx - roll $5,%edx - movl %edi,28(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 32(%esp),%edx - addl %ebp,%edi - - movl %eax,%ebp - xorl 40(%esp),%edx - xorl %ebx,%ebp - xorl (%esp),%edx - andl %esi,%ebp - xorl 20(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,32(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 36(%esp),%ecx - addl %ebp,%edx - - movl %esi,%ebp - xorl 44(%esp),%ecx - xorl %eax,%ebp - xorl 4(%esp),%ecx - andl %edi,%ebp - xorl 24(%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,36(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 40(%esp),%ebx - addl %ebp,%ecx - - movl %edi,%ebp - xorl 48(%esp),%ebx - xorl %esi,%ebp - xorl 8(%esp),%ebx - andl %edx,%ebp - xorl 28(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,40(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 44(%esp),%eax - addl %ebp,%ebx - - movl %edx,%ebp - xorl 52(%esp),%eax - xorl %edi,%ebp - xorl 12(%esp),%eax - andl %ecx,%ebp - xorl 32(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,44(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 48(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl 56(%esp),%esi - xorl %ecx,%ebp - xorl 16(%esp),%esi - xorl %edx,%ebp - xorl 36(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,48(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 52(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 60(%esp),%edi - xorl %ebx,%ebp - xorl 20(%esp),%edi - xorl %ecx,%ebp - xorl 
40(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,52(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 56(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl (%esp),%edx - xorl %eax,%ebp - xorl 24(%esp),%edx - xorl %ebx,%ebp - xorl 44(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,56(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 60(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 4(%esp),%ecx - xorl %esi,%ebp - xorl 28(%esp),%ecx - xorl %eax,%ebp - xorl 48(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,60(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl (%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 8(%esp),%ebx - xorl %edi,%ebp - xorl 32(%esp),%ebx - xorl %esi,%ebp - xorl 52(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 4(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 12(%esp),%eax - xorl %edx,%ebp - xorl 36(%esp),%eax - xorl %edi,%ebp - xorl 56(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,4(%esp) - leal 3395469782(%eax,%esi,1),%eax - movl 8(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl 16(%esp),%esi - xorl %ecx,%ebp - xorl 40(%esp),%esi - xorl %edx,%ebp - xorl 60(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,8(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 12(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 20(%esp),%edi - xorl %ebx,%ebp - xorl 44(%esp),%edi - xorl %ecx,%ebp - xorl (%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,12(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 16(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 24(%esp),%edx - xorl %eax,%ebp - xorl 48(%esp),%edx - xorl 
%ebx,%ebp - xorl 4(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,16(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 20(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 28(%esp),%ecx - xorl %esi,%ebp - xorl 52(%esp),%ecx - xorl %eax,%ebp - xorl 8(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,20(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl 24(%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 32(%esp),%ebx - xorl %edi,%ebp - xorl 56(%esp),%ebx - xorl %esi,%ebp - xorl 12(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,24(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 28(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 36(%esp),%eax - xorl %edx,%ebp - xorl 60(%esp),%eax - xorl %edi,%ebp - xorl 16(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,28(%esp) - leal 3395469782(%eax,%esi,1),%eax - movl 32(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl 40(%esp),%esi - xorl %ecx,%ebp - xorl (%esp),%esi - xorl %edx,%ebp - xorl 20(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,32(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 36(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 44(%esp),%edi - xorl %ebx,%ebp - xorl 4(%esp),%edi - xorl %ecx,%ebp - xorl 24(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,36(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 40(%esp),%edx - addl %ebp,%edi - - movl %esi,%ebp - xorl 48(%esp),%edx - xorl %eax,%ebp - xorl 8(%esp),%edx - xorl %ebx,%ebp - xorl 28(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,40(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 44(%esp),%ecx - addl %ebp,%edx - - movl %edi,%ebp - xorl 52(%esp),%ecx - xorl %esi,%ebp - 
xorl 12(%esp),%ecx - xorl %eax,%ebp - xorl 32(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,44(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl 48(%esp),%ebx - addl %ebp,%ecx - - movl %edx,%ebp - xorl 56(%esp),%ebx - xorl %edi,%ebp - xorl 16(%esp),%ebx - xorl %esi,%ebp - xorl 36(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,48(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 52(%esp),%eax - addl %ebp,%ebx - - movl %ecx,%ebp - xorl 60(%esp),%eax - xorl %edx,%ebp - xorl 20(%esp),%eax - xorl %edi,%ebp - xorl 40(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - leal 3395469782(%eax,%esi,1),%eax - movl 56(%esp),%esi - addl %ebp,%eax - - movl %ebx,%ebp - xorl (%esp),%esi - xorl %ecx,%ebp - xorl 24(%esp),%esi - xorl %edx,%ebp - xorl 44(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - leal 3395469782(%esi,%edi,1),%esi - movl 60(%esp),%edi - addl %ebp,%esi - - movl %eax,%ebp - xorl 4(%esp),%edi - xorl %ebx,%ebp - xorl 28(%esp),%edi - xorl %ecx,%ebp - xorl 48(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - leal 3395469782(%edi,%edx,1),%edi - addl %ebp,%edi - movl 96(%esp),%ebp - movl 100(%esp),%edx - addl (%ebp),%edi - addl 4(%ebp),%esi - addl 8(%ebp),%eax - addl 12(%ebp),%ebx - addl 16(%ebp),%ecx - movl %edi,(%ebp) - addl $64,%edx - movl %esi,4(%ebp) - cmpl 104(%esp),%edx - movl %eax,8(%ebp) - movl %ecx,%edi - movl %ebx,12(%ebp) - movl %edx,%esi - movl %ecx,16(%ebp) - jb .L002loop - addl $76,%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size sha1_block_data_order,.-.L_sha1_block_data_order_begin -.hidden _sha1_block_data_order_ssse3 -.type _sha1_block_data_order_ssse3,@function -.align 16 -_sha1_block_data_order_ssse3: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call .L003pic_point -.L003pic_point: - popl %ebp - 
leal .LK_XX_XX-.L003pic_point(%ebp),%ebp -.Lssse3_shortcut: - movdqa (%ebp),%xmm7 - movdqa 16(%ebp),%xmm0 - movdqa 32(%ebp),%xmm1 - movdqa 48(%ebp),%xmm2 - movdqa 64(%ebp),%xmm6 - movl 20(%esp),%edi - movl 24(%esp),%ebp - movl 28(%esp),%edx - movl %esp,%esi - subl $208,%esp - andl $-64,%esp - movdqa %xmm0,112(%esp) - movdqa %xmm1,128(%esp) - movdqa %xmm2,144(%esp) - shll $6,%edx - movdqa %xmm7,160(%esp) - addl %ebp,%edx - movdqa %xmm6,176(%esp) - addl $64,%ebp - movl %edi,192(%esp) - movl %ebp,196(%esp) - movl %edx,200(%esp) - movl %esi,204(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl 16(%edi),%edi - movl %ebx,%esi - movdqu -64(%ebp),%xmm0 - movdqu -48(%ebp),%xmm1 - movdqu -32(%ebp),%xmm2 - movdqu -16(%ebp),%xmm3 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - movdqa %xmm7,96(%esp) -.byte 102,15,56,0,222 - paddd %xmm7,%xmm0 - paddd %xmm7,%xmm1 - paddd %xmm7,%xmm2 - movdqa %xmm0,(%esp) - psubd %xmm7,%xmm0 - movdqa %xmm1,16(%esp) - psubd %xmm7,%xmm1 - movdqa %xmm2,32(%esp) - movl %ecx,%ebp - psubd %xmm7,%xmm2 - xorl %edx,%ebp - pshufd $238,%xmm0,%xmm4 - andl %ebp,%esi - jmp .L004loop -.align 16 -.L004loop: - rorl $2,%ebx - xorl %edx,%esi - movl %eax,%ebp - punpcklqdq %xmm1,%xmm4 - movdqa %xmm3,%xmm6 - addl (%esp),%edi - xorl %ecx,%ebx - paddd %xmm3,%xmm7 - movdqa %xmm0,64(%esp) - roll $5,%eax - addl %esi,%edi - psrldq $4,%xmm6 - andl %ebx,%ebp - xorl %ecx,%ebx - pxor %xmm0,%xmm4 - addl %eax,%edi - rorl $7,%eax - pxor %xmm2,%xmm6 - xorl %ecx,%ebp - movl %edi,%esi - addl 4(%esp),%edx - pxor %xmm6,%xmm4 - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm7,48(%esp) - addl %ebp,%edx - andl %eax,%esi - movdqa %xmm4,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - movdqa %xmm4,%xmm6 - xorl %ebx,%esi - pslldq $12,%xmm0 - paddd %xmm4,%xmm4 - movl %edx,%ebp - addl 8(%esp),%ecx - psrld $31,%xmm6 - xorl %eax,%edi - roll $5,%edx - movdqa %xmm0,%xmm7 - addl %esi,%ecx - andl %edi,%ebp - xorl %eax,%edi - psrld 
$30,%xmm0 - addl %edx,%ecx - rorl $7,%edx - por %xmm6,%xmm4 - xorl %eax,%ebp - movl %ecx,%esi - addl 12(%esp),%ebx - pslld $2,%xmm7 - xorl %edi,%edx - roll $5,%ecx - pxor %xmm0,%xmm4 - movdqa 96(%esp),%xmm0 - addl %ebp,%ebx - andl %edx,%esi - pxor %xmm7,%xmm4 - pshufd $238,%xmm1,%xmm5 - xorl %edi,%edx - addl %ecx,%ebx - rorl $7,%ecx - xorl %edi,%esi - movl %ebx,%ebp - punpcklqdq %xmm2,%xmm5 - movdqa %xmm4,%xmm7 - addl 16(%esp),%eax - xorl %edx,%ecx - paddd %xmm4,%xmm0 - movdqa %xmm1,80(%esp) - roll $5,%ebx - addl %esi,%eax - psrldq $4,%xmm7 - andl %ecx,%ebp - xorl %edx,%ecx - pxor %xmm1,%xmm5 - addl %ebx,%eax - rorl $7,%ebx - pxor %xmm3,%xmm7 - xorl %edx,%ebp - movl %eax,%esi - addl 20(%esp),%edi - pxor %xmm7,%xmm5 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm0,(%esp) - addl %ebp,%edi - andl %ebx,%esi - movdqa %xmm5,%xmm1 - xorl %ecx,%ebx - addl %eax,%edi - rorl $7,%eax - movdqa %xmm5,%xmm7 - xorl %ecx,%esi - pslldq $12,%xmm1 - paddd %xmm5,%xmm5 - movl %edi,%ebp - addl 24(%esp),%edx - psrld $31,%xmm7 - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm1,%xmm0 - addl %esi,%edx - andl %eax,%ebp - xorl %ebx,%eax - psrld $30,%xmm1 - addl %edi,%edx - rorl $7,%edi - por %xmm7,%xmm5 - xorl %ebx,%ebp - movl %edx,%esi - addl 28(%esp),%ecx - pslld $2,%xmm0 - xorl %eax,%edi - roll $5,%edx - pxor %xmm1,%xmm5 - movdqa 112(%esp),%xmm1 - addl %ebp,%ecx - andl %edi,%esi - pxor %xmm0,%xmm5 - pshufd $238,%xmm2,%xmm6 - xorl %eax,%edi - addl %edx,%ecx - rorl $7,%edx - xorl %eax,%esi - movl %ecx,%ebp - punpcklqdq %xmm3,%xmm6 - movdqa %xmm5,%xmm0 - addl 32(%esp),%ebx - xorl %edi,%edx - paddd %xmm5,%xmm1 - movdqa %xmm2,96(%esp) - roll $5,%ecx - addl %esi,%ebx - psrldq $4,%xmm0 - andl %edx,%ebp - xorl %edi,%edx - pxor %xmm2,%xmm6 - addl %ecx,%ebx - rorl $7,%ecx - pxor %xmm4,%xmm0 - xorl %edi,%ebp - movl %ebx,%esi - addl 36(%esp),%eax - pxor %xmm0,%xmm6 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm1,16(%esp) - addl %ebp,%eax - andl %ecx,%esi - movdqa %xmm6,%xmm2 - xorl %edx,%ecx - addl %ebx,%eax 
- rorl $7,%ebx - movdqa %xmm6,%xmm0 - xorl %edx,%esi - pslldq $12,%xmm2 - paddd %xmm6,%xmm6 - movl %eax,%ebp - addl 40(%esp),%edi - psrld $31,%xmm0 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm2,%xmm1 - addl %esi,%edi - andl %ebx,%ebp - xorl %ecx,%ebx - psrld $30,%xmm2 - addl %eax,%edi - rorl $7,%eax - por %xmm0,%xmm6 - xorl %ecx,%ebp - movdqa 64(%esp),%xmm0 - movl %edi,%esi - addl 44(%esp),%edx - pslld $2,%xmm1 - xorl %ebx,%eax - roll $5,%edi - pxor %xmm2,%xmm6 - movdqa 112(%esp),%xmm2 - addl %ebp,%edx - andl %eax,%esi - pxor %xmm1,%xmm6 - pshufd $238,%xmm3,%xmm7 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - xorl %ebx,%esi - movl %edx,%ebp - punpcklqdq %xmm4,%xmm7 - movdqa %xmm6,%xmm1 - addl 48(%esp),%ecx - xorl %eax,%edi - paddd %xmm6,%xmm2 - movdqa %xmm3,64(%esp) - roll $5,%edx - addl %esi,%ecx - psrldq $4,%xmm1 - andl %edi,%ebp - xorl %eax,%edi - pxor %xmm3,%xmm7 - addl %edx,%ecx - rorl $7,%edx - pxor %xmm5,%xmm1 - xorl %eax,%ebp - movl %ecx,%esi - addl 52(%esp),%ebx - pxor %xmm1,%xmm7 - xorl %edi,%edx - roll $5,%ecx - movdqa %xmm2,32(%esp) - addl %ebp,%ebx - andl %edx,%esi - movdqa %xmm7,%xmm3 - xorl %edi,%edx - addl %ecx,%ebx - rorl $7,%ecx - movdqa %xmm7,%xmm1 - xorl %edi,%esi - pslldq $12,%xmm3 - paddd %xmm7,%xmm7 - movl %ebx,%ebp - addl 56(%esp),%eax - psrld $31,%xmm1 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm3,%xmm2 - addl %esi,%eax - andl %ecx,%ebp - xorl %edx,%ecx - psrld $30,%xmm3 - addl %ebx,%eax - rorl $7,%ebx - por %xmm1,%xmm7 - xorl %edx,%ebp - movdqa 80(%esp),%xmm1 - movl %eax,%esi - addl 60(%esp),%edi - pslld $2,%xmm2 - xorl %ecx,%ebx - roll $5,%eax - pxor %xmm3,%xmm7 - movdqa 112(%esp),%xmm3 - addl %ebp,%edi - andl %ebx,%esi - pxor %xmm2,%xmm7 - pshufd $238,%xmm6,%xmm2 - xorl %ecx,%ebx - addl %eax,%edi - rorl $7,%eax - pxor %xmm4,%xmm0 - punpcklqdq %xmm7,%xmm2 - xorl %ecx,%esi - movl %edi,%ebp - addl (%esp),%edx - pxor %xmm1,%xmm0 - movdqa %xmm4,80(%esp) - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm3,%xmm4 - addl %esi,%edx - paddd 
%xmm7,%xmm3 - andl %eax,%ebp - pxor %xmm2,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - xorl %ebx,%ebp - movdqa %xmm0,%xmm2 - movdqa %xmm3,48(%esp) - movl %edx,%esi - addl 4(%esp),%ecx - xorl %eax,%edi - roll $5,%edx - pslld $2,%xmm0 - addl %ebp,%ecx - andl %edi,%esi - psrld $30,%xmm2 - xorl %eax,%edi - addl %edx,%ecx - rorl $7,%edx - xorl %eax,%esi - movl %ecx,%ebp - addl 8(%esp),%ebx - xorl %edi,%edx - roll $5,%ecx - por %xmm2,%xmm0 - addl %esi,%ebx - andl %edx,%ebp - movdqa 96(%esp),%xmm2 - xorl %edi,%edx - addl %ecx,%ebx - addl 12(%esp),%eax - xorl %edi,%ebp - movl %ebx,%esi - pshufd $238,%xmm7,%xmm3 - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 16(%esp),%edi - pxor %xmm5,%xmm1 - punpcklqdq %xmm0,%xmm3 - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - pxor %xmm2,%xmm1 - movdqa %xmm5,96(%esp) - addl %esi,%edi - xorl %ecx,%ebp - movdqa %xmm4,%xmm5 - rorl $7,%ebx - paddd %xmm0,%xmm4 - addl %eax,%edi - pxor %xmm3,%xmm1 - addl 20(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - movdqa %xmm1,%xmm3 - movdqa %xmm4,(%esp) - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - pslld $2,%xmm1 - addl 24(%esp),%ecx - xorl %eax,%esi - psrld $30,%xmm3 - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - por %xmm3,%xmm1 - addl 28(%esp),%ebx - xorl %edi,%ebp - movdqa 64(%esp),%xmm3 - movl %ecx,%esi - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - pshufd $238,%xmm0,%xmm4 - addl %ecx,%ebx - addl 32(%esp),%eax - pxor %xmm6,%xmm2 - punpcklqdq %xmm1,%xmm4 - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - pxor %xmm3,%xmm2 - movdqa %xmm6,64(%esp) - addl %esi,%eax - xorl %edx,%ebp - movdqa 128(%esp),%xmm6 - rorl $7,%ecx - paddd %xmm1,%xmm5 - addl %ebx,%eax - pxor %xmm4,%xmm2 - addl 36(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - movdqa %xmm2,%xmm4 - movdqa %xmm5,16(%esp) - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl 
%eax,%edi - pslld $2,%xmm2 - addl 40(%esp),%edx - xorl %ebx,%esi - psrld $30,%xmm4 - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - por %xmm4,%xmm2 - addl 44(%esp),%ecx - xorl %eax,%ebp - movdqa 80(%esp),%xmm4 - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - pshufd $238,%xmm1,%xmm5 - addl %edx,%ecx - addl 48(%esp),%ebx - pxor %xmm7,%xmm3 - punpcklqdq %xmm2,%xmm5 - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - pxor %xmm4,%xmm3 - movdqa %xmm7,80(%esp) - addl %esi,%ebx - xorl %edi,%ebp - movdqa %xmm6,%xmm7 - rorl $7,%edx - paddd %xmm2,%xmm6 - addl %ecx,%ebx - pxor %xmm5,%xmm3 - addl 52(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - movdqa %xmm3,%xmm5 - movdqa %xmm6,32(%esp) - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - pslld $2,%xmm3 - addl 56(%esp),%edi - xorl %ecx,%esi - psrld $30,%xmm5 - movl %eax,%ebp - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - por %xmm5,%xmm3 - addl 60(%esp),%edx - xorl %ebx,%ebp - movdqa 96(%esp),%xmm5 - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - pshufd $238,%xmm2,%xmm6 - addl %edi,%edx - addl (%esp),%ecx - pxor %xmm0,%xmm4 - punpcklqdq %xmm3,%xmm6 - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - pxor %xmm5,%xmm4 - movdqa %xmm0,96(%esp) - addl %esi,%ecx - xorl %eax,%ebp - movdqa %xmm7,%xmm0 - rorl $7,%edi - paddd %xmm3,%xmm7 - addl %edx,%ecx - pxor %xmm6,%xmm4 - addl 4(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - movdqa %xmm4,%xmm6 - movdqa %xmm7,48(%esp) - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl %ecx,%ebx - pslld $2,%xmm4 - addl 8(%esp),%eax - xorl %edx,%esi - psrld $30,%xmm6 - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - por %xmm6,%xmm4 - addl 12(%esp),%edi - xorl %ecx,%ebp - movdqa 64(%esp),%xmm6 - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl 
%ecx,%esi - rorl $7,%ebx - pshufd $238,%xmm3,%xmm7 - addl %eax,%edi - addl 16(%esp),%edx - pxor %xmm1,%xmm5 - punpcklqdq %xmm4,%xmm7 - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - pxor %xmm6,%xmm5 - movdqa %xmm1,64(%esp) - addl %esi,%edx - xorl %ebx,%ebp - movdqa %xmm0,%xmm1 - rorl $7,%eax - paddd %xmm4,%xmm0 - addl %edi,%edx - pxor %xmm7,%xmm5 - addl 20(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - movdqa %xmm5,%xmm7 - movdqa %xmm0,(%esp) - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - pslld $2,%xmm5 - addl 24(%esp),%ebx - xorl %edi,%esi - psrld $30,%xmm7 - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - por %xmm7,%xmm5 - addl 28(%esp),%eax - movdqa 80(%esp),%xmm7 - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - pshufd $238,%xmm4,%xmm0 - addl %ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 32(%esp),%edi - pxor %xmm2,%xmm6 - punpcklqdq %xmm5,%xmm0 - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - pxor %xmm7,%xmm6 - movdqa %xmm2,80(%esp) - movl %eax,%ebp - xorl %ecx,%esi - roll $5,%eax - movdqa %xmm1,%xmm2 - addl %esi,%edi - paddd %xmm5,%xmm1 - xorl %ebx,%ebp - pxor %xmm0,%xmm6 - xorl %ecx,%ebx - addl %eax,%edi - addl 36(%esp),%edx - andl %ebx,%ebp - movdqa %xmm6,%xmm0 - movdqa %xmm1,16(%esp) - xorl %ecx,%ebx - rorl $7,%eax - movl %edi,%esi - xorl %ebx,%ebp - roll $5,%edi - pslld $2,%xmm6 - addl %ebp,%edx - xorl %eax,%esi - psrld $30,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - addl 40(%esp),%ecx - andl %eax,%esi - xorl %ebx,%eax - rorl $7,%edi - por %xmm0,%xmm6 - movl %edx,%ebp - xorl %eax,%esi - movdqa 96(%esp),%xmm0 - roll $5,%edx - addl %esi,%ecx - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - pshufd $238,%xmm5,%xmm1 - addl 44(%esp),%ebx - andl %edi,%ebp - xorl %eax,%edi - rorl $7,%edx - movl %ecx,%esi - xorl %edi,%ebp - roll $5,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - addl 48(%esp),%eax - pxor 
%xmm3,%xmm7 - punpcklqdq %xmm6,%xmm1 - andl %edx,%esi - xorl %edi,%edx - rorl $7,%ecx - pxor %xmm0,%xmm7 - movdqa %xmm3,96(%esp) - movl %ebx,%ebp - xorl %edx,%esi - roll $5,%ebx - movdqa 144(%esp),%xmm3 - addl %esi,%eax - paddd %xmm6,%xmm2 - xorl %ecx,%ebp - pxor %xmm1,%xmm7 - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%esp),%edi - andl %ecx,%ebp - movdqa %xmm7,%xmm1 - movdqa %xmm2,32(%esp) - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%ebp - roll $5,%eax - pslld $2,%xmm7 - addl %ebp,%edi - xorl %ebx,%esi - psrld $30,%xmm1 - xorl %ecx,%ebx - addl %eax,%edi - addl 56(%esp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - por %xmm1,%xmm7 - movl %edi,%ebp - xorl %ebx,%esi - movdqa 64(%esp),%xmm1 - roll $5,%edi - addl %esi,%edx - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - pshufd $238,%xmm6,%xmm2 - addl 60(%esp),%ecx - andl %eax,%ebp - xorl %ebx,%eax - rorl $7,%edi - movl %edx,%esi - xorl %eax,%ebp - roll $5,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - addl (%esp),%ebx - pxor %xmm4,%xmm0 - punpcklqdq %xmm7,%xmm2 - andl %edi,%esi - xorl %eax,%edi - rorl $7,%edx - pxor %xmm1,%xmm0 - movdqa %xmm4,64(%esp) - movl %ecx,%ebp - xorl %edi,%esi - roll $5,%ecx - movdqa %xmm3,%xmm4 - addl %esi,%ebx - paddd %xmm7,%xmm3 - xorl %edx,%ebp - pxor %xmm2,%xmm0 - xorl %edi,%edx - addl %ecx,%ebx - addl 4(%esp),%eax - andl %edx,%ebp - movdqa %xmm0,%xmm2 - movdqa %xmm3,48(%esp) - xorl %edi,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - pslld $2,%xmm0 - addl %ebp,%eax - xorl %ecx,%esi - psrld $30,%xmm2 - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%esp),%edi - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - por %xmm2,%xmm0 - movl %eax,%ebp - xorl %ecx,%esi - movdqa 80(%esp),%xmm2 - roll $5,%eax - addl %esi,%edi - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - pshufd $238,%xmm7,%xmm3 - addl 12(%esp),%edx - andl %ebx,%ebp - xorl %ecx,%ebx - rorl $7,%eax - movl %edi,%esi - xorl %ebx,%ebp - roll $5,%edi - addl 
%ebp,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - addl 16(%esp),%ecx - pxor %xmm5,%xmm1 - punpcklqdq %xmm0,%xmm3 - andl %eax,%esi - xorl %ebx,%eax - rorl $7,%edi - pxor %xmm2,%xmm1 - movdqa %xmm5,80(%esp) - movl %edx,%ebp - xorl %eax,%esi - roll $5,%edx - movdqa %xmm4,%xmm5 - addl %esi,%ecx - paddd %xmm0,%xmm4 - xorl %edi,%ebp - pxor %xmm3,%xmm1 - xorl %eax,%edi - addl %edx,%ecx - addl 20(%esp),%ebx - andl %edi,%ebp - movdqa %xmm1,%xmm3 - movdqa %xmm4,(%esp) - xorl %eax,%edi - rorl $7,%edx - movl %ecx,%esi - xorl %edi,%ebp - roll $5,%ecx - pslld $2,%xmm1 - addl %ebp,%ebx - xorl %edx,%esi - psrld $30,%xmm3 - xorl %edi,%edx - addl %ecx,%ebx - addl 24(%esp),%eax - andl %edx,%esi - xorl %edi,%edx - rorl $7,%ecx - por %xmm3,%xmm1 - movl %ebx,%ebp - xorl %edx,%esi - movdqa 96(%esp),%xmm3 - roll $5,%ebx - addl %esi,%eax - xorl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - pshufd $238,%xmm0,%xmm4 - addl 28(%esp),%edi - andl %ecx,%ebp - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%ebp - roll $5,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - addl 32(%esp),%edx - pxor %xmm6,%xmm2 - punpcklqdq %xmm1,%xmm4 - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - pxor %xmm3,%xmm2 - movdqa %xmm6,96(%esp) - movl %edi,%ebp - xorl %ebx,%esi - roll $5,%edi - movdqa %xmm5,%xmm6 - addl %esi,%edx - paddd %xmm1,%xmm5 - xorl %eax,%ebp - pxor %xmm4,%xmm2 - xorl %ebx,%eax - addl %edi,%edx - addl 36(%esp),%ecx - andl %eax,%ebp - movdqa %xmm2,%xmm4 - movdqa %xmm5,16(%esp) - xorl %ebx,%eax - rorl $7,%edi - movl %edx,%esi - xorl %eax,%ebp - roll $5,%edx - pslld $2,%xmm2 - addl %ebp,%ecx - xorl %edi,%esi - psrld $30,%xmm4 - xorl %eax,%edi - addl %edx,%ecx - addl 40(%esp),%ebx - andl %edi,%esi - xorl %eax,%edi - rorl $7,%edx - por %xmm4,%xmm2 - movl %ecx,%ebp - xorl %edi,%esi - movdqa 64(%esp),%xmm4 - roll $5,%ecx - addl %esi,%ebx - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - pshufd $238,%xmm1,%xmm5 - addl 44(%esp),%eax - andl %edx,%ebp - 
xorl %edi,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - addl %ebx,%eax - addl 48(%esp),%edi - pxor %xmm7,%xmm3 - punpcklqdq %xmm2,%xmm5 - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - pxor %xmm4,%xmm3 - movdqa %xmm7,64(%esp) - addl %esi,%edi - xorl %ecx,%ebp - movdqa %xmm6,%xmm7 - rorl $7,%ebx - paddd %xmm2,%xmm6 - addl %eax,%edi - pxor %xmm5,%xmm3 - addl 52(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - movdqa %xmm3,%xmm5 - movdqa %xmm6,32(%esp) - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - pslld $2,%xmm3 - addl 56(%esp),%ecx - xorl %eax,%esi - psrld $30,%xmm5 - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - por %xmm5,%xmm3 - addl 60(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl %ecx,%ebx - addl (%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - paddd %xmm3,%xmm7 - addl %ebx,%eax - addl 4(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - movdqa %xmm7,48(%esp) - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 8(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - addl 12(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - movl 196(%esp),%ebp - cmpl 200(%esp),%ebp - je .L005done - movdqa 160(%esp),%xmm7 - movdqa 176(%esp),%xmm6 - movdqu (%ebp),%xmm0 - movdqu 16(%ebp),%xmm1 - movdqu 32(%ebp),%xmm2 - movdqu 48(%ebp),%xmm3 - addl $64,%ebp -.byte 102,15,56,0,198 - movl %ebp,196(%esp) - movdqa %xmm7,96(%esp) - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx -.byte 102,15,56,0,206 - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl 
%ebx,%esi - paddd %xmm7,%xmm0 - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - movdqa %xmm0,(%esp) - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - psubd %xmm7,%xmm0 - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi -.byte 102,15,56,0,214 - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - paddd %xmm7,%xmm1 - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - movdqa %xmm1,16(%esp) - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - psubd %xmm7,%xmm1 - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax -.byte 102,15,56,0,222 - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - paddd %xmm7,%xmm2 - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - movdqa %xmm2,32(%esp) - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - psubd %xmm7,%xmm2 - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - rorl $7,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %ecx,%ebx - movl %edx,12(%ebp) - xorl %edx,%ebx - movl %edi,16(%ebp) - movl %esi,%ebp - pshufd $238,%xmm0,%xmm4 - andl %ebx,%esi - movl 
%ebp,%ebx - jmp .L004loop -.align 16 -.L005done: - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - rorl $7,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - movl 204(%esp),%esp - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - movl %edi,16(%ebp) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size 
_sha1_block_data_order_ssse3,.-_sha1_block_data_order_ssse3 -.hidden _sha1_block_data_order_avx -.type _sha1_block_data_order_avx,@function -.align 16 -_sha1_block_data_order_avx: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call .L006pic_point -.L006pic_point: - popl %ebp - leal .LK_XX_XX-.L006pic_point(%ebp),%ebp -.Lavx_shortcut: - vzeroall - vmovdqa (%ebp),%xmm7 - vmovdqa 16(%ebp),%xmm0 - vmovdqa 32(%ebp),%xmm1 - vmovdqa 48(%ebp),%xmm2 - vmovdqa 64(%ebp),%xmm6 - movl 20(%esp),%edi - movl 24(%esp),%ebp - movl 28(%esp),%edx - movl %esp,%esi - subl $208,%esp - andl $-64,%esp - vmovdqa %xmm0,112(%esp) - vmovdqa %xmm1,128(%esp) - vmovdqa %xmm2,144(%esp) - shll $6,%edx - vmovdqa %xmm7,160(%esp) - addl %ebp,%edx - vmovdqa %xmm6,176(%esp) - addl $64,%ebp - movl %edi,192(%esp) - movl %ebp,196(%esp) - movl %edx,200(%esp) - movl %esi,204(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl 16(%edi),%edi - movl %ebx,%esi - vmovdqu -64(%ebp),%xmm0 - vmovdqu -48(%ebp),%xmm1 - vmovdqu -32(%ebp),%xmm2 - vmovdqu -16(%ebp),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - vpshufb %xmm6,%xmm1,%xmm1 - vpshufb %xmm6,%xmm2,%xmm2 - vmovdqa %xmm7,96(%esp) - vpshufb %xmm6,%xmm3,%xmm3 - vpaddd %xmm7,%xmm0,%xmm4 - vpaddd %xmm7,%xmm1,%xmm5 - vpaddd %xmm7,%xmm2,%xmm6 - vmovdqa %xmm4,(%esp) - movl %ecx,%ebp - vmovdqa %xmm5,16(%esp) - xorl %edx,%ebp - vmovdqa %xmm6,32(%esp) - andl %ebp,%esi - jmp .L007loop -.align 16 -.L007loop: - shrdl $2,%ebx,%ebx - xorl %edx,%esi - vpalignr $8,%xmm0,%xmm1,%xmm4 - movl %eax,%ebp - addl (%esp),%edi - vpaddd %xmm3,%xmm7,%xmm7 - vmovdqa %xmm0,64(%esp) - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrldq $4,%xmm3,%xmm6 - addl %esi,%edi - andl %ebx,%ebp - vpxor %xmm0,%xmm4,%xmm4 - xorl %ecx,%ebx - addl %eax,%edi - vpxor %xmm2,%xmm6,%xmm6 - shrdl $7,%eax,%eax - xorl %ecx,%ebp - vmovdqa %xmm7,48(%esp) - movl %edi,%esi - addl 4(%esp),%edx - vpxor %xmm6,%xmm4,%xmm4 - xorl %ebx,%eax - shldl $5,%edi,%edi - addl %ebp,%edx - andl %eax,%esi 
- vpsrld $31,%xmm4,%xmm6 - xorl %ebx,%eax - addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%esi - vpslldq $12,%xmm4,%xmm0 - vpaddd %xmm4,%xmm4,%xmm4 - movl %edx,%ebp - addl 8(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpsrld $30,%xmm0,%xmm7 - vpor %xmm6,%xmm4,%xmm4 - addl %esi,%ecx - andl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - vpslld $2,%xmm0,%xmm0 - shrdl $7,%edx,%edx - xorl %eax,%ebp - vpxor %xmm7,%xmm4,%xmm4 - movl %ecx,%esi - addl 12(%esp),%ebx - xorl %edi,%edx - shldl $5,%ecx,%ecx - vpxor %xmm0,%xmm4,%xmm4 - addl %ebp,%ebx - andl %edx,%esi - vmovdqa 96(%esp),%xmm0 - xorl %edi,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %edi,%esi - vpalignr $8,%xmm1,%xmm2,%xmm5 - movl %ebx,%ebp - addl 16(%esp),%eax - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqa %xmm1,80(%esp) - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrldq $4,%xmm4,%xmm7 - addl %esi,%eax - andl %ecx,%ebp - vpxor %xmm1,%xmm5,%xmm5 - xorl %edx,%ecx - addl %ebx,%eax - vpxor %xmm3,%xmm7,%xmm7 - shrdl $7,%ebx,%ebx - xorl %edx,%ebp - vmovdqa %xmm0,(%esp) - movl %eax,%esi - addl 20(%esp),%edi - vpxor %xmm7,%xmm5,%xmm5 - xorl %ecx,%ebx - shldl $5,%eax,%eax - addl %ebp,%edi - andl %ebx,%esi - vpsrld $31,%xmm5,%xmm7 - xorl %ecx,%ebx - addl %eax,%edi - shrdl $7,%eax,%eax - xorl %ecx,%esi - vpslldq $12,%xmm5,%xmm1 - vpaddd %xmm5,%xmm5,%xmm5 - movl %edi,%ebp - addl 24(%esp),%edx - xorl %ebx,%eax - shldl $5,%edi,%edi - vpsrld $30,%xmm1,%xmm0 - vpor %xmm7,%xmm5,%xmm5 - addl %esi,%edx - andl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - vpslld $2,%xmm1,%xmm1 - shrdl $7,%edi,%edi - xorl %ebx,%ebp - vpxor %xmm0,%xmm5,%xmm5 - movl %edx,%esi - addl 28(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpxor %xmm1,%xmm5,%xmm5 - addl %ebp,%ecx - andl %edi,%esi - vmovdqa 112(%esp),%xmm1 - xorl %eax,%edi - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - vpalignr $8,%xmm2,%xmm3,%xmm6 - movl %ecx,%ebp - addl 32(%esp),%ebx - vpaddd %xmm5,%xmm1,%xmm1 - vmovdqa %xmm2,96(%esp) - xorl %edi,%edx - shldl $5,%ecx,%ecx - 
vpsrldq $4,%xmm5,%xmm0 - addl %esi,%ebx - andl %edx,%ebp - vpxor %xmm2,%xmm6,%xmm6 - xorl %edi,%edx - addl %ecx,%ebx - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%ecx,%ecx - xorl %edi,%ebp - vmovdqa %xmm1,16(%esp) - movl %ebx,%esi - addl 36(%esp),%eax - vpxor %xmm0,%xmm6,%xmm6 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - addl %ebp,%eax - andl %ecx,%esi - vpsrld $31,%xmm6,%xmm0 - xorl %edx,%ecx - addl %ebx,%eax - shrdl $7,%ebx,%ebx - xorl %edx,%esi - vpslldq $12,%xmm6,%xmm2 - vpaddd %xmm6,%xmm6,%xmm6 - movl %eax,%ebp - addl 40(%esp),%edi - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm1 - vpor %xmm0,%xmm6,%xmm6 - addl %esi,%edi - andl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - vpslld $2,%xmm2,%xmm2 - vmovdqa 64(%esp),%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%ebp - vpxor %xmm1,%xmm6,%xmm6 - movl %edi,%esi - addl 44(%esp),%edx - xorl %ebx,%eax - shldl $5,%edi,%edi - vpxor %xmm2,%xmm6,%xmm6 - addl %ebp,%edx - andl %eax,%esi - vmovdqa 112(%esp),%xmm2 - xorl %ebx,%eax - addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%esi - vpalignr $8,%xmm3,%xmm4,%xmm7 - movl %edx,%ebp - addl 48(%esp),%ecx - vpaddd %xmm6,%xmm2,%xmm2 - vmovdqa %xmm3,64(%esp) - xorl %eax,%edi - shldl $5,%edx,%edx - vpsrldq $4,%xmm6,%xmm1 - addl %esi,%ecx - andl %edi,%ebp - vpxor %xmm3,%xmm7,%xmm7 - xorl %eax,%edi - addl %edx,%ecx - vpxor %xmm5,%xmm1,%xmm1 - shrdl $7,%edx,%edx - xorl %eax,%ebp - vmovdqa %xmm2,32(%esp) - movl %ecx,%esi - addl 52(%esp),%ebx - vpxor %xmm1,%xmm7,%xmm7 - xorl %edi,%edx - shldl $5,%ecx,%ecx - addl %ebp,%ebx - andl %edx,%esi - vpsrld $31,%xmm7,%xmm1 - xorl %edi,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %edi,%esi - vpslldq $12,%xmm7,%xmm3 - vpaddd %xmm7,%xmm7,%xmm7 - movl %ebx,%ebp - addl 56(%esp),%eax - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm2 - vpor %xmm1,%xmm7,%xmm7 - addl %esi,%eax - andl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - vmovdqa 80(%esp),%xmm1 - shrdl $7,%ebx,%ebx - xorl %edx,%ebp - vpxor %xmm2,%xmm7,%xmm7 - 
movl %eax,%esi - addl 60(%esp),%edi - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpxor %xmm3,%xmm7,%xmm7 - addl %ebp,%edi - andl %ebx,%esi - vmovdqa 112(%esp),%xmm3 - xorl %ecx,%ebx - addl %eax,%edi - vpalignr $8,%xmm6,%xmm7,%xmm2 - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%esi - movl %edi,%ebp - addl (%esp),%edx - vpxor %xmm1,%xmm0,%xmm0 - vmovdqa %xmm4,80(%esp) - xorl %ebx,%eax - shldl $5,%edi,%edi - vmovdqa %xmm3,%xmm4 - vpaddd %xmm7,%xmm3,%xmm3 - addl %esi,%edx - andl %eax,%ebp - vpxor %xmm2,%xmm0,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%ebp - vpsrld $30,%xmm0,%xmm2 - vmovdqa %xmm3,48(%esp) - movl %edx,%esi - addl 4(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpslld $2,%xmm0,%xmm0 - addl %ebp,%ecx - andl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - movl %ecx,%ebp - addl 8(%esp),%ebx - vpor %xmm2,%xmm0,%xmm0 - xorl %edi,%edx - shldl $5,%ecx,%ecx - vmovdqa 96(%esp),%xmm2 - addl %esi,%ebx - andl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 12(%esp),%eax - xorl %edi,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm7,%xmm0,%xmm3 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm5,96(%esp) - addl %esi,%edi - xorl %ecx,%ebp - vmovdqa %xmm4,%xmm5 - vpaddd %xmm0,%xmm4,%xmm4 - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpxor %xmm3,%xmm1,%xmm1 - addl 20(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - vpsrld $30,%xmm1,%xmm3 - vmovdqa %xmm4,(%esp) - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpslld $2,%xmm1,%xmm1 - addl 24(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vpor %xmm3,%xmm1,%xmm1 - addl 28(%esp),%ebx - xorl %edi,%ebp - vmovdqa 64(%esp),%xmm3 - movl %ecx,%esi - 
shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpalignr $8,%xmm0,%xmm1,%xmm4 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - vpxor %xmm3,%xmm2,%xmm2 - vmovdqa %xmm6,64(%esp) - addl %esi,%eax - xorl %edx,%ebp - vmovdqa 128(%esp),%xmm6 - vpaddd %xmm1,%xmm5,%xmm5 - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpxor %xmm4,%xmm2,%xmm2 - addl 36(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm4 - vmovdqa %xmm5,16(%esp) - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpslld $2,%xmm2,%xmm2 - addl 40(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - vpor %xmm4,%xmm2,%xmm2 - addl 44(%esp),%ecx - xorl %eax,%ebp - vmovdqa 80(%esp),%xmm4 - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - vpalignr $8,%xmm1,%xmm2,%xmm5 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - vpxor %xmm4,%xmm3,%xmm3 - vmovdqa %xmm7,80(%esp) - addl %esi,%ebx - xorl %edi,%ebp - vmovdqa %xmm6,%xmm7 - vpaddd %xmm2,%xmm6,%xmm6 - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpxor %xmm5,%xmm3,%xmm3 - addl 52(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm5 - vmovdqa %xmm6,32(%esp) - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - addl 56(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpor %xmm5,%xmm3,%xmm3 - addl 60(%esp),%edx - xorl %ebx,%ebp - vmovdqa 96(%esp),%xmm5 - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpalignr $8,%xmm2,%xmm3,%xmm6 - vpxor %xmm0,%xmm4,%xmm4 - addl (%esp),%ecx - xorl %eax,%esi - movl 
%edx,%ebp - shldl $5,%edx,%edx - vpxor %xmm5,%xmm4,%xmm4 - vmovdqa %xmm0,96(%esp) - addl %esi,%ecx - xorl %eax,%ebp - vmovdqa %xmm7,%xmm0 - vpaddd %xmm3,%xmm7,%xmm7 - shrdl $7,%edi,%edi - addl %edx,%ecx - vpxor %xmm6,%xmm4,%xmm4 - addl 4(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - vpsrld $30,%xmm4,%xmm6 - vmovdqa %xmm7,48(%esp) - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpslld $2,%xmm4,%xmm4 - addl 8(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpor %xmm6,%xmm4,%xmm4 - addl 12(%esp),%edi - xorl %ecx,%ebp - vmovdqa 64(%esp),%xmm6 - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpalignr $8,%xmm3,%xmm4,%xmm7 - vpxor %xmm1,%xmm5,%xmm5 - addl 16(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - vpxor %xmm6,%xmm5,%xmm5 - vmovdqa %xmm1,64(%esp) - addl %esi,%edx - xorl %ebx,%ebp - vmovdqa %xmm0,%xmm1 - vpaddd %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - addl %edi,%edx - vpxor %xmm7,%xmm5,%xmm5 - addl 20(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - vpsrld $30,%xmm5,%xmm7 - vmovdqa %xmm0,(%esp) - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - vpslld $2,%xmm5,%xmm5 - addl 24(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpor %xmm7,%xmm5,%xmm5 - addl 28(%esp),%eax - vmovdqa 80(%esp),%xmm7 - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm4,%xmm5,%xmm0 - vpxor %xmm2,%xmm6,%xmm6 - addl 32(%esp),%edi - andl %ecx,%esi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vpxor %xmm7,%xmm6,%xmm6 - vmovdqa %xmm2,80(%esp) - movl %eax,%ebp - xorl %ecx,%esi - vmovdqa %xmm1,%xmm2 - vpaddd %xmm5,%xmm1,%xmm1 - shldl $5,%eax,%eax 
- addl %esi,%edi - vpxor %xmm0,%xmm6,%xmm6 - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - addl 36(%esp),%edx - vpsrld $30,%xmm6,%xmm0 - vmovdqa %xmm1,16(%esp) - andl %ebx,%ebp - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %edi,%esi - vpslld $2,%xmm6,%xmm6 - xorl %ebx,%ebp - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - addl 40(%esp),%ecx - andl %eax,%esi - vpor %xmm0,%xmm6,%xmm6 - xorl %ebx,%eax - shrdl $7,%edi,%edi - vmovdqa 96(%esp),%xmm0 - movl %edx,%ebp - xorl %eax,%esi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - addl 44(%esp),%ebx - andl %edi,%ebp - xorl %eax,%edi - shrdl $7,%edx,%edx - movl %ecx,%esi - xorl %edi,%ebp - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - vpalignr $8,%xmm5,%xmm6,%xmm1 - vpxor %xmm3,%xmm7,%xmm7 - addl 48(%esp),%eax - andl %edx,%esi - xorl %edi,%edx - shrdl $7,%ecx,%ecx - vpxor %xmm0,%xmm7,%xmm7 - vmovdqa %xmm3,96(%esp) - movl %ebx,%ebp - xorl %edx,%esi - vmovdqa 144(%esp),%xmm3 - vpaddd %xmm6,%xmm2,%xmm2 - shldl $5,%ebx,%ebx - addl %esi,%eax - vpxor %xmm1,%xmm7,%xmm7 - xorl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%esp),%edi - vpsrld $30,%xmm7,%xmm1 - vmovdqa %xmm2,32(%esp) - andl %ecx,%ebp - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - vpslld $2,%xmm7,%xmm7 - xorl %ecx,%ebp - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - addl 56(%esp),%edx - andl %ebx,%esi - vpor %xmm1,%xmm7,%xmm7 - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vmovdqa 64(%esp),%xmm1 - movl %edi,%ebp - xorl %ebx,%esi - shldl $5,%edi,%edi - addl %esi,%edx - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - addl 60(%esp),%ecx - andl %eax,%ebp - xorl %ebx,%eax - shrdl $7,%edi,%edi - movl %edx,%esi - xorl %eax,%ebp - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - vpalignr $8,%xmm6,%xmm7,%xmm2 - vpxor %xmm4,%xmm0,%xmm0 - addl (%esp),%ebx 
- andl %edi,%esi - xorl %eax,%edi - shrdl $7,%edx,%edx - vpxor %xmm1,%xmm0,%xmm0 - vmovdqa %xmm4,64(%esp) - movl %ecx,%ebp - xorl %edi,%esi - vmovdqa %xmm3,%xmm4 - vpaddd %xmm7,%xmm3,%xmm3 - shldl $5,%ecx,%ecx - addl %esi,%ebx - vpxor %xmm2,%xmm0,%xmm0 - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 4(%esp),%eax - vpsrld $30,%xmm0,%xmm2 - vmovdqa %xmm3,48(%esp) - andl %edx,%ebp - xorl %edi,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - vpslld $2,%xmm0,%xmm0 - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%esp),%edi - andl %ecx,%esi - vpor %xmm2,%xmm0,%xmm0 - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vmovdqa 80(%esp),%xmm2 - movl %eax,%ebp - xorl %ecx,%esi - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - addl 12(%esp),%edx - andl %ebx,%ebp - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %edi,%esi - xorl %ebx,%ebp - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - vpalignr $8,%xmm7,%xmm0,%xmm3 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%esp),%ecx - andl %eax,%esi - xorl %ebx,%eax - shrdl $7,%edi,%edi - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm5,80(%esp) - movl %edx,%ebp - xorl %eax,%esi - vmovdqa %xmm4,%xmm5 - vpaddd %xmm0,%xmm4,%xmm4 - shldl $5,%edx,%edx - addl %esi,%ecx - vpxor %xmm3,%xmm1,%xmm1 - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - addl 20(%esp),%ebx - vpsrld $30,%xmm1,%xmm3 - vmovdqa %xmm4,(%esp) - andl %edi,%ebp - xorl %eax,%edi - shrdl $7,%edx,%edx - movl %ecx,%esi - vpslld $2,%xmm1,%xmm1 - xorl %edi,%ebp - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - addl 24(%esp),%eax - andl %edx,%esi - vpor %xmm3,%xmm1,%xmm1 - xorl %edi,%edx - shrdl $7,%ecx,%ecx - vmovdqa 96(%esp),%xmm3 - movl %ebx,%ebp - xorl %edx,%esi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%esp),%edi - andl %ecx,%ebp - xorl %edx,%ecx - shrdl 
$7,%ebx,%ebx - movl %eax,%esi - xorl %ecx,%ebp - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - vpalignr $8,%xmm0,%xmm1,%xmm4 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%esp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vpxor %xmm3,%xmm2,%xmm2 - vmovdqa %xmm6,96(%esp) - movl %edi,%ebp - xorl %ebx,%esi - vmovdqa %xmm5,%xmm6 - vpaddd %xmm1,%xmm5,%xmm5 - shldl $5,%edi,%edi - addl %esi,%edx - vpxor %xmm4,%xmm2,%xmm2 - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - addl 36(%esp),%ecx - vpsrld $30,%xmm2,%xmm4 - vmovdqa %xmm5,16(%esp) - andl %eax,%ebp - xorl %ebx,%eax - shrdl $7,%edi,%edi - movl %edx,%esi - vpslld $2,%xmm2,%xmm2 - xorl %eax,%ebp - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - addl 40(%esp),%ebx - andl %edi,%esi - vpor %xmm4,%xmm2,%xmm2 - xorl %eax,%edi - shrdl $7,%edx,%edx - vmovdqa 64(%esp),%xmm4 - movl %ecx,%ebp - xorl %edi,%esi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 44(%esp),%eax - andl %edx,%ebp - xorl %edi,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - addl %ebx,%eax - vpalignr $8,%xmm1,%xmm2,%xmm5 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - vpxor %xmm4,%xmm3,%xmm3 - vmovdqa %xmm7,64(%esp) - addl %esi,%edi - xorl %ecx,%ebp - vmovdqa %xmm6,%xmm7 - vpaddd %xmm2,%xmm6,%xmm6 - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpxor %xmm5,%xmm3,%xmm3 - addl 52(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - vpsrld $30,%xmm3,%xmm5 - vmovdqa %xmm6,32(%esp) - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpslld $2,%xmm3,%xmm3 - addl 56(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vpor %xmm5,%xmm3,%xmm3 - addl 60(%esp),%ebx - xorl %edi,%ebp - movl 
%ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl (%esp),%eax - vpaddd %xmm3,%xmm7,%xmm7 - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - vmovdqa %xmm7,48(%esp) - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 4(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 8(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - addl 12(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - movl 196(%esp),%ebp - cmpl 200(%esp),%ebp - je .L008done - vmovdqa 160(%esp),%xmm7 - vmovdqa 176(%esp),%xmm6 - vmovdqu (%ebp),%xmm0 - vmovdqu 16(%ebp),%xmm1 - vmovdqu 32(%ebp),%xmm2 - vmovdqu 48(%ebp),%xmm3 - addl $64,%ebp - vpshufb %xmm6,%xmm0,%xmm0 - movl %ebp,196(%esp) - vmovdqa %xmm7,96(%esp) - addl 16(%esp),%ebx - xorl %edi,%esi - vpshufb %xmm6,%xmm1,%xmm1 - movl %ecx,%ebp - shldl $5,%ecx,%ecx - vpaddd %xmm7,%xmm0,%xmm4 - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - vmovdqa %xmm4,(%esp) - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - vpshufb %xmm6,%xmm2,%xmm2 - movl %edx,%ebp - shldl $5,%edx,%edx - vpaddd %xmm7,%xmm1,%xmm5 - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vmovdqa %xmm5,16(%esp) - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - 
shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - vpshufb %xmm6,%xmm3,%xmm3 - movl %edi,%ebp - shldl $5,%edi,%edi - vpaddd %xmm7,%xmm2,%xmm6 - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - vmovdqa %xmm6,32(%esp) - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,%ebx - movl %ecx,8(%ebp) - xorl %edx,%ebx - movl %edx,12(%ebp) - movl %edi,16(%ebp) - movl %esi,%ebp - andl %ebx,%esi - movl %ebp,%ebx - jmp .L007loop -.align 16 -.L008done: - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - addl 
32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vzeroall - movl 192(%esp),%ebp - addl (%ebp),%eax - movl 204(%esp),%esp - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - movl %edi,16(%ebp) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size _sha1_block_data_order_avx,.-_sha1_block_data_order_avx -.align 64 -.LK_XX_XX: -.long 1518500249,1518500249,1518500249,1518500249 -.long 1859775393,1859775393,1859775393,1859775393 -.long 2400959708,2400959708,2400959708,2400959708 -.long 3395469782,3395469782,3395469782,3395469782 -.long 66051,67438087,134810123,202182159 -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 -.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 -.byte 
89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 -.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha256-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha256-586.S deleted file mode 100644 index f61fa3df72..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha256-586.S +++ /dev/null @@ -1,5567 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl sha256_block_data_order -.hidden sha256_block_data_order -.type sha256_block_data_order,@function -.align 16 -sha256_block_data_order: -.L_sha256_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl %esp,%ebx - call .L000pic_point -.L000pic_point: - popl %ebp - leal .L001K256-.L000pic_point(%ebp),%ebp - subl $16,%esp - andl $-64,%esp - shll $6,%eax - addl %edi,%eax - movl %esi,(%esp) - movl %edi,4(%esp) - movl %eax,8(%esp) - movl %ebx,12(%esp) - leal OPENSSL_ia32cap_P-.L001K256(%ebp),%edx - movl (%edx),%ecx - movl 4(%edx),%ebx - testl $1048576,%ecx - jnz .L002loop - movl 8(%edx),%edx - testl $16777216,%ecx - jz .L003no_xmm - andl $1073741824,%ecx - andl $268435968,%ebx - orl %ebx,%ecx - andl $1342177280,%ecx - cmpl $1342177280,%ecx - je .L004AVX - testl $512,%ebx - jnz .L005SSSE3 -.L003no_xmm: - subl %edi,%eax - cmpl $256,%eax - jae .L006unrolled - jmp .L002loop -.align 16 -.L002loop: - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - bswap %eax - movl 12(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 16(%edi),%eax - movl 20(%edi),%ebx - movl 24(%edi),%ecx - bswap %eax - movl 28(%edi),%edx - bswap %ebx - pushl %eax - bswap 
%ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 32(%edi),%eax - movl 36(%edi),%ebx - movl 40(%edi),%ecx - bswap %eax - movl 44(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 48(%edi),%eax - movl 52(%edi),%ebx - movl 56(%edi),%ecx - bswap %eax - movl 60(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - addl $64,%edi - leal -36(%esp),%esp - movl %edi,104(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,8(%esp) - xorl %ecx,%ebx - movl %ecx,12(%esp) - movl %edi,16(%esp) - movl %ebx,(%esp) - movl 16(%esi),%edx - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edi - movl %ebx,24(%esp) - movl %ecx,28(%esp) - movl %edi,32(%esp) -.align 16 -.L00700_15: - movl %edx,%ecx - movl 24(%esp),%esi - rorl $14,%ecx - movl 28(%esp),%edi - xorl %edx,%ecx - xorl %edi,%esi - movl 96(%esp),%ebx - rorl $5,%ecx - andl %edx,%esi - movl %edx,20(%esp) - xorl %ecx,%edx - addl 32(%esp),%ebx - xorl %edi,%esi - rorl $6,%edx - movl %eax,%ecx - addl %esi,%ebx - rorl $9,%ecx - addl %edx,%ebx - movl 8(%esp),%edi - xorl %eax,%ecx - movl %eax,4(%esp) - leal -4(%esp),%esp - rorl $11,%ecx - movl (%ebp),%esi - xorl %eax,%ecx - movl 20(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %esi,%ebx - movl %eax,(%esp) - addl %ebx,%edx - andl 4(%esp),%eax - addl %ecx,%ebx - xorl %edi,%eax - addl $4,%ebp - addl %ebx,%eax - cmpl $3248222580,%esi - jne .L00700_15 - movl 156(%esp),%ecx - jmp .L00816_63 -.align 16 -.L00816_63: - movl %ecx,%ebx - movl 104(%esp),%esi - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 160(%esp),%ebx - shrl $10,%edi - addl 124(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 24(%esp),%esi - rorl $14,%ecx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %edx,%ecx - xorl %edi,%esi - movl %ebx,96(%esp) - 
rorl $5,%ecx - andl %edx,%esi - movl %edx,20(%esp) - xorl %ecx,%edx - addl 32(%esp),%ebx - xorl %edi,%esi - rorl $6,%edx - movl %eax,%ecx - addl %esi,%ebx - rorl $9,%ecx - addl %edx,%ebx - movl 8(%esp),%edi - xorl %eax,%ecx - movl %eax,4(%esp) - leal -4(%esp),%esp - rorl $11,%ecx - movl (%ebp),%esi - xorl %eax,%ecx - movl 20(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %esi,%ebx - movl %eax,(%esp) - addl %ebx,%edx - andl 4(%esp),%eax - addl %ecx,%ebx - xorl %edi,%eax - movl 156(%esp),%ecx - addl $4,%ebp - addl %ebx,%eax - cmpl $3329325298,%esi - jne .L00816_63 - movl 356(%esp),%esi - movl 8(%esp),%ebx - movl 16(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl 24(%esp),%eax - movl 28(%esp),%ebx - movl 32(%esp),%ecx - movl 360(%esp),%edi - addl 16(%esi),%edx - addl 20(%esi),%eax - addl 24(%esi),%ebx - addl 28(%esi),%ecx - movl %edx,16(%esi) - movl %eax,20(%esi) - movl %ebx,24(%esi) - movl %ecx,28(%esi) - leal 356(%esp),%esp - subl $256,%ebp - cmpl 8(%esp),%edi - jb .L002loop - movl 12(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 64 -.L001K256: -.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 -.long 66051,67438087,134810123,202182159 -.byte 
83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 -.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -.byte 62,0 -.align 16 -.L006unrolled: - leal -96(%esp),%esp - movl (%esi),%eax - movl 4(%esi),%ebp - movl 8(%esi),%ecx - movl 12(%esi),%ebx - movl %ebp,4(%esp) - xorl %ecx,%ebp - movl %ecx,8(%esp) - movl %ebx,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %ebx,20(%esp) - movl %ecx,24(%esp) - movl %esi,28(%esp) - jmp .L009grand_loop -.align 16 -.L009grand_loop: - movl (%edi),%ebx - movl 4(%edi),%ecx - bswap %ebx - movl 8(%edi),%esi - bswap %ecx - movl %ebx,32(%esp) - bswap %esi - movl %ecx,36(%esp) - movl %esi,40(%esp) - movl 12(%edi),%ebx - movl 16(%edi),%ecx - bswap %ebx - movl 20(%edi),%esi - bswap %ecx - movl %ebx,44(%esp) - bswap %esi - movl %ecx,48(%esp) - movl %esi,52(%esp) - movl 24(%edi),%ebx - movl 28(%edi),%ecx - bswap %ebx - movl 32(%edi),%esi - bswap %ecx - movl %ebx,56(%esp) - bswap %esi - movl %ecx,60(%esp) - movl %esi,64(%esp) - movl 36(%edi),%ebx - movl 40(%edi),%ecx - bswap %ebx - movl 44(%edi),%esi - bswap %ecx - movl %ebx,68(%esp) - bswap %esi - movl %ecx,72(%esp) - movl %esi,76(%esp) - movl 48(%edi),%ebx - movl 52(%edi),%ecx - bswap %ebx - movl 56(%edi),%esi - bswap %ecx - movl %ebx,80(%esp) - bswap %esi - movl %ecx,84(%esp) - movl %esi,88(%esp) - movl 60(%edi),%ebx - addl $64,%edi - bswap %ebx - movl %edi,100(%esp) - movl %ebx,92(%esp) - movl %edx,%ecx - movl 20(%esp),%esi - rorl $14,%edx - movl 24(%esp),%edi - xorl %ecx,%edx - movl 32(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp 
- leal 1116352408(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 16(%esp),%ecx - rorl $14,%edx - movl 20(%esp),%edi - xorl %esi,%edx - movl 36(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1899447441(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 12(%esp),%esi - rorl $14,%edx - movl 16(%esp),%edi - xorl %ecx,%edx - movl 40(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3049323471(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 8(%esp),%ecx - rorl $14,%edx - movl 12(%esp),%edi - xorl %esi,%edx - movl 44(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3921009573(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 4(%esp),%esi - rorl $14,%edx - movl 8(%esp),%edi - xorl %ecx,%edx - movl 48(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - 
movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 961987163(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl (%esp),%ecx - rorl $14,%edx - movl 4(%esp),%edi - xorl %esi,%edx - movl 52(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1508970993(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 28(%esp),%esi - rorl $14,%edx - movl (%esp),%edi - xorl %ecx,%edx - movl 56(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2453635748(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 24(%esp),%ecx - rorl $14,%edx - movl 28(%esp),%edi - xorl %esi,%edx - movl 60(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2870763221(%ebx,%edx,1),%edx - xorl 
%ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 20(%esp),%esi - rorl $14,%edx - movl 24(%esp),%edi - xorl %ecx,%edx - movl 64(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3624381080(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 16(%esp),%ecx - rorl $14,%edx - movl 20(%esp),%edi - xorl %esi,%edx - movl 68(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 310598401(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 12(%esp),%esi - rorl $14,%edx - movl 16(%esp),%edi - xorl %ecx,%edx - movl 72(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 607225278(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 8(%esp),%ecx - rorl $14,%edx - movl 12(%esp),%edi - xorl %esi,%edx - movl 76(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 
16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1426881987(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 4(%esp),%esi - rorl $14,%edx - movl 8(%esp),%edi - xorl %ecx,%edx - movl 80(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1925078388(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl (%esp),%ecx - rorl $14,%edx - movl 4(%esp),%edi - xorl %esi,%edx - movl 84(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2162078206(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 28(%esp),%esi - rorl $14,%edx - movl (%esp),%edi - xorl %ecx,%edx - movl 88(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2614888103(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - 
addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 24(%esp),%ecx - rorl $14,%edx - movl 28(%esp),%edi - xorl %esi,%edx - movl 92(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3248222580(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl $10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3835390401(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - 
xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 4022224774(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 264347078(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 604807628(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - 
movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 770255983(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1249150122(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 
56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1555081692(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1996064986(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl 
%ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2554220882(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2821834349(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2952996808(%ebx,%edx,1),%edx - 
xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3210313671(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3336571891(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl 
%esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3584528711(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,88(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 113926993(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,92(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx 
- movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 338241895(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl $10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 666307205(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 
773529912(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1294757372(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1396182291(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl 
$7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1695183700(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1986661051(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - 
rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2177026350(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2456956037(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - 
andl %eax,%ebp - leal 2730485921(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2820302411(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3259730800(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl 
%ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3345764771(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3516065817(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl 
%ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3600352804(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,88(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 4094571909(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,92(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - 
xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 275423344(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl $10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 430227734(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 506948616(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi 
- rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 659060556(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 883997877(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - 
xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 958139571(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1322822218(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - 
movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1537002063(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1747873779(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1955562222(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl 
$11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2024104815(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2227730452(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl 
%edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2361852424(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2428436474(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 
16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2756734187(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3204031479(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3329325298(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 96(%esp),%esi - xorl %edi,%ebp - movl 12(%esp),%ecx - addl (%esi),%eax - 
addl 4(%esi),%ebp - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebp,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebp,4(%esp) - xorl %edi,%ebp - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ebx - movl 28(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ebx - addl 28(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %ebx,24(%esi) - movl %ecx,28(%esi) - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ebx,24(%esp) - movl %ecx,28(%esp) - cmpl 104(%esp),%edi - jb .L009grand_loop - movl 108(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 32 -.L005SSSE3: - leal -96(%esp),%esp - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,4(%esp) - xorl %ecx,%ebx - movl %ecx,8(%esp) - movl %edi,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%edi - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ecx,24(%esp) - movl %esi,28(%esp) - movdqa 256(%ebp),%xmm7 - jmp .L010grand_ssse3 -.align 16 -.L010grand_ssse3: - movdqu (%edi),%xmm0 - movdqu 16(%edi),%xmm1 - movdqu 32(%edi),%xmm2 - movdqu 48(%edi),%xmm3 - addl $64,%edi -.byte 102,15,56,0,199 - movl %edi,100(%esp) -.byte 102,15,56,0,207 - movdqa (%ebp),%xmm4 -.byte 102,15,56,0,215 - movdqa 16(%ebp),%xmm5 - paddd %xmm0,%xmm4 -.byte 102,15,56,0,223 - movdqa 32(%ebp),%xmm6 - paddd %xmm1,%xmm5 - movdqa 48(%ebp),%xmm7 - movdqa %xmm4,32(%esp) - paddd %xmm2,%xmm6 - movdqa %xmm5,48(%esp) - paddd %xmm3,%xmm7 - movdqa %xmm6,64(%esp) - movdqa %xmm7,80(%esp) - jmp .L011ssse3_00_47 -.align 16 -.L011ssse3_00_47: - addl $64,%ebp - movl %edx,%ecx - movdqa %xmm1,%xmm4 - rorl $14,%edx - movl 20(%esp),%esi - movdqa %xmm3,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi -.byte 102,15,58,15,224,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,250,4 - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl 
%eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 4(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm0 - movl %eax,(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm3,%xmm7 - xorl %esi,%ecx - addl 32(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl 16(%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,12(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm0 - movl %ebx,28(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 36(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm0 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - pshufd $80,%xmm0,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 8(%esp),%esi - xorl 
%ecx,%edx - movl 12(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa (%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,4(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm0 - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - paddd %xmm0,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movdqa %xmm6,32(%esp) - movl %edx,%ecx - movdqa %xmm2,%xmm4 - rorl $14,%edx - movl 4(%esp),%esi - movdqa %xmm0,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi -.byte 102,15,58,15,225,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,251,4 - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 20(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm1 - movl %eax,16(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm0,%xmm7 - xorl %esi,%ecx - addl 48(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl (%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,28(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm1 - movl %ebx,12(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq 
$2,%xmm6 - addl 52(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm1 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - pshufd $80,%xmm1,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 16(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,20(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm1 - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - paddd %xmm1,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movdqa %xmm6,48(%esp) - movl %edx,%ecx - movdqa %xmm3,%xmm4 - rorl $14,%edx - movl 20(%esp),%esi - movdqa %xmm1,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi -.byte 102,15,58,15,226,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,248,4 - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 4(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm2 - movl 
%eax,(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm1,%xmm7 - xorl %esi,%ecx - addl 64(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl 16(%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,12(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm2 - movl %ebx,28(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 68(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm2 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - pshufd $80,%xmm2,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 32(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,4(%esp) - 
pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm2 - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - paddd %xmm2,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movdqa %xmm6,64(%esp) - movl %edx,%ecx - movdqa %xmm0,%xmm4 - rorl $14,%edx - movl 4(%esp),%esi - movdqa %xmm2,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi -.byte 102,15,58,15,227,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,249,4 - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 20(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm3 - movl %eax,16(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm2,%xmm7 - xorl %esi,%ecx - addl 80(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl (%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,28(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm3 - movl %ebx,12(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 84(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl 
%ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm3 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - pshufd $80,%xmm3,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 88(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 48(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,20(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm3 - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - paddd %xmm3,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movdqa %xmm6,80(%esp) - cmpl $66051,64(%ebp) - jne .L011ssse3_00_47 - movl %edx,%ecx - rorl $14,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 32(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 16(%esp),%esi 
- xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 36(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 48(%esp),%edx - xorl %edi,%ebx - rorl 
$2,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 52(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - 
addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 64(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 68(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl 
%edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 80(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 84(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 88(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl 96(%esp),%esi - xorl %edi,%ebx - movl 12(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi 
- addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebx,4(%esp) - xorl %edi,%ebx - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %edi,20(%esp) - movl 28(%esp),%edi - movl %ecx,24(%esi) - addl 28(%esi),%edi - movl %ecx,24(%esp) - movl %edi,28(%esi) - movl %edi,28(%esp) - movl 100(%esp),%edi - movdqa 64(%ebp),%xmm7 - subl $192,%ebp - cmpl 104(%esp),%edi - jb .L010grand_ssse3 - movl 108(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 32 -.L004AVX: - leal -96(%esp),%esp - vzeroall - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,4(%esp) - xorl %ecx,%ebx - movl %ecx,8(%esp) - movl %edi,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%edi - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ecx,24(%esp) - movl %esi,28(%esp) - vmovdqa 256(%ebp),%xmm7 - jmp .L012grand_avx -.align 32 -.L012grand_avx: - vmovdqu (%edi),%xmm0 - vmovdqu 16(%edi),%xmm1 - vmovdqu 32(%edi),%xmm2 - vmovdqu 48(%edi),%xmm3 - addl $64,%edi - vpshufb %xmm7,%xmm0,%xmm0 - movl %edi,100(%esp) - vpshufb %xmm7,%xmm1,%xmm1 - vpshufb %xmm7,%xmm2,%xmm2 - vpaddd (%ebp),%xmm0,%xmm4 - vpshufb %xmm7,%xmm3,%xmm3 - vpaddd 16(%ebp),%xmm1,%xmm5 - vpaddd 32(%ebp),%xmm2,%xmm6 - vpaddd 48(%ebp),%xmm3,%xmm7 - vmovdqa %xmm4,32(%esp) - vmovdqa %xmm5,48(%esp) - vmovdqa %xmm6,64(%esp) - vmovdqa %xmm7,80(%esp) - jmp .L013avx_00_47 -.align 16 -.L013avx_00_47: - addl $64,%ebp - vpalignr $4,%xmm0,%xmm1,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - vpalignr $4,%xmm2,%xmm3,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - vpaddd %xmm7,%xmm0,%xmm0 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld 
$3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - vpshufd $250,%xmm3,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 32(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - vpaddd %xmm4,%xmm0,%xmm0 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 36(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - vpaddd %xmm7,%xmm0,%xmm0 - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm0,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 
40(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm0,%xmm0 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - vpaddd (%ebp),%xmm0,%xmm6 - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,32(%esp) - vpalignr $4,%xmm1,%xmm2,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - vpalignr $4,%xmm3,%xmm0,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - vpaddd %xmm7,%xmm1,%xmm1 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - vpshufd $250,%xmm0,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 48(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - 
movl 16(%esp),%edi - vpaddd %xmm4,%xmm1,%xmm1 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 52(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - vpaddd %xmm7,%xmm1,%xmm1 - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm1,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 56(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm1,%xmm1 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - vpaddd 16(%ebp),%xmm1,%xmm6 - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,48(%esp) - vpalignr $4,%xmm2,%xmm3,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - 
vpalignr $4,%xmm0,%xmm1,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - vpaddd %xmm7,%xmm2,%xmm2 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - vpshufd $250,%xmm1,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 64(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - vpaddd %xmm4,%xmm2,%xmm2 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 68(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - vpaddd %xmm7,%xmm2,%xmm2 - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm2,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 
- movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 72(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm2,%xmm2 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - vpaddd 32(%ebp),%xmm2,%xmm6 - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,64(%esp) - vpalignr $4,%xmm3,%xmm0,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - vpalignr $4,%xmm1,%xmm2,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - vpaddd %xmm7,%xmm3,%xmm3 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - vpshufd $250,%xmm2,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 80(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 
4(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - vpaddd %xmm4,%xmm3,%xmm3 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 84(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - vpaddd %xmm7,%xmm3,%xmm3 - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm3,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 88(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm3,%xmm3 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - vpaddd 48(%ebp),%xmm3,%xmm6 - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax 
- xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,80(%esp) - cmpl $66051,64(%ebp) - jne .L013avx_00_47 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 32(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 36(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl 
%edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 48(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 52(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - shrdl 
$11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 64(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 68(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl 
%ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 80(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl 
%esi,%ecx - addl 84(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 88(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl 96(%esp),%esi - xorl %edi,%ebx - movl 12(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebx,4(%esp) - xorl %edi,%ebx - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %edi,20(%esp) - movl 28(%esp),%edi - movl %ecx,24(%esi) - addl 28(%esi),%edi - movl %ecx,24(%esp) - movl %edi,28(%esi) - movl %edi,28(%esp) - movl 100(%esp),%edi - vmovdqa 64(%ebp),%xmm7 - subl $192,%ebp - cmpl 104(%esp),%edi - jb .L012grand_avx - movl 108(%esp),%esp - vzeroall - popl %edi - 
popl %esi - popl %ebx - popl %ebp - ret -.size sha256_block_data_order,.-.L_sha256_block_data_order_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha512-586.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha512-586.S deleted file mode 100644 index 89fb50b4ca..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/sha512-586.S +++ /dev/null @@ -1,2837 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl sha512_block_data_order -.hidden sha512_block_data_order -.type sha512_block_data_order,@function -.align 16 -sha512_block_data_order: -.L_sha512_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl %esp,%ebx - call .L000pic_point -.L000pic_point: - popl %ebp - leal .L001K512-.L000pic_point(%ebp),%ebp - subl $16,%esp - andl $-64,%esp - shll $7,%eax - addl %edi,%eax - movl %esi,(%esp) - movl %edi,4(%esp) - movl %eax,8(%esp) - movl %ebx,12(%esp) - leal OPENSSL_ia32cap_P-.L001K512(%ebp),%edx - movl (%edx),%ecx - testl $67108864,%ecx - jz .L002loop_x86 - movl 4(%edx),%edx - movq (%esi),%mm0 - andl $16777216,%ecx - movq 8(%esi),%mm1 - andl $512,%edx - movq 16(%esi),%mm2 - orl %edx,%ecx - movq 24(%esi),%mm3 - movq 32(%esi),%mm4 - movq 40(%esi),%mm5 - movq 48(%esi),%mm6 - movq 56(%esi),%mm7 - cmpl $16777728,%ecx - je .L003SSSE3 - subl $80,%esp - jmp .L004loop_sse2 -.align 16 -.L004loop_sse2: - movq %mm1,8(%esp) - movq %mm2,16(%esp) - movq %mm3,24(%esp) - movq %mm5,40(%esp) - movq %mm6,48(%esp) - pxor %mm1,%mm2 - movq %mm7,56(%esp) - movq %mm0,%mm3 - movl (%edi),%eax - movl 4(%edi),%ebx - addl $8,%edi - movl $15,%edx - bswap %eax - bswap %ebx - jmp .L00500_14_sse2 -.align 16 -.L00500_14_sse2: - movd %eax,%mm1 - 
movl (%edi),%eax - movd %ebx,%mm7 - movl 4(%edi),%ebx - addl $8,%edi - bswap %eax - bswap %ebx - punpckldq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm3,%mm0 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm2,%mm3 - movq %mm0,%mm2 - addl $8,%ebp - paddq %mm6,%mm3 - movq 48(%esp),%mm6 - decl %edx - jnz .L00500_14_sse2 - movd %eax,%mm1 - movd %ebx,%mm7 - punpckldq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm3,%mm0 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm2,%mm3 - movq %mm0,%mm2 - addl $8,%ebp - paddq %mm6,%mm3 - pxor %mm0,%mm0 - 
movl $32,%edx - jmp .L00616_79_sse2 -.align 16 -.L00616_79_sse2: - movq 88(%esp),%mm5 - movq %mm7,%mm1 - psrlq $1,%mm7 - movq %mm5,%mm6 - psrlq $6,%mm5 - psllq $56,%mm1 - paddq %mm3,%mm0 - movq %mm7,%mm3 - psrlq $6,%mm7 - pxor %mm1,%mm3 - psllq $7,%mm1 - pxor %mm7,%mm3 - psrlq $1,%mm7 - pxor %mm1,%mm3 - movq %mm5,%mm1 - psrlq $13,%mm5 - pxor %mm3,%mm7 - psllq $3,%mm6 - pxor %mm5,%mm1 - paddq 200(%esp),%mm7 - pxor %mm6,%mm1 - psrlq $42,%mm5 - paddq 128(%esp),%mm7 - pxor %mm5,%mm1 - psllq $42,%mm6 - movq 40(%esp),%mm5 - pxor %mm6,%mm1 - movq 48(%esp),%mm6 - paddq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm6,%mm2 - addl $8,%ebp - movq 88(%esp),%mm5 - movq %mm7,%mm1 - psrlq $1,%mm7 - movq %mm5,%mm6 - psrlq $6,%mm5 - psllq $56,%mm1 - paddq %mm3,%mm2 - movq %mm7,%mm3 - psrlq $6,%mm7 - pxor %mm1,%mm3 - psllq $7,%mm1 - pxor %mm7,%mm3 - psrlq $1,%mm7 - pxor %mm1,%mm3 - movq %mm5,%mm1 - psrlq $13,%mm5 - pxor %mm3,%mm7 - psllq $3,%mm6 - pxor %mm5,%mm1 - paddq 200(%esp),%mm7 - pxor %mm6,%mm1 - psrlq $42,%mm5 - paddq 128(%esp),%mm7 - pxor %mm5,%mm1 - psllq $42,%mm6 - movq 40(%esp),%mm5 - pxor %mm6,%mm1 - movq 48(%esp),%mm6 - paddq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand 
%mm4,%mm5 - psllq $23,%mm4 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm6,%mm0 - addl $8,%ebp - decl %edx - jnz .L00616_79_sse2 - paddq %mm3,%mm0 - movq 8(%esp),%mm1 - movq 24(%esp),%mm3 - movq 40(%esp),%mm5 - movq 48(%esp),%mm6 - movq 56(%esp),%mm7 - pxor %mm1,%mm2 - paddq (%esi),%mm0 - paddq 8(%esi),%mm1 - paddq 16(%esi),%mm2 - paddq 24(%esi),%mm3 - paddq 32(%esi),%mm4 - paddq 40(%esi),%mm5 - paddq 48(%esi),%mm6 - paddq 56(%esi),%mm7 - movl $640,%eax - movq %mm0,(%esi) - movq %mm1,8(%esi) - movq %mm2,16(%esi) - movq %mm3,24(%esi) - movq %mm4,32(%esi) - movq %mm5,40(%esi) - movq %mm6,48(%esi) - movq %mm7,56(%esi) - leal (%esp,%eax,1),%esp - subl %eax,%ebp - cmpl 88(%esp),%edi - jb .L004loop_sse2 - movl 92(%esp),%esp - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 32 -.L003SSSE3: - leal -64(%esp),%edx - subl $256,%esp - movdqa 640(%ebp),%xmm1 - movdqu (%edi),%xmm0 -.byte 102,15,56,0,193 - movdqa (%ebp),%xmm3 - movdqa %xmm1,%xmm2 - movdqu 16(%edi),%xmm1 - paddq %xmm0,%xmm3 -.byte 102,15,56,0,202 - movdqa %xmm3,-128(%edx) - movdqa 16(%ebp),%xmm4 - movdqa %xmm2,%xmm3 - movdqu 32(%edi),%xmm2 - paddq %xmm1,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm4,-112(%edx) - movdqa 32(%ebp),%xmm5 - movdqa %xmm3,%xmm4 - movdqu 48(%edi),%xmm3 - paddq %xmm2,%xmm5 -.byte 102,15,56,0,220 - movdqa %xmm5,-96(%edx) - movdqa 
48(%ebp),%xmm6 - movdqa %xmm4,%xmm5 - movdqu 64(%edi),%xmm4 - paddq %xmm3,%xmm6 -.byte 102,15,56,0,229 - movdqa %xmm6,-80(%edx) - movdqa 64(%ebp),%xmm7 - movdqa %xmm5,%xmm6 - movdqu 80(%edi),%xmm5 - paddq %xmm4,%xmm7 -.byte 102,15,56,0,238 - movdqa %xmm7,-64(%edx) - movdqa %xmm0,(%edx) - movdqa 80(%ebp),%xmm0 - movdqa %xmm6,%xmm7 - movdqu 96(%edi),%xmm6 - paddq %xmm5,%xmm0 -.byte 102,15,56,0,247 - movdqa %xmm0,-48(%edx) - movdqa %xmm1,16(%edx) - movdqa 96(%ebp),%xmm1 - movdqa %xmm7,%xmm0 - movdqu 112(%edi),%xmm7 - paddq %xmm6,%xmm1 -.byte 102,15,56,0,248 - movdqa %xmm1,-32(%edx) - movdqa %xmm2,32(%edx) - movdqa 112(%ebp),%xmm2 - movdqa (%edx),%xmm0 - paddq %xmm7,%xmm2 - movdqa %xmm2,-16(%edx) - nop -.align 32 -.L007loop_ssse3: - movdqa 16(%edx),%xmm2 - movdqa %xmm3,48(%edx) - leal 128(%ebp),%ebp - movq %mm1,8(%esp) - movl %edi,%ebx - movq %mm2,16(%esp) - leal 128(%edi),%edi - movq %mm3,24(%esp) - cmpl %eax,%edi - movq %mm5,40(%esp) - cmovbl %edi,%ebx - movq %mm6,48(%esp) - movl $4,%ecx - pxor %mm1,%mm2 - movq %mm7,56(%esp) - pxor %mm3,%mm3 - jmp .L00800_47_ssse3 -.align 32 -.L00800_47_ssse3: - movdqa %xmm5,%xmm3 - movdqa %xmm2,%xmm1 -.byte 102,15,58,15,208,8 - movdqa %xmm4,(%edx) -.byte 102,15,58,15,220,8 - movdqa %xmm2,%xmm4 - psrlq $7,%xmm2 - paddq %xmm3,%xmm0 - movdqa %xmm4,%xmm3 - psrlq $1,%xmm4 - psllq $56,%xmm3 - pxor %xmm4,%xmm2 - psrlq $7,%xmm4 - pxor %xmm3,%xmm2 - psllq $7,%xmm3 - pxor %xmm4,%xmm2 - movdqa %xmm7,%xmm4 - pxor %xmm3,%xmm2 - movdqa %xmm7,%xmm3 - psrlq $6,%xmm4 - paddq %xmm2,%xmm0 - movdqa %xmm7,%xmm2 - psrlq $19,%xmm3 - psllq $3,%xmm2 - pxor %xmm3,%xmm4 - psrlq $42,%xmm3 - pxor %xmm2,%xmm4 - psllq $42,%xmm2 - pxor %xmm3,%xmm4 - movdqa 32(%edx),%xmm3 - pxor %xmm2,%xmm4 - movdqa (%ebp),%xmm2 - movq %mm4,%mm1 - paddq %xmm4,%xmm0 - movq -128(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - paddq %xmm0,%xmm2 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - 
psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -120(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm2,-128(%edx) - movdqa %xmm6,%xmm4 - movdqa %xmm3,%xmm2 -.byte 102,15,58,15,217,8 - movdqa %xmm5,16(%edx) -.byte 102,15,58,15,229,8 - movdqa %xmm3,%xmm5 - psrlq $7,%xmm3 - paddq %xmm4,%xmm1 - movdqa %xmm5,%xmm4 - psrlq $1,%xmm5 - psllq $56,%xmm4 - pxor %xmm5,%xmm3 - psrlq $7,%xmm5 - pxor %xmm4,%xmm3 - psllq $7,%xmm4 - pxor %xmm5,%xmm3 - movdqa %xmm0,%xmm5 - pxor %xmm4,%xmm3 - movdqa %xmm0,%xmm4 - psrlq $6,%xmm5 - paddq %xmm3,%xmm1 - movdqa %xmm0,%xmm3 - psrlq $19,%xmm4 - psllq $3,%xmm3 - pxor %xmm4,%xmm5 - psrlq $42,%xmm4 - pxor %xmm3,%xmm5 - psllq $42,%xmm3 - pxor 
%xmm4,%xmm5 - movdqa 48(%edx),%xmm4 - pxor %xmm3,%xmm5 - movdqa 16(%ebp),%xmm3 - movq %mm4,%mm1 - paddq %xmm5,%xmm1 - movq -112(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - paddq %xmm1,%xmm3 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -104(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm3,-112(%edx) - movdqa %xmm7,%xmm5 - movdqa %xmm4,%xmm3 -.byte 102,15,58,15,226,8 - movdqa %xmm6,32(%edx) -.byte 102,15,58,15,238,8 - movdqa %xmm4,%xmm6 - psrlq $7,%xmm4 - paddq %xmm5,%xmm2 - movdqa %xmm6,%xmm5 - psrlq $1,%xmm6 - psllq 
$56,%xmm5 - pxor %xmm6,%xmm4 - psrlq $7,%xmm6 - pxor %xmm5,%xmm4 - psllq $7,%xmm5 - pxor %xmm6,%xmm4 - movdqa %xmm1,%xmm6 - pxor %xmm5,%xmm4 - movdqa %xmm1,%xmm5 - psrlq $6,%xmm6 - paddq %xmm4,%xmm2 - movdqa %xmm1,%xmm4 - psrlq $19,%xmm5 - psllq $3,%xmm4 - pxor %xmm5,%xmm6 - psrlq $42,%xmm5 - pxor %xmm4,%xmm6 - psllq $42,%xmm4 - pxor %xmm5,%xmm6 - movdqa (%edx),%xmm5 - pxor %xmm4,%xmm6 - movdqa 32(%ebp),%xmm4 - movq %mm4,%mm1 - paddq %xmm6,%xmm2 - movq -96(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - paddq %xmm2,%xmm4 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -88(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor 
%mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm4,-96(%edx) - movdqa %xmm0,%xmm6 - movdqa %xmm5,%xmm4 -.byte 102,15,58,15,235,8 - movdqa %xmm7,48(%edx) -.byte 102,15,58,15,247,8 - movdqa %xmm5,%xmm7 - psrlq $7,%xmm5 - paddq %xmm6,%xmm3 - movdqa %xmm7,%xmm6 - psrlq $1,%xmm7 - psllq $56,%xmm6 - pxor %xmm7,%xmm5 - psrlq $7,%xmm7 - pxor %xmm6,%xmm5 - psllq $7,%xmm6 - pxor %xmm7,%xmm5 - movdqa %xmm2,%xmm7 - pxor %xmm6,%xmm5 - movdqa %xmm2,%xmm6 - psrlq $6,%xmm7 - paddq %xmm5,%xmm3 - movdqa %xmm2,%xmm5 - psrlq $19,%xmm6 - psllq $3,%xmm5 - pxor %xmm6,%xmm7 - psrlq $42,%xmm6 - pxor %xmm5,%xmm7 - psllq $42,%xmm5 - pxor %xmm6,%xmm7 - movdqa 16(%edx),%xmm6 - pxor %xmm5,%xmm7 - movdqa 48(%ebp),%xmm5 - movq %mm4,%mm1 - paddq %xmm7,%xmm3 - movq -80(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - paddq %xmm3,%xmm5 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -72(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 
32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm5,-80(%edx) - movdqa %xmm1,%xmm7 - movdqa %xmm6,%xmm5 -.byte 102,15,58,15,244,8 - movdqa %xmm0,(%edx) -.byte 102,15,58,15,248,8 - movdqa %xmm6,%xmm0 - psrlq $7,%xmm6 - paddq %xmm7,%xmm4 - movdqa %xmm0,%xmm7 - psrlq $1,%xmm0 - psllq $56,%xmm7 - pxor %xmm0,%xmm6 - psrlq $7,%xmm0 - pxor %xmm7,%xmm6 - psllq $7,%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm3,%xmm0 - pxor %xmm7,%xmm6 - movdqa %xmm3,%xmm7 - psrlq $6,%xmm0 - paddq %xmm6,%xmm4 - movdqa %xmm3,%xmm6 - psrlq $19,%xmm7 - psllq $3,%xmm6 - pxor %xmm7,%xmm0 - psrlq $42,%xmm7 - pxor %xmm6,%xmm0 - psllq $42,%xmm6 - pxor %xmm7,%xmm0 - movdqa 32(%edx),%xmm7 - pxor %xmm6,%xmm0 - movdqa 64(%ebp),%xmm6 - movq %mm4,%mm1 - paddq %xmm0,%xmm4 - movq -64(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - paddq %xmm4,%xmm6 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -56(%edx),%mm7 - pxor %mm6,%mm5 - psrlq 
$14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm6,-64(%edx) - movdqa %xmm2,%xmm0 - movdqa %xmm7,%xmm6 -.byte 102,15,58,15,253,8 - movdqa %xmm1,16(%edx) -.byte 102,15,58,15,193,8 - movdqa %xmm7,%xmm1 - psrlq $7,%xmm7 - paddq %xmm0,%xmm5 - movdqa %xmm1,%xmm0 - psrlq $1,%xmm1 - psllq $56,%xmm0 - pxor %xmm1,%xmm7 - psrlq $7,%xmm1 - pxor %xmm0,%xmm7 - psllq $7,%xmm0 - pxor %xmm1,%xmm7 - movdqa %xmm4,%xmm1 - pxor %xmm0,%xmm7 - movdqa %xmm4,%xmm0 - psrlq $6,%xmm1 - paddq %xmm7,%xmm5 - movdqa %xmm4,%xmm7 - psrlq $19,%xmm0 - psllq $3,%xmm7 - pxor %xmm0,%xmm1 - psrlq $42,%xmm0 - pxor %xmm7,%xmm1 - psllq $42,%xmm7 - pxor %xmm0,%xmm1 - movdqa 48(%edx),%xmm0 - pxor %xmm7,%xmm1 - movdqa 80(%ebp),%xmm7 - movq %mm4,%mm1 - paddq %xmm1,%xmm5 - movq -48(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - paddq %xmm5,%xmm7 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 
56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -40(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm7,-48(%edx) - movdqa %xmm3,%xmm1 - movdqa %xmm0,%xmm7 -.byte 102,15,58,15,198,8 - movdqa %xmm2,32(%edx) -.byte 102,15,58,15,202,8 - movdqa %xmm0,%xmm2 - psrlq $7,%xmm0 - paddq %xmm1,%xmm6 - movdqa %xmm2,%xmm1 - psrlq $1,%xmm2 - psllq $56,%xmm1 - pxor %xmm2,%xmm0 - psrlq $7,%xmm2 - pxor %xmm1,%xmm0 - psllq $7,%xmm1 - pxor %xmm2,%xmm0 - movdqa %xmm5,%xmm2 - pxor %xmm1,%xmm0 - movdqa %xmm5,%xmm1 - psrlq $6,%xmm2 - paddq %xmm0,%xmm6 - movdqa %xmm5,%xmm0 - psrlq $19,%xmm1 - psllq $3,%xmm0 - pxor %xmm1,%xmm2 - psrlq $42,%xmm1 - pxor %xmm0,%xmm2 - psllq $42,%xmm0 - pxor %xmm1,%xmm2 - movdqa (%edx),%xmm1 - pxor %xmm0,%xmm2 - movdqa 96(%ebp),%xmm0 - movq %mm4,%mm1 - paddq %xmm2,%xmm6 - movq -32(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - paddq %xmm6,%xmm0 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - 
psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -24(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm0,-32(%edx) - movdqa %xmm4,%xmm2 - movdqa %xmm1,%xmm0 -.byte 102,15,58,15,207,8 - movdqa %xmm3,48(%edx) -.byte 102,15,58,15,211,8 - movdqa %xmm1,%xmm3 - psrlq $7,%xmm1 - paddq %xmm2,%xmm7 - movdqa %xmm3,%xmm2 - psrlq $1,%xmm3 - psllq $56,%xmm2 - pxor %xmm3,%xmm1 - psrlq $7,%xmm3 - pxor %xmm2,%xmm1 - psllq $7,%xmm2 - pxor %xmm3,%xmm1 - movdqa %xmm6,%xmm3 - pxor %xmm2,%xmm1 - movdqa %xmm6,%xmm2 - psrlq $6,%xmm3 - paddq %xmm1,%xmm7 - movdqa %xmm6,%xmm1 - psrlq $19,%xmm2 - psllq $3,%xmm1 - pxor %xmm2,%xmm3 - psrlq $42,%xmm2 - pxor %xmm1,%xmm3 - psllq $42,%xmm1 - pxor 
%xmm2,%xmm3 - movdqa 16(%edx),%xmm2 - pxor %xmm1,%xmm3 - movdqa 112(%ebp),%xmm1 - movq %mm4,%mm1 - paddq %xmm3,%xmm7 - movq -16(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - paddq %xmm7,%xmm1 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -8(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm1,-16(%edx) - leal 128(%ebp),%ebp - decl %ecx - jnz .L00800_47_ssse3 - movdqa (%ebp),%xmm1 - leal -640(%ebp),%ebp - movdqu (%ebx),%xmm0 -.byte 102,15,56,0,193 - movdqa (%ebp),%xmm3 - movdqa %xmm1,%xmm2 - movdqu 16(%ebx),%xmm1 - paddq 
%xmm0,%xmm3 -.byte 102,15,56,0,202 - movq %mm4,%mm1 - movq -128(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -120(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm3,-128(%edx) - movdqa 16(%ebp),%xmm4 - movdqa %xmm2,%xmm3 - movdqu 32(%ebx),%xmm2 - paddq %xmm1,%xmm4 -.byte 102,15,56,0,211 - movq %mm4,%mm1 - movq -112(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - 
pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -104(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm4,-112(%edx) - movdqa 32(%ebp),%xmm5 - movdqa %xmm3,%xmm4 - movdqu 48(%ebx),%xmm3 - paddq %xmm2,%xmm5 -.byte 102,15,56,0,220 - movq %mm4,%mm1 - movq -96(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - 
movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -88(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm5,-96(%edx) - movdqa 48(%ebp),%xmm6 - movdqa %xmm4,%xmm5 - movdqu 64(%ebx),%xmm4 - paddq %xmm3,%xmm6 -.byte 102,15,56,0,229 - movq %mm4,%mm1 - movq -80(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 
- pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -72(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm6,-80(%edx) - movdqa 64(%ebp),%xmm7 - movdqa %xmm5,%xmm6 - movdqu 80(%ebx),%xmm5 - paddq %xmm4,%xmm7 -.byte 102,15,56,0,238 - movq %mm4,%mm1 - movq -64(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -56(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) 
- pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm7,-64(%edx) - movdqa %xmm0,(%edx) - movdqa 80(%ebp),%xmm0 - movdqa %xmm6,%xmm7 - movdqu 96(%ebx),%xmm6 - paddq %xmm5,%xmm0 -.byte 102,15,56,0,247 - movq %mm4,%mm1 - movq -48(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -40(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - 
psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm0,-48(%edx) - movdqa %xmm1,16(%edx) - movdqa 96(%ebp),%xmm1 - movdqa %xmm7,%xmm0 - movdqu 112(%ebx),%xmm7 - paddq %xmm6,%xmm1 -.byte 102,15,56,0,248 - movq %mm4,%mm1 - movq -32(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -24(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - 
psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm1,-32(%edx) - movdqa %xmm2,32(%edx) - movdqa 112(%ebp),%xmm2 - movdqa (%edx),%xmm0 - paddq %xmm7,%xmm2 - movq %mm4,%mm1 - movq -16(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -8(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - 
paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm2,-16(%edx) - movq 8(%esp),%mm1 - paddq %mm3,%mm0 - movq 24(%esp),%mm3 - movq 56(%esp),%mm7 - pxor %mm1,%mm2 - paddq (%esi),%mm0 - paddq 8(%esi),%mm1 - paddq 16(%esi),%mm2 - paddq 24(%esi),%mm3 - paddq 32(%esi),%mm4 - paddq 40(%esi),%mm5 - paddq 48(%esi),%mm6 - paddq 56(%esi),%mm7 - movq %mm0,(%esi) - movq %mm1,8(%esi) - movq %mm2,16(%esi) - movq %mm3,24(%esi) - movq %mm4,32(%esi) - movq %mm5,40(%esi) - movq %mm6,48(%esi) - movq %mm7,56(%esi) - cmpl %eax,%edi - jb .L007loop_ssse3 - movl 76(%edx),%esp - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 16 -.L002loop_x86: - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 16(%edi),%eax - movl 20(%edi),%ebx - movl 24(%edi),%ecx - movl 28(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 32(%edi),%eax - movl 36(%edi),%ebx - movl 40(%edi),%ecx - movl 44(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 48(%edi),%eax - movl 52(%edi),%ebx - movl 56(%edi),%ecx - movl 60(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 64(%edi),%eax - movl 68(%edi),%ebx - movl 72(%edi),%ecx - movl 76(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 80(%edi),%eax - movl 84(%edi),%ebx - movl 88(%edi),%ecx - movl 92(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 96(%edi),%eax - movl 100(%edi),%ebx - movl 104(%edi),%ecx - movl 108(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 112(%edi),%eax - movl 
116(%edi),%ebx - movl 120(%edi),%ecx - movl 124(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - addl $128,%edi - subl $72,%esp - movl %edi,204(%esp) - leal 8(%esp),%edi - movl $16,%ecx -.long 2784229001 -.align 16 -.L00900_15_x86: - movl 40(%esp),%ecx - movl 44(%esp),%edx - movl %ecx,%esi - shrl $9,%ecx - movl %edx,%edi - shrl $9,%edx - movl %ecx,%ebx - shll $14,%esi - movl %edx,%eax - shll $14,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%eax - shll $4,%esi - xorl %edx,%ebx - shll $4,%edi - xorl %esi,%ebx - shrl $4,%ecx - xorl %edi,%eax - shrl $4,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 48(%esp),%ecx - movl 52(%esp),%edx - movl 56(%esp),%esi - movl 60(%esp),%edi - addl 64(%esp),%eax - adcl 68(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - andl 40(%esp),%ecx - andl 44(%esp),%edx - addl 192(%esp),%eax - adcl 196(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - movl (%ebp),%esi - movl 4(%ebp),%edi - addl %ecx,%eax - adcl %edx,%ebx - movl 32(%esp),%ecx - movl 36(%esp),%edx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - addl %ecx,%eax - adcl %edx,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,%esi - shrl $2,%ecx - movl %edx,%edi - shrl $2,%edx - movl %ecx,%ebx - shll $4,%esi - movl %edx,%eax - shll $4,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%ebx - shll $21,%esi - xorl %edx,%eax - shll $21,%edi - xorl %esi,%eax - shrl $21,%ecx - xorl %edi,%ebx - shrl $21,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl 16(%esp),%esi - movl 20(%esp),%edi - addl (%esp),%eax - adcl 4(%esp),%ebx - orl %esi,%ecx - orl %edi,%edx - andl 24(%esp),%ecx - andl 28(%esp),%edx - andl 8(%esp),%esi - andl 
12(%esp),%edi - orl %esi,%ecx - orl %edi,%edx - addl %ecx,%eax - adcl %edx,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - movb (%ebp),%dl - subl $8,%esp - leal 8(%ebp),%ebp - cmpb $148,%dl - jne .L00900_15_x86 -.align 16 -.L01016_79_x86: - movl 312(%esp),%ecx - movl 316(%esp),%edx - movl %ecx,%esi - shrl $1,%ecx - movl %edx,%edi - shrl $1,%edx - movl %ecx,%eax - shll $24,%esi - movl %edx,%ebx - shll $24,%edi - xorl %esi,%ebx - shrl $6,%ecx - xorl %edi,%eax - shrl $6,%edx - xorl %ecx,%eax - shll $7,%esi - xorl %edx,%ebx - shll $1,%edi - xorl %esi,%ebx - shrl $1,%ecx - xorl %edi,%eax - shrl $1,%edx - xorl %ecx,%eax - shll $6,%edi - xorl %edx,%ebx - xorl %edi,%eax - movl %eax,(%esp) - movl %ebx,4(%esp) - movl 208(%esp),%ecx - movl 212(%esp),%edx - movl %ecx,%esi - shrl $6,%ecx - movl %edx,%edi - shrl $6,%edx - movl %ecx,%eax - shll $3,%esi - movl %edx,%ebx - shll $3,%edi - xorl %esi,%eax - shrl $13,%ecx - xorl %edi,%ebx - shrl $13,%edx - xorl %ecx,%eax - shll $10,%esi - xorl %edx,%ebx - shll $10,%edi - xorl %esi,%ebx - shrl $10,%ecx - xorl %edi,%eax - shrl $10,%edx - xorl %ecx,%ebx - shll $13,%edi - xorl %edx,%eax - xorl %edi,%eax - movl 320(%esp),%ecx - movl 324(%esp),%edx - addl (%esp),%eax - adcl 4(%esp),%ebx - movl 248(%esp),%esi - movl 252(%esp),%edi - addl %ecx,%eax - adcl %edx,%ebx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,192(%esp) - movl %ebx,196(%esp) - movl 40(%esp),%ecx - movl 44(%esp),%edx - movl %ecx,%esi - shrl $9,%ecx - movl %edx,%edi - shrl $9,%edx - movl %ecx,%ebx - shll $14,%esi - movl %edx,%eax - shll $14,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%eax - shll $4,%esi - xorl %edx,%ebx - shll $4,%edi - xorl %esi,%ebx - shrl $4,%ecx - xorl %edi,%eax - shrl $4,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 48(%esp),%ecx - movl 52(%esp),%edx - movl 56(%esp),%esi - movl 60(%esp),%edi - addl 64(%esp),%eax - adcl 68(%esp),%ebx - xorl %esi,%ecx - xorl 
%edi,%edx - andl 40(%esp),%ecx - andl 44(%esp),%edx - addl 192(%esp),%eax - adcl 196(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - movl (%ebp),%esi - movl 4(%ebp),%edi - addl %ecx,%eax - adcl %edx,%ebx - movl 32(%esp),%ecx - movl 36(%esp),%edx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - addl %ecx,%eax - adcl %edx,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,%esi - shrl $2,%ecx - movl %edx,%edi - shrl $2,%edx - movl %ecx,%ebx - shll $4,%esi - movl %edx,%eax - shll $4,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%ebx - shll $21,%esi - xorl %edx,%eax - shll $21,%edi - xorl %esi,%eax - shrl $21,%ecx - xorl %edi,%ebx - shrl $21,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl 16(%esp),%esi - movl 20(%esp),%edi - addl (%esp),%eax - adcl 4(%esp),%ebx - orl %esi,%ecx - orl %edi,%edx - andl 24(%esp),%ecx - andl 28(%esp),%edx - andl 8(%esp),%esi - andl 12(%esp),%edi - orl %esi,%ecx - orl %edi,%edx - addl %ecx,%eax - adcl %edx,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - movb (%ebp),%dl - subl $8,%esp - leal 8(%ebp),%ebp - cmpb $23,%dl - jne .L01016_79_x86 - movl 840(%esp),%esi - movl 844(%esp),%edi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - addl 8(%esp),%eax - adcl 12(%esp),%ebx - movl %eax,(%esi) - movl %ebx,4(%esi) - addl 16(%esp),%ecx - adcl 20(%esp),%edx - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - addl 24(%esp),%eax - adcl 28(%esp),%ebx - movl %eax,16(%esi) - movl %ebx,20(%esi) - addl 32(%esp),%ecx - adcl 36(%esp),%edx - movl %ecx,24(%esi) - movl %edx,28(%esi) - movl 32(%esi),%eax - movl 36(%esi),%ebx - movl 40(%esi),%ecx - movl 44(%esi),%edx - addl 40(%esp),%eax - adcl 44(%esp),%ebx - movl %eax,32(%esi) - movl %ebx,36(%esi) - 
addl 48(%esp),%ecx - adcl 52(%esp),%edx - movl %ecx,40(%esi) - movl %edx,44(%esi) - movl 48(%esi),%eax - movl 52(%esi),%ebx - movl 56(%esi),%ecx - movl 60(%esi),%edx - addl 56(%esp),%eax - adcl 60(%esp),%ebx - movl %eax,48(%esi) - movl %ebx,52(%esi) - addl 64(%esp),%ecx - adcl 68(%esp),%edx - movl %ecx,56(%esi) - movl %edx,60(%esi) - addl $840,%esp - subl $640,%ebp - cmpl 8(%esp),%edi - jb .L002loop_x86 - movl 12(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 64 -.L001K512: -.long 3609767458,1116352408 -.long 602891725,1899447441 -.long 3964484399,3049323471 -.long 2173295548,3921009573 -.long 4081628472,961987163 -.long 3053834265,1508970993 -.long 2937671579,2453635748 -.long 3664609560,2870763221 -.long 2734883394,3624381080 -.long 1164996542,310598401 -.long 1323610764,607225278 -.long 3590304994,1426881987 -.long 4068182383,1925078388 -.long 991336113,2162078206 -.long 633803317,2614888103 -.long 3479774868,3248222580 -.long 2666613458,3835390401 -.long 944711139,4022224774 -.long 2341262773,264347078 -.long 2007800933,604807628 -.long 1495990901,770255983 -.long 1856431235,1249150122 -.long 3175218132,1555081692 -.long 2198950837,1996064986 -.long 3999719339,2554220882 -.long 766784016,2821834349 -.long 2566594879,2952996808 -.long 3203337956,3210313671 -.long 1034457026,3336571891 -.long 2466948901,3584528711 -.long 3758326383,113926993 -.long 168717936,338241895 -.long 1188179964,666307205 -.long 1546045734,773529912 -.long 1522805485,1294757372 -.long 2643833823,1396182291 -.long 2343527390,1695183700 -.long 1014477480,1986661051 -.long 1206759142,2177026350 -.long 344077627,2456956037 -.long 1290863460,2730485921 -.long 3158454273,2820302411 -.long 3505952657,3259730800 -.long 106217008,3345764771 -.long 3606008344,3516065817 -.long 1432725776,3600352804 -.long 1467031594,4094571909 -.long 851169720,275423344 -.long 3100823752,430227734 -.long 1363258195,506948616 -.long 3750685593,659060556 -.long 3785050280,883997877 -.long 
3318307427,958139571 -.long 3812723403,1322822218 -.long 2003034995,1537002063 -.long 3602036899,1747873779 -.long 1575990012,1955562222 -.long 1125592928,2024104815 -.long 2716904306,2227730452 -.long 442776044,2361852424 -.long 593698344,2428436474 -.long 3733110249,2756734187 -.long 2999351573,3204031479 -.long 3815920427,3329325298 -.long 3928383900,3391569614 -.long 566280711,3515267271 -.long 3454069534,3940187606 -.long 4000239992,4118630271 -.long 1914138554,116418474 -.long 2731055270,174292421 -.long 3203993006,289380356 -.long 320620315,460393269 -.long 587496836,685471733 -.long 1086792851,852142971 -.long 365543100,1017036298 -.long 2618297676,1126000580 -.long 3409855158,1288033470 -.long 4234509866,1501505948 -.long 987167468,1607167915 -.long 1246189591,1816402316 -.long 67438087,66051 -.long 202182159,134810123 -.size sha512_block_data_order,.-.L_sha512_block_data_order_begin -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 -.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -.byte 62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/vpaes-x86.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/vpaes-x86.S deleted file mode 100644 index 8807116950..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/vpaes-x86.S +++ /dev/null @@ -1,708 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -#ifdef BORINGSSL_DISPATCH_TEST -#endif -.align 64 -.L_vpaes_consts: -.long 218628480,235210255,168496130,67568393 -.long 252381056,17041926,33884169,51187212 -.long 252645135,252645135,252645135,252645135 -.long 1512730624,3266504856,1377990664,3401244816 -.long 830229760,1275146365,2969422977,3447763452 -.long 3411033600,2979783055,338359620,2782886510 -.long 4209124096,907596821,221174255,1006095553 -.long 191964160,3799684038,3164090317,1589111125 -.long 182528256,1777043520,2877432650,3265356744 -.long 1874708224,3503451415,3305285752,363511674 -.long 1606117888,3487855781,1093350906,2384367825 -.long 197121,67569157,134941193,202313229 -.long 67569157,134941193,202313229,197121 -.long 134941193,202313229,197121,67569157 -.long 202313229,197121,67569157,134941193 -.long 33619971,100992007,168364043,235736079 -.long 235736079,33619971,100992007,168364043 -.long 168364043,235736079,33619971,100992007 -.long 100992007,168364043,235736079,33619971 -.long 50462976,117835012,185207048,252579084 -.long 252314880,51251460,117574920,184942860 -.long 184682752,252054788,50987272,118359308 -.long 118099200,185467140,251790600,50727180 -.long 2946363062,528716217,1300004225,1881839624 -.long 1532713819,1532713819,1532713819,1532713819 -.long 3602276352,4288629033,3737020424,4153884961 -.long 1354558464,32357713,2958822624,3775749553 -.long 1201988352,132424512,1572796698,503232858 -.long 2213177600,1597421020,4103937655,675398315 -.long 2749646592,4273543773,1511898873,121693092 -.long 3040248576,1103263732,2871565598,1608280554 -.long 2236667136,2588920351,482954393,64377734 -.long 3069987328,291237287,2117370568,3650299247 -.long 533321216,3573750986,2572112006,1401264716 -.long 1339849704,2721158661,548607111,3445553514 -.long 2128193280,3054596040,2183486460,1257083700 -.long 655635200,1165381986,3923443150,2344132524 -.long 190078720,256924420,290342170,357187870 -.long 
1610966272,2263057382,4103205268,309794674 -.long 2592527872,2233205587,1335446729,3402964816 -.long 3973531904,3225098121,3002836325,1918774430 -.long 3870401024,2102906079,2284471353,4117666579 -.long 617007872,1021508343,366931923,691083277 -.long 2528395776,3491914898,2968704004,1613121270 -.long 3445188352,3247741094,844474987,4093578302 -.long 651481088,1190302358,1689581232,574775300 -.long 4289380608,206939853,2555985458,2489840491 -.long 2130264064,327674451,3566485037,3349835193 -.long 2470714624,316102159,3636825756,3393945945 -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 -.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 -.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 -.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 -.byte 118,101,114,115,105,116,121,41,0 -.align 64 -.hidden _vpaes_preheat -.type _vpaes_preheat,@function -.align 16 -_vpaes_preheat: - addl (%esp),%ebp - movdqa -48(%ebp),%xmm7 - movdqa -16(%ebp),%xmm6 - ret -.size _vpaes_preheat,.-_vpaes_preheat -.hidden _vpaes_encrypt_core -.type _vpaes_encrypt_core,@function -.align 16 -_vpaes_encrypt_core: - movl $16,%ecx - movl 240(%edx),%eax - movdqa %xmm6,%xmm1 - movdqa (%ebp),%xmm2 - pandn %xmm0,%xmm1 - pand %xmm6,%xmm0 - movdqu (%edx),%xmm5 -.byte 102,15,56,0,208 - movdqa 16(%ebp),%xmm0 - pxor %xmm5,%xmm2 - psrld $4,%xmm1 - addl $16,%edx -.byte 102,15,56,0,193 - leal 192(%ebp),%ebx - pxor %xmm2,%xmm0 - jmp .L000enc_entry -.align 16 -.L001enc_loop: - movdqa 32(%ebp),%xmm4 - movdqa 48(%ebp),%xmm0 -.byte 102,15,56,0,226 -.byte 102,15,56,0,195 - pxor %xmm5,%xmm4 - movdqa 64(%ebp),%xmm5 - pxor %xmm4,%xmm0 - movdqa -64(%ebx,%ecx,1),%xmm1 -.byte 102,15,56,0,234 - movdqa 80(%ebp),%xmm2 - movdqa (%ebx,%ecx,1),%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm0,%xmm3 - pxor %xmm5,%xmm2 -.byte 102,15,56,0,193 - addl $16,%edx - pxor %xmm2,%xmm0 -.byte 102,15,56,0,220 - addl $16,%ecx - pxor %xmm0,%xmm3 -.byte 102,15,56,0,193 - andl $48,%ecx - subl $1,%eax - 
pxor %xmm3,%xmm0 -.L000enc_entry: - movdqa %xmm6,%xmm1 - movdqa -32(%ebp),%xmm5 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm6,%xmm0 -.byte 102,15,56,0,232 - movdqa %xmm7,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm7,%xmm4 - pxor %xmm5,%xmm3 -.byte 102,15,56,0,224 - movdqa %xmm7,%xmm2 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm7,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%edx),%xmm5 - pxor %xmm1,%xmm3 - jnz .L001enc_loop - movdqa 96(%ebp),%xmm4 - movdqa 112(%ebp),%xmm0 -.byte 102,15,56,0,226 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,195 - movdqa 64(%ebx,%ecx,1),%xmm1 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,193 - ret -.size _vpaes_encrypt_core,.-_vpaes_encrypt_core -.hidden _vpaes_decrypt_core -.type _vpaes_decrypt_core,@function -.align 16 -_vpaes_decrypt_core: - leal 608(%ebp),%ebx - movl 240(%edx),%eax - movdqa %xmm6,%xmm1 - movdqa -64(%ebx),%xmm2 - pandn %xmm0,%xmm1 - movl %eax,%ecx - psrld $4,%xmm1 - movdqu (%edx),%xmm5 - shll $4,%ecx - pand %xmm6,%xmm0 -.byte 102,15,56,0,208 - movdqa -48(%ebx),%xmm0 - xorl $48,%ecx -.byte 102,15,56,0,193 - andl $48,%ecx - pxor %xmm5,%xmm2 - movdqa 176(%ebp),%xmm5 - pxor %xmm2,%xmm0 - addl $16,%edx - leal -352(%ebx,%ecx,1),%ecx - jmp .L002dec_entry -.align 16 -.L003dec_loop: - movdqa -32(%ebx),%xmm4 - movdqa -16(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa (%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 16(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 32(%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 48(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 64(%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 80(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - addl $16,%edx -.byte 102,15,58,15,237,12 - pxor %xmm1,%xmm0 - subl $1,%eax -.L002dec_entry: - movdqa %xmm6,%xmm1 - movdqa -32(%ebp),%xmm2 - pandn 
%xmm0,%xmm1 - pand %xmm6,%xmm0 - psrld $4,%xmm1 -.byte 102,15,56,0,208 - movdqa %xmm7,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm7,%xmm4 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm7,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm7,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%edx),%xmm0 - pxor %xmm1,%xmm3 - jnz .L003dec_loop - movdqa 96(%ebx),%xmm4 -.byte 102,15,56,0,226 - pxor %xmm0,%xmm4 - movdqa 112(%ebx),%xmm0 - movdqa (%ecx),%xmm2 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,194 - ret -.size _vpaes_decrypt_core,.-_vpaes_decrypt_core -.hidden _vpaes_schedule_core -.type _vpaes_schedule_core,@function -.align 16 -_vpaes_schedule_core: - addl (%esp),%ebp - movdqu (%esi),%xmm0 - movdqa 320(%ebp),%xmm2 - movdqa %xmm0,%xmm3 - leal (%ebp),%ebx - movdqa %xmm2,4(%esp) - call _vpaes_schedule_transform - movdqa %xmm0,%xmm7 - testl %edi,%edi - jnz .L004schedule_am_decrypting - movdqu %xmm0,(%edx) - jmp .L005schedule_go -.L004schedule_am_decrypting: - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,217 - movdqu %xmm3,(%edx) - xorl $48,%ecx -.L005schedule_go: - cmpl $192,%eax - ja .L006schedule_256 - je .L007schedule_192 -.L008schedule_128: - movl $10,%eax -.L009loop_schedule_128: - call _vpaes_schedule_round - decl %eax - jz .L010schedule_mangle_last - call _vpaes_schedule_mangle - jmp .L009loop_schedule_128 -.align 16 -.L007schedule_192: - movdqu 8(%esi),%xmm0 - call _vpaes_schedule_transform - movdqa %xmm0,%xmm6 - pxor %xmm4,%xmm4 - movhlps %xmm4,%xmm6 - movl $4,%eax -.L011loop_schedule_192: - call _vpaes_schedule_round -.byte 102,15,58,15,198,8 - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - call _vpaes_schedule_mangle - call _vpaes_schedule_round - decl %eax - jz .L010schedule_mangle_last - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - jmp .L011loop_schedule_192 -.align 16 -.L006schedule_256: - movdqu 16(%esi),%xmm0 - call _vpaes_schedule_transform - movl $7,%eax 
-.L012loop_schedule_256: - call _vpaes_schedule_mangle - movdqa %xmm0,%xmm6 - call _vpaes_schedule_round - decl %eax - jz .L010schedule_mangle_last - call _vpaes_schedule_mangle - pshufd $255,%xmm0,%xmm0 - movdqa %xmm7,20(%esp) - movdqa %xmm6,%xmm7 - call .L_vpaes_schedule_low_round - movdqa 20(%esp),%xmm7 - jmp .L012loop_schedule_256 -.align 16 -.L010schedule_mangle_last: - leal 384(%ebp),%ebx - testl %edi,%edi - jnz .L013schedule_mangle_last_dec - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,193 - leal 352(%ebp),%ebx - addl $32,%edx -.L013schedule_mangle_last_dec: - addl $-16,%edx - pxor 336(%ebp),%xmm0 - call _vpaes_schedule_transform - movdqu %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - ret -.size _vpaes_schedule_core,.-_vpaes_schedule_core -.hidden _vpaes_schedule_192_smear -.type _vpaes_schedule_192_smear,@function -.align 16 -_vpaes_schedule_192_smear: - pshufd $128,%xmm6,%xmm1 - pshufd $254,%xmm7,%xmm0 - pxor %xmm1,%xmm6 - pxor %xmm1,%xmm1 - pxor %xmm0,%xmm6 - movdqa %xmm6,%xmm0 - movhlps %xmm1,%xmm6 - ret -.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear -.hidden _vpaes_schedule_round -.type _vpaes_schedule_round,@function -.align 16 -_vpaes_schedule_round: - movdqa 8(%esp),%xmm2 - pxor %xmm1,%xmm1 -.byte 102,15,58,15,202,15 -.byte 102,15,58,15,210,15 - pxor %xmm1,%xmm7 - pshufd $255,%xmm0,%xmm0 -.byte 102,15,58,15,192,1 - movdqa %xmm2,8(%esp) -.L_vpaes_schedule_low_round: - movdqa %xmm7,%xmm1 - pslldq $4,%xmm7 - pxor %xmm1,%xmm7 - movdqa %xmm7,%xmm1 - pslldq $8,%xmm7 - pxor %xmm1,%xmm7 - pxor 336(%ebp),%xmm7 - movdqa -16(%ebp),%xmm4 - movdqa -48(%ebp),%xmm5 - movdqa %xmm4,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm4,%xmm0 - movdqa -32(%ebp),%xmm2 -.byte 102,15,56,0,208 - pxor %xmm1,%xmm0 - movdqa %xmm5,%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - movdqa %xmm5,%xmm4 -.byte 102,15,56,0,224 - pxor 
%xmm2,%xmm4 - movdqa %xmm5,%xmm2 -.byte 102,15,56,0,211 - pxor %xmm0,%xmm2 - movdqa %xmm5,%xmm3 -.byte 102,15,56,0,220 - pxor %xmm1,%xmm3 - movdqa 32(%ebp),%xmm4 -.byte 102,15,56,0,226 - movdqa 48(%ebp),%xmm0 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 - pxor %xmm7,%xmm0 - movdqa %xmm0,%xmm7 - ret -.size _vpaes_schedule_round,.-_vpaes_schedule_round -.hidden _vpaes_schedule_transform -.type _vpaes_schedule_transform,@function -.align 16 -_vpaes_schedule_transform: - movdqa -16(%ebp),%xmm2 - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - movdqa (%ebx),%xmm2 -.byte 102,15,56,0,208 - movdqa 16(%ebx),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm2,%xmm0 - ret -.size _vpaes_schedule_transform,.-_vpaes_schedule_transform -.hidden _vpaes_schedule_mangle -.type _vpaes_schedule_mangle,@function -.align 16 -_vpaes_schedule_mangle: - movdqa %xmm0,%xmm4 - movdqa 128(%ebp),%xmm5 - testl %edi,%edi - jnz .L014schedule_mangle_dec - addl $16,%edx - pxor 336(%ebp),%xmm4 -.byte 102,15,56,0,229 - movdqa %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 - jmp .L015schedule_mangle_both -.align 16 -.L014schedule_mangle_dec: - movdqa -16(%ebp),%xmm2 - leal 416(%ebp),%esi - movdqa %xmm2,%xmm1 - pandn %xmm4,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm4 - movdqa (%esi),%xmm2 -.byte 102,15,56,0,212 - movdqa 16(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 32(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 48(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 64(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 80(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 96(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 112(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - addl $-16,%edx -.L015schedule_mangle_both: - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,217 - addl 
$-16,%ecx - andl $48,%ecx - movdqu %xmm3,(%edx) - ret -.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle -.globl vpaes_set_encrypt_key -.hidden vpaes_set_encrypt_key -.type vpaes_set_encrypt_key,@function -.align 16 -vpaes_set_encrypt_key: -.L_vpaes_set_encrypt_key_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call .L016pic -.L016pic: - popl %ebx - leal BORINGSSL_function_hit+5-.L016pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%eax - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movl %eax,%ebx - shrl $5,%ebx - addl $5,%ebx - movl %ebx,240(%edx) - movl $48,%ecx - movl $0,%edi - leal .L_vpaes_consts+0x30-.L017pic_point,%ebp - call _vpaes_schedule_core -.L017pic_point: - movl 48(%esp),%esp - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin -.globl vpaes_set_decrypt_key -.hidden vpaes_set_decrypt_key -.type vpaes_set_decrypt_key,@function -.align 16 -vpaes_set_decrypt_key: -.L_vpaes_set_decrypt_key_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%eax - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movl %eax,%ebx - shrl $5,%ebx - addl $5,%ebx - movl %ebx,240(%edx) - shll $4,%ebx - leal 16(%edx,%ebx,1),%edx - movl $1,%edi - movl %eax,%ecx - shrl $1,%ecx - andl $32,%ecx - xorl $32,%ecx - leal .L_vpaes_consts+0x30-.L018pic_point,%ebp - call _vpaes_schedule_core -.L018pic_point: - movl 48(%esp),%esp - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin -.globl vpaes_encrypt -.hidden vpaes_encrypt -.type vpaes_encrypt,@function -.align 16 -vpaes_encrypt: -.L_vpaes_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - 
pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call .L019pic -.L019pic: - popl %ebx - leal BORINGSSL_function_hit+4-.L019pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - leal .L_vpaes_consts+0x30-.L020pic_point,%ebp - call _vpaes_preheat -.L020pic_point: - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%edi - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movdqu (%esi),%xmm0 - call _vpaes_encrypt_core - movdqu %xmm0,(%edi) - movl 48(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size vpaes_encrypt,.-.L_vpaes_encrypt_begin -.globl vpaes_decrypt -.hidden vpaes_decrypt -.type vpaes_decrypt,@function -.align 16 -vpaes_decrypt: -.L_vpaes_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - leal .L_vpaes_consts+0x30-.L021pic_point,%ebp - call _vpaes_preheat -.L021pic_point: - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%edi - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movdqu (%esi),%xmm0 - call _vpaes_decrypt_core - movdqu %xmm0,(%edi) - movl 48(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size vpaes_decrypt,.-.L_vpaes_decrypt_begin -.globl vpaes_cbc_encrypt -.hidden vpaes_cbc_encrypt -.type vpaes_cbc_encrypt,@function -.align 16 -vpaes_cbc_encrypt: -.L_vpaes_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - subl $16,%eax - jc .L022cbc_abort - leal -56(%esp),%ebx - movl 36(%esp),%ebp - andl $-16,%ebx - movl 40(%esp),%ecx - xchgl %esp,%ebx - movdqu (%ebp),%xmm1 - subl %esi,%edi - movl %ebx,48(%esp) - movl %edi,(%esp) - movl %edx,4(%esp) - movl %ebp,8(%esp) - movl %eax,%edi - leal .L_vpaes_consts+0x30-.L023pic_point,%ebp - call _vpaes_preheat -.L023pic_point: - cmpl $0,%ecx - je .L024cbc_dec_loop - jmp .L025cbc_enc_loop -.align 16 -.L025cbc_enc_loop: - movdqu 
(%esi),%xmm0 - pxor %xmm1,%xmm0 - call _vpaes_encrypt_core - movl (%esp),%ebx - movl 4(%esp),%edx - movdqa %xmm0,%xmm1 - movdqu %xmm0,(%ebx,%esi,1) - leal 16(%esi),%esi - subl $16,%edi - jnc .L025cbc_enc_loop - jmp .L026cbc_done -.align 16 -.L024cbc_dec_loop: - movdqu (%esi),%xmm0 - movdqa %xmm1,16(%esp) - movdqa %xmm0,32(%esp) - call _vpaes_decrypt_core - movl (%esp),%ebx - movl 4(%esp),%edx - pxor 16(%esp),%xmm0 - movdqa 32(%esp),%xmm1 - movdqu %xmm0,(%ebx,%esi,1) - leal 16(%esi),%esi - subl $16,%edi - jnc .L024cbc_dec_loop -.L026cbc_done: - movl 8(%esp),%ebx - movl 48(%esp),%esp - movdqu %xmm1,(%ebx) -.L022cbc_abort: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/x86-mont.S b/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/x86-mont.S deleted file mode 100644 index f2c6fde7c6..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/fipsmodule/x86-mont.S +++ /dev/null @@ -1,484 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl bn_mul_mont -.hidden bn_mul_mont -.type bn_mul_mont,@function -.align 16 -bn_mul_mont: -.L_bn_mul_mont_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - xorl %eax,%eax - movl 40(%esp),%edi - cmpl $4,%edi - jl .L000just_leave - leal 20(%esp),%esi - leal 24(%esp),%edx - addl $2,%edi - negl %edi - leal -32(%esp,%edi,4),%ebp - negl %edi - movl %ebp,%eax - subl %edx,%eax - andl $2047,%eax - subl %eax,%ebp - xorl %ebp,%edx - andl $2048,%edx - xorl $2048,%edx - subl %edx,%ebp - andl $-64,%ebp - movl %esp,%eax - subl %ebp,%eax - andl $-4096,%eax - movl %esp,%edx - leal (%ebp,%eax,1),%esp - movl (%esp),%eax - cmpl %ebp,%esp - ja .L001page_walk - jmp .L002page_walk_done -.align 16 -.L001page_walk: - leal -4096(%esp),%esp - movl (%esp),%eax - cmpl %ebp,%esp - ja .L001page_walk -.L002page_walk_done: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%ebp - movl 16(%esi),%esi - movl (%esi),%esi - movl %eax,4(%esp) - movl %ebx,8(%esp) - movl %ecx,12(%esp) - movl %ebp,16(%esp) - movl %esi,20(%esp) - leal -3(%edi),%ebx - movl %edx,24(%esp) - call .L003PIC_me_up -.L003PIC_me_up: - popl %eax - leal OPENSSL_ia32cap_P-.L003PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc .L004non_sse2 - movl $-1,%eax - movd %eax,%mm7 - movl 8(%esp),%esi - movl 12(%esp),%edi - movl 16(%esp),%ebp - xorl %edx,%edx - xorl %ecx,%ecx - movd (%edi),%mm4 - movd (%esi),%mm5 - movd (%ebp),%mm3 - pmuludq %mm4,%mm5 - movq %mm5,%mm2 - movq %mm5,%mm0 - pand %mm7,%mm0 - pmuludq 20(%esp),%mm5 - pmuludq %mm5,%mm3 - paddq %mm0,%mm3 - movd 4(%ebp),%mm1 - movd 4(%esi),%mm0 - psrlq $32,%mm2 - psrlq $32,%mm3 - incl %ecx -.align 16 -.L0051st: - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - movd 4(%ebp,%ecx,4),%mm1 - paddq %mm0,%mm3 - movd 4(%esi,%ecx,4),%mm0 - psrlq $32,%mm2 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm3 - leal 1(%ecx),%ecx - 
cmpl %ebx,%ecx - jl .L0051st - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - paddq %mm0,%mm3 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm2 - psrlq $32,%mm3 - paddq %mm2,%mm3 - movq %mm3,32(%esp,%ebx,4) - incl %edx -.L006outer: - xorl %ecx,%ecx - movd (%edi,%edx,4),%mm4 - movd (%esi),%mm5 - movd 32(%esp),%mm6 - movd (%ebp),%mm3 - pmuludq %mm4,%mm5 - paddq %mm6,%mm5 - movq %mm5,%mm0 - movq %mm5,%mm2 - pand %mm7,%mm0 - pmuludq 20(%esp),%mm5 - pmuludq %mm5,%mm3 - paddq %mm0,%mm3 - movd 36(%esp),%mm6 - movd 4(%ebp),%mm1 - movd 4(%esi),%mm0 - psrlq $32,%mm2 - psrlq $32,%mm3 - paddq %mm6,%mm2 - incl %ecx - decl %ebx -.L007inner: - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - movd 36(%esp,%ecx,4),%mm6 - pand %mm7,%mm0 - movd 4(%ebp,%ecx,4),%mm1 - paddq %mm0,%mm3 - movd 4(%esi,%ecx,4),%mm0 - psrlq $32,%mm2 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm3 - paddq %mm6,%mm2 - decl %ebx - leal 1(%ecx),%ecx - jnz .L007inner - movl %ecx,%ebx - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - paddq %mm0,%mm3 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm2 - psrlq $32,%mm3 - movd 36(%esp,%ebx,4),%mm6 - paddq %mm2,%mm3 - paddq %mm6,%mm3 - movq %mm3,32(%esp,%ebx,4) - leal 1(%edx),%edx - cmpl %ebx,%edx - jle .L006outer - emms - jmp .L008common_tail -.align 16 -.L004non_sse2: - movl 8(%esp),%esi - leal 1(%ebx),%ebp - movl 12(%esp),%edi - xorl %ecx,%ecx - movl %esi,%edx - andl $1,%ebp - subl %edi,%edx - leal 4(%edi,%ebx,4),%eax - orl %edx,%ebp - movl (%edi),%edi - jz .L009bn_sqr_mont - movl %eax,28(%esp) - movl (%esi),%eax - xorl %edx,%edx -.align 16 -.L010mull: - movl %edx,%ebp - mull %edi - addl %eax,%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - movl (%esi,%ecx,4),%eax - cmpl %ebx,%ecx - movl %ebp,28(%esp,%ecx,4) - jl .L010mull - movl %edx,%ebp - mull %edi - movl 20(%esp),%edi - addl %ebp,%eax - movl 16(%esp),%esi - adcl 
$0,%edx - imull 32(%esp),%edi - movl %eax,32(%esp,%ebx,4) - xorl %ecx,%ecx - movl %edx,36(%esp,%ebx,4) - movl %ecx,40(%esp,%ebx,4) - movl (%esi),%eax - mull %edi - addl 32(%esp),%eax - movl 4(%esi),%eax - adcl $0,%edx - incl %ecx - jmp .L0112ndmadd -.align 16 -.L0121stmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,28(%esp,%ecx,4) - jl .L0121stmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%eax - movl 20(%esp),%edi - adcl $0,%edx - movl 16(%esp),%esi - addl %eax,%ebp - adcl $0,%edx - imull 32(%esp),%edi - xorl %ecx,%ecx - addl 36(%esp,%ebx,4),%edx - movl %ebp,32(%esp,%ebx,4) - adcl $0,%ecx - movl (%esi),%eax - movl %edx,36(%esp,%ebx,4) - movl %ecx,40(%esp,%ebx,4) - mull %edi - addl 32(%esp),%eax - movl 4(%esi),%eax - adcl $0,%edx - movl $1,%ecx -.align 16 -.L0112ndmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,24(%esp,%ecx,4) - jl .L0112ndmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - adcl $0,%edx - movl %ebp,28(%esp,%ebx,4) - xorl %eax,%eax - movl 12(%esp),%ecx - addl 36(%esp,%ebx,4),%edx - adcl 40(%esp,%ebx,4),%eax - leal 4(%ecx),%ecx - movl %edx,32(%esp,%ebx,4) - cmpl 28(%esp),%ecx - movl %eax,36(%esp,%ebx,4) - je .L008common_tail - movl (%ecx),%edi - movl 8(%esp),%esi - movl %ecx,12(%esp) - xorl %ecx,%ecx - xorl %edx,%edx - movl (%esi),%eax - jmp .L0121stmadd -.align 16 -.L009bn_sqr_mont: - movl %ebx,(%esp) - movl %ecx,12(%esp) - movl %edi,%eax - mull %edi - movl %eax,32(%esp) - movl %edx,%ebx - shrl $1,%edx - andl $1,%ebx - incl %ecx -.align 16 -.L013sqr: - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl %ebp,%eax - leal 1(%ecx),%ecx - adcl $0,%edx - leal (%ebx,%eax,2),%ebp - shrl $31,%eax - cmpl (%esp),%ecx - movl 
%eax,%ebx - movl %ebp,28(%esp,%ecx,4) - jl .L013sqr - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl %ebp,%eax - movl 20(%esp),%edi - adcl $0,%edx - movl 16(%esp),%esi - leal (%ebx,%eax,2),%ebp - imull 32(%esp),%edi - shrl $31,%eax - movl %ebp,32(%esp,%ecx,4) - leal (%eax,%edx,2),%ebp - movl (%esi),%eax - shrl $31,%edx - movl %ebp,36(%esp,%ecx,4) - movl %edx,40(%esp,%ecx,4) - mull %edi - addl 32(%esp),%eax - movl %ecx,%ebx - adcl $0,%edx - movl 4(%esi),%eax - movl $1,%ecx -.align 16 -.L0143rdmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - movl 4(%esi,%ecx,4),%eax - adcl $0,%edx - movl %ebp,28(%esp,%ecx,4) - movl %edx,%ebp - mull %edi - addl 36(%esp,%ecx,4),%ebp - leal 2(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,24(%esp,%ecx,4) - jl .L0143rdmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - adcl $0,%edx - movl %ebp,28(%esp,%ebx,4) - movl 12(%esp),%ecx - xorl %eax,%eax - movl 8(%esp),%esi - addl 36(%esp,%ebx,4),%edx - adcl 40(%esp,%ebx,4),%eax - movl %edx,32(%esp,%ebx,4) - cmpl %ebx,%ecx - movl %eax,36(%esp,%ebx,4) - je .L008common_tail - movl 4(%esi,%ecx,4),%edi - leal 1(%ecx),%ecx - movl %edi,%eax - movl %ecx,12(%esp) - mull %edi - addl 32(%esp,%ecx,4),%eax - adcl $0,%edx - movl %eax,32(%esp,%ecx,4) - xorl %ebp,%ebp - cmpl %ebx,%ecx - leal 1(%ecx),%ecx - je .L015sqrlast - movl %edx,%ebx - shrl $1,%edx - andl $1,%ebx -.align 16 -.L016sqradd: - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl %ebp,%eax - leal (%eax,%eax,1),%ebp - adcl $0,%edx - shrl $31,%eax - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%eax - addl %ebx,%ebp - adcl $0,%eax - cmpl (%esp),%ecx - movl %ebp,28(%esp,%ecx,4) - movl %eax,%ebx - jle .L016sqradd - movl %edx,%ebp - addl %edx,%edx - shrl $31,%ebp - addl %ebx,%edx - adcl $0,%ebp -.L015sqrlast: - movl 20(%esp),%edi - movl 16(%esp),%esi - imull 
32(%esp),%edi - addl 32(%esp,%ecx,4),%edx - movl (%esi),%eax - adcl $0,%ebp - movl %edx,32(%esp,%ecx,4) - movl %ebp,36(%esp,%ecx,4) - mull %edi - addl 32(%esp),%eax - leal -1(%ecx),%ebx - adcl $0,%edx - movl $1,%ecx - movl 4(%esi),%eax - jmp .L0143rdmadd -.align 16 -.L008common_tail: - movl 16(%esp),%ebp - movl 4(%esp),%edi - leal 32(%esp),%esi - movl (%esi),%eax - movl %ebx,%ecx - xorl %edx,%edx -.align 16 -.L017sub: - sbbl (%ebp,%edx,4),%eax - movl %eax,(%edi,%edx,4) - decl %ecx - movl 4(%esi,%edx,4),%eax - leal 1(%edx),%edx - jge .L017sub - sbbl $0,%eax - movl $-1,%edx - xorl %eax,%edx - jmp .L018copy -.align 16 -.L018copy: - movl 32(%esp,%ebx,4),%esi - movl (%edi,%ebx,4),%ebp - movl %ecx,32(%esp,%ebx,4) - andl %eax,%esi - andl %edx,%ebp - orl %esi,%ebp - movl %ebp,(%edi,%ebx,4) - decl %ebx - jge .L018copy - movl 24(%esp),%esp - movl $1,%eax -.L000just_leave: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size bn_mul_mont,.-.L_bn_mul_mont_begin -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 -.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 -.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 -.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 -.byte 111,114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86/crypto/test/trampoline-x86.S b/packager/third_party/boringssl/linux-x86/crypto/test/trampoline-x86.S deleted file mode 100644 index 13eb677c97..0000000000 --- a/packager/third_party/boringssl/linux-x86/crypto/test/trampoline-x86.S +++ /dev/null @@ -1,206 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl abi_test_trampoline -.hidden abi_test_trampoline -.type abi_test_trampoline,@function -.align 16 -abi_test_trampoline: -.L_abi_test_trampoline_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 24(%esp),%ecx - movl (%ecx),%esi - movl 4(%ecx),%edi - movl 8(%ecx),%ebx - movl 12(%ecx),%ebp - subl $44,%esp - movl 72(%esp),%eax - xorl %ecx,%ecx -.L000loop: - cmpl 76(%esp),%ecx - jae .L001loop_done - movl (%eax,%ecx,4),%edx - movl %edx,(%esp,%ecx,4) - addl $1,%ecx - jmp .L000loop -.L001loop_done: - call *64(%esp) - addl $44,%esp - movl 24(%esp),%ecx - movl %esi,(%ecx) - movl %edi,4(%ecx) - movl %ebx,8(%ecx) - movl %ebp,12(%ecx) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.size abi_test_trampoline,.-.L_abi_test_trampoline_begin -.globl abi_test_get_and_clear_direction_flag -.hidden abi_test_get_and_clear_direction_flag -.type abi_test_get_and_clear_direction_flag,@function -.align 16 -abi_test_get_and_clear_direction_flag: -.L_abi_test_get_and_clear_direction_flag_begin: - pushfl - popl %eax - andl $1024,%eax - shrl $10,%eax - cld - ret -.size abi_test_get_and_clear_direction_flag,.-.L_abi_test_get_and_clear_direction_flag_begin -.globl abi_test_set_direction_flag -.hidden abi_test_set_direction_flag -.type abi_test_set_direction_flag,@function -.align 16 -abi_test_set_direction_flag: -.L_abi_test_set_direction_flag_begin: - std - ret -.size abi_test_set_direction_flag,.-.L_abi_test_set_direction_flag_begin -.globl abi_test_clobber_eax -.hidden abi_test_clobber_eax -.type abi_test_clobber_eax,@function -.align 16 -abi_test_clobber_eax: -.L_abi_test_clobber_eax_begin: - xorl %eax,%eax - ret -.size abi_test_clobber_eax,.-.L_abi_test_clobber_eax_begin -.globl abi_test_clobber_ebx -.hidden abi_test_clobber_ebx -.type abi_test_clobber_ebx,@function -.align 16 -abi_test_clobber_ebx: -.L_abi_test_clobber_ebx_begin: - xorl %ebx,%ebx - ret -.size 
abi_test_clobber_ebx,.-.L_abi_test_clobber_ebx_begin -.globl abi_test_clobber_ecx -.hidden abi_test_clobber_ecx -.type abi_test_clobber_ecx,@function -.align 16 -abi_test_clobber_ecx: -.L_abi_test_clobber_ecx_begin: - xorl %ecx,%ecx - ret -.size abi_test_clobber_ecx,.-.L_abi_test_clobber_ecx_begin -.globl abi_test_clobber_edx -.hidden abi_test_clobber_edx -.type abi_test_clobber_edx,@function -.align 16 -abi_test_clobber_edx: -.L_abi_test_clobber_edx_begin: - xorl %edx,%edx - ret -.size abi_test_clobber_edx,.-.L_abi_test_clobber_edx_begin -.globl abi_test_clobber_edi -.hidden abi_test_clobber_edi -.type abi_test_clobber_edi,@function -.align 16 -abi_test_clobber_edi: -.L_abi_test_clobber_edi_begin: - xorl %edi,%edi - ret -.size abi_test_clobber_edi,.-.L_abi_test_clobber_edi_begin -.globl abi_test_clobber_esi -.hidden abi_test_clobber_esi -.type abi_test_clobber_esi,@function -.align 16 -abi_test_clobber_esi: -.L_abi_test_clobber_esi_begin: - xorl %esi,%esi - ret -.size abi_test_clobber_esi,.-.L_abi_test_clobber_esi_begin -.globl abi_test_clobber_ebp -.hidden abi_test_clobber_ebp -.type abi_test_clobber_ebp,@function -.align 16 -abi_test_clobber_ebp: -.L_abi_test_clobber_ebp_begin: - xorl %ebp,%ebp - ret -.size abi_test_clobber_ebp,.-.L_abi_test_clobber_ebp_begin -.globl abi_test_clobber_xmm0 -.hidden abi_test_clobber_xmm0 -.type abi_test_clobber_xmm0,@function -.align 16 -abi_test_clobber_xmm0: -.L_abi_test_clobber_xmm0_begin: - pxor %xmm0,%xmm0 - ret -.size abi_test_clobber_xmm0,.-.L_abi_test_clobber_xmm0_begin -.globl abi_test_clobber_xmm1 -.hidden abi_test_clobber_xmm1 -.type abi_test_clobber_xmm1,@function -.align 16 -abi_test_clobber_xmm1: -.L_abi_test_clobber_xmm1_begin: - pxor %xmm1,%xmm1 - ret -.size abi_test_clobber_xmm1,.-.L_abi_test_clobber_xmm1_begin -.globl abi_test_clobber_xmm2 -.hidden abi_test_clobber_xmm2 -.type abi_test_clobber_xmm2,@function -.align 16 -abi_test_clobber_xmm2: -.L_abi_test_clobber_xmm2_begin: - pxor %xmm2,%xmm2 - ret -.size 
abi_test_clobber_xmm2,.-.L_abi_test_clobber_xmm2_begin -.globl abi_test_clobber_xmm3 -.hidden abi_test_clobber_xmm3 -.type abi_test_clobber_xmm3,@function -.align 16 -abi_test_clobber_xmm3: -.L_abi_test_clobber_xmm3_begin: - pxor %xmm3,%xmm3 - ret -.size abi_test_clobber_xmm3,.-.L_abi_test_clobber_xmm3_begin -.globl abi_test_clobber_xmm4 -.hidden abi_test_clobber_xmm4 -.type abi_test_clobber_xmm4,@function -.align 16 -abi_test_clobber_xmm4: -.L_abi_test_clobber_xmm4_begin: - pxor %xmm4,%xmm4 - ret -.size abi_test_clobber_xmm4,.-.L_abi_test_clobber_xmm4_begin -.globl abi_test_clobber_xmm5 -.hidden abi_test_clobber_xmm5 -.type abi_test_clobber_xmm5,@function -.align 16 -abi_test_clobber_xmm5: -.L_abi_test_clobber_xmm5_begin: - pxor %xmm5,%xmm5 - ret -.size abi_test_clobber_xmm5,.-.L_abi_test_clobber_xmm5_begin -.globl abi_test_clobber_xmm6 -.hidden abi_test_clobber_xmm6 -.type abi_test_clobber_xmm6,@function -.align 16 -abi_test_clobber_xmm6: -.L_abi_test_clobber_xmm6_begin: - pxor %xmm6,%xmm6 - ret -.size abi_test_clobber_xmm6,.-.L_abi_test_clobber_xmm6_begin -.globl abi_test_clobber_xmm7 -.hidden abi_test_clobber_xmm7 -.type abi_test_clobber_xmm7,@function -.align 16 -abi_test_clobber_xmm7: -.L_abi_test_clobber_xmm7_begin: - pxor %xmm7,%xmm7 - ret -.size abi_test_clobber_xmm7,.-.L_abi_test_clobber_xmm7_begin -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/chacha/chacha-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/chacha/chacha-x86_64.S deleted file mode 100644 index b76713398d..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/chacha/chacha-x86_64.S +++ /dev/null @@ -1,1633 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -.align 64 -.Lzero: -.long 0,0,0,0 -.Lone: -.long 1,0,0,0 -.Linc: -.long 0,1,2,3 -.Lfour: -.long 4,4,4,4 -.Lincy: -.long 0,2,4,6,1,3,5,7 -.Leight: -.long 8,8,8,8,8,8,8,8 -.Lrot16: -.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd -.Lrot24: -.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe -.Lsigma: -.byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 -.align 64 -.Lzeroz: -.long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 -.Lfourz: -.long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 -.Lincz: -.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 -.Lsixteen: -.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.globl ChaCha20_ctr32 -.hidden ChaCha20_ctr32 -.type ChaCha20_ctr32,@function -.align 64 -ChaCha20_ctr32: -.cfi_startproc - cmpq $0,%rdx - je .Lno_data - movq OPENSSL_ia32cap_P+4(%rip),%r10 - testl $512,%r10d - jnz .LChaCha20_ssse3 - - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset rbx,-16 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset rbp,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset r15,-56 - subq $64+24,%rsp -.cfi_adjust_cfa_offset 88 -.Lctr32_body: - - - movdqu (%rcx),%xmm1 - movdqu 16(%rcx),%xmm2 - movdqu (%r8),%xmm3 - movdqa .Lone(%rip),%xmm4 - - - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - movq %rdx,%rbp - jmp 
.Loop_outer - -.align 32 -.Loop_outer: - movl $0x61707865,%eax - movl $0x3320646e,%ebx - movl $0x79622d32,%ecx - movl $0x6b206574,%edx - movl 16(%rsp),%r8d - movl 20(%rsp),%r9d - movl 24(%rsp),%r10d - movl 28(%rsp),%r11d - movd %xmm3,%r12d - movl 52(%rsp),%r13d - movl 56(%rsp),%r14d - movl 60(%rsp),%r15d - - movq %rbp,64+0(%rsp) - movl $10,%ebp - movq %rsi,64+8(%rsp) -.byte 102,72,15,126,214 - movq %rdi,64+16(%rsp) - movq %rsi,%rdi - shrq $32,%rdi - jmp .Loop - -.align 32 -.Loop: - addl %r8d,%eax - xorl %eax,%r12d - roll $16,%r12d - addl %r9d,%ebx - xorl %ebx,%r13d - roll $16,%r13d - addl %r12d,%esi - xorl %esi,%r8d - roll $12,%r8d - addl %r13d,%edi - xorl %edi,%r9d - roll $12,%r9d - addl %r8d,%eax - xorl %eax,%r12d - roll $8,%r12d - addl %r9d,%ebx - xorl %ebx,%r13d - roll $8,%r13d - addl %r12d,%esi - xorl %esi,%r8d - roll $7,%r8d - addl %r13d,%edi - xorl %edi,%r9d - roll $7,%r9d - movl %esi,32(%rsp) - movl %edi,36(%rsp) - movl 40(%rsp),%esi - movl 44(%rsp),%edi - addl %r10d,%ecx - xorl %ecx,%r14d - roll $16,%r14d - addl %r11d,%edx - xorl %edx,%r15d - roll $16,%r15d - addl %r14d,%esi - xorl %esi,%r10d - roll $12,%r10d - addl %r15d,%edi - xorl %edi,%r11d - roll $12,%r11d - addl %r10d,%ecx - xorl %ecx,%r14d - roll $8,%r14d - addl %r11d,%edx - xorl %edx,%r15d - roll $8,%r15d - addl %r14d,%esi - xorl %esi,%r10d - roll $7,%r10d - addl %r15d,%edi - xorl %edi,%r11d - roll $7,%r11d - addl %r9d,%eax - xorl %eax,%r15d - roll $16,%r15d - addl %r10d,%ebx - xorl %ebx,%r12d - roll $16,%r12d - addl %r15d,%esi - xorl %esi,%r9d - roll $12,%r9d - addl %r12d,%edi - xorl %edi,%r10d - roll $12,%r10d - addl %r9d,%eax - xorl %eax,%r15d - roll $8,%r15d - addl %r10d,%ebx - xorl %ebx,%r12d - roll $8,%r12d - addl %r15d,%esi - xorl %esi,%r9d - roll $7,%r9d - addl %r12d,%edi - xorl %edi,%r10d - roll $7,%r10d - movl %esi,40(%rsp) - movl %edi,44(%rsp) - movl 32(%rsp),%esi - movl 36(%rsp),%edi - addl %r11d,%ecx - xorl %ecx,%r13d - roll $16,%r13d - addl %r8d,%edx - xorl %edx,%r14d - roll $16,%r14d 
- addl %r13d,%esi - xorl %esi,%r11d - roll $12,%r11d - addl %r14d,%edi - xorl %edi,%r8d - roll $12,%r8d - addl %r11d,%ecx - xorl %ecx,%r13d - roll $8,%r13d - addl %r8d,%edx - xorl %edx,%r14d - roll $8,%r14d - addl %r13d,%esi - xorl %esi,%r11d - roll $7,%r11d - addl %r14d,%edi - xorl %edi,%r8d - roll $7,%r8d - decl %ebp - jnz .Loop - movl %edi,36(%rsp) - movl %esi,32(%rsp) - movq 64(%rsp),%rbp - movdqa %xmm2,%xmm1 - movq 64+8(%rsp),%rsi - paddd %xmm4,%xmm3 - movq 64+16(%rsp),%rdi - - addl $0x61707865,%eax - addl $0x3320646e,%ebx - addl $0x79622d32,%ecx - addl $0x6b206574,%edx - addl 16(%rsp),%r8d - addl 20(%rsp),%r9d - addl 24(%rsp),%r10d - addl 28(%rsp),%r11d - addl 48(%rsp),%r12d - addl 52(%rsp),%r13d - addl 56(%rsp),%r14d - addl 60(%rsp),%r15d - paddd 32(%rsp),%xmm1 - - cmpq $64,%rbp - jb .Ltail - - xorl 0(%rsi),%eax - xorl 4(%rsi),%ebx - xorl 8(%rsi),%ecx - xorl 12(%rsi),%edx - xorl 16(%rsi),%r8d - xorl 20(%rsi),%r9d - xorl 24(%rsi),%r10d - xorl 28(%rsi),%r11d - movdqu 32(%rsi),%xmm0 - xorl 48(%rsi),%r12d - xorl 52(%rsi),%r13d - xorl 56(%rsi),%r14d - xorl 60(%rsi),%r15d - leaq 64(%rsi),%rsi - pxor %xmm1,%xmm0 - - movdqa %xmm2,32(%rsp) - movd %xmm3,48(%rsp) - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - movdqu %xmm0,32(%rdi) - movl %r12d,48(%rdi) - movl %r13d,52(%rdi) - movl %r14d,56(%rdi) - movl %r15d,60(%rdi) - leaq 64(%rdi),%rdi - - subq $64,%rbp - jnz .Loop_outer - - jmp .Ldone - -.align 16 -.Ltail: - movl %eax,0(%rsp) - movl %ebx,4(%rsp) - xorq %rbx,%rbx - movl %ecx,8(%rsp) - movl %edx,12(%rsp) - movl %r8d,16(%rsp) - movl %r9d,20(%rsp) - movl %r10d,24(%rsp) - movl %r11d,28(%rsp) - movdqa %xmm1,32(%rsp) - movl %r12d,48(%rsp) - movl %r13d,52(%rsp) - movl %r14d,56(%rsp) - movl %r15d,60(%rsp) - -.Loop_tail: - movzbl (%rsi,%rbx,1),%eax - movzbl (%rsp,%rbx,1),%edx - leaq 1(%rbx),%rbx - xorl %edx,%eax - movb %al,-1(%rdi,%rbx,1) - decq %rbp - 
jnz .Loop_tail - -.Ldone: - leaq 64+24+48(%rsp),%rsi - movq -48(%rsi),%r15 -.cfi_restore r15 - movq -40(%rsi),%r14 -.cfi_restore r14 - movq -32(%rsi),%r13 -.cfi_restore r13 - movq -24(%rsi),%r12 -.cfi_restore r12 - movq -16(%rsi),%rbp -.cfi_restore rbp - movq -8(%rsi),%rbx -.cfi_restore rbx - leaq (%rsi),%rsp -.cfi_adjust_cfa_offset -136 -.Lno_data: - .byte 0xf3,0xc3 -.cfi_endproc -.size ChaCha20_ctr32,.-ChaCha20_ctr32 -.type ChaCha20_ssse3,@function -.align 32 -ChaCha20_ssse3: -.LChaCha20_ssse3: -.cfi_startproc - movq %rsp,%r9 -.cfi_def_cfa_register r9 - cmpq $128,%rdx - ja .LChaCha20_4x - -.Ldo_sse3_after_all: - subq $64+8,%rsp - movdqa .Lsigma(%rip),%xmm0 - movdqu (%rcx),%xmm1 - movdqu 16(%rcx),%xmm2 - movdqu (%r8),%xmm3 - movdqa .Lrot16(%rip),%xmm6 - movdqa .Lrot24(%rip),%xmm7 - - movdqa %xmm0,0(%rsp) - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - movq $10,%r8 - jmp .Loop_ssse3 - -.align 32 -.Loop_outer_ssse3: - movdqa .Lone(%rip),%xmm3 - movdqa 0(%rsp),%xmm0 - movdqa 16(%rsp),%xmm1 - movdqa 32(%rsp),%xmm2 - paddd 48(%rsp),%xmm3 - movq $10,%r8 - movdqa %xmm3,48(%rsp) - jmp .Loop_ssse3 - -.align 32 -.Loop_ssse3: - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $57,%xmm1,%xmm1 - pshufd $147,%xmm3,%xmm3 - nop - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $147,%xmm1,%xmm1 - 
pshufd $57,%xmm3,%xmm3 - decq %r8 - jnz .Loop_ssse3 - paddd 0(%rsp),%xmm0 - paddd 16(%rsp),%xmm1 - paddd 32(%rsp),%xmm2 - paddd 48(%rsp),%xmm3 - - cmpq $64,%rdx - jb .Ltail_ssse3 - - movdqu 0(%rsi),%xmm4 - movdqu 16(%rsi),%xmm5 - pxor %xmm4,%xmm0 - movdqu 32(%rsi),%xmm4 - pxor %xmm5,%xmm1 - movdqu 48(%rsi),%xmm5 - leaq 64(%rsi),%rsi - pxor %xmm4,%xmm2 - pxor %xmm5,%xmm3 - - movdqu %xmm0,0(%rdi) - movdqu %xmm1,16(%rdi) - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - leaq 64(%rdi),%rdi - - subq $64,%rdx - jnz .Loop_outer_ssse3 - - jmp .Ldone_ssse3 - -.align 16 -.Ltail_ssse3: - movdqa %xmm0,0(%rsp) - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - xorq %r8,%r8 - -.Loop_tail_ssse3: - movzbl (%rsi,%r8,1),%eax - movzbl (%rsp,%r8,1),%ecx - leaq 1(%r8),%r8 - xorl %ecx,%eax - movb %al,-1(%rdi,%r8,1) - decq %rdx - jnz .Loop_tail_ssse3 - -.Ldone_ssse3: - leaq (%r9),%rsp -.cfi_def_cfa_register rsp -.Lssse3_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ChaCha20_ssse3,.-ChaCha20_ssse3 -.type ChaCha20_4x,@function -.align 32 -ChaCha20_4x: -.LChaCha20_4x: -.cfi_startproc - movq %rsp,%r9 -.cfi_def_cfa_register r9 - movq %r10,%r11 - shrq $32,%r10 - testq $32,%r10 - jnz .LChaCha20_8x - cmpq $192,%rdx - ja .Lproceed4x - - andq $71303168,%r11 - cmpq $4194304,%r11 - je .Ldo_sse3_after_all - -.Lproceed4x: - subq $0x140+8,%rsp - movdqa .Lsigma(%rip),%xmm11 - movdqu (%rcx),%xmm15 - movdqu 16(%rcx),%xmm7 - movdqu (%r8),%xmm3 - leaq 256(%rsp),%rcx - leaq .Lrot16(%rip),%r10 - leaq .Lrot24(%rip),%r11 - - pshufd $0x00,%xmm11,%xmm8 - pshufd $0x55,%xmm11,%xmm9 - movdqa %xmm8,64(%rsp) - pshufd $0xaa,%xmm11,%xmm10 - movdqa %xmm9,80(%rsp) - pshufd $0xff,%xmm11,%xmm11 - movdqa %xmm10,96(%rsp) - movdqa %xmm11,112(%rsp) - - pshufd $0x00,%xmm15,%xmm12 - pshufd $0x55,%xmm15,%xmm13 - movdqa %xmm12,128-256(%rcx) - pshufd $0xaa,%xmm15,%xmm14 - movdqa %xmm13,144-256(%rcx) - pshufd $0xff,%xmm15,%xmm15 - movdqa %xmm14,160-256(%rcx) - movdqa %xmm15,176-256(%rcx) - - pshufd 
$0x00,%xmm7,%xmm4 - pshufd $0x55,%xmm7,%xmm5 - movdqa %xmm4,192-256(%rcx) - pshufd $0xaa,%xmm7,%xmm6 - movdqa %xmm5,208-256(%rcx) - pshufd $0xff,%xmm7,%xmm7 - movdqa %xmm6,224-256(%rcx) - movdqa %xmm7,240-256(%rcx) - - pshufd $0x00,%xmm3,%xmm0 - pshufd $0x55,%xmm3,%xmm1 - paddd .Linc(%rip),%xmm0 - pshufd $0xaa,%xmm3,%xmm2 - movdqa %xmm1,272-256(%rcx) - pshufd $0xff,%xmm3,%xmm3 - movdqa %xmm2,288-256(%rcx) - movdqa %xmm3,304-256(%rcx) - - jmp .Loop_enter4x - -.align 32 -.Loop_outer4x: - movdqa 64(%rsp),%xmm8 - movdqa 80(%rsp),%xmm9 - movdqa 96(%rsp),%xmm10 - movdqa 112(%rsp),%xmm11 - movdqa 128-256(%rcx),%xmm12 - movdqa 144-256(%rcx),%xmm13 - movdqa 160-256(%rcx),%xmm14 - movdqa 176-256(%rcx),%xmm15 - movdqa 192-256(%rcx),%xmm4 - movdqa 208-256(%rcx),%xmm5 - movdqa 224-256(%rcx),%xmm6 - movdqa 240-256(%rcx),%xmm7 - movdqa 256-256(%rcx),%xmm0 - movdqa 272-256(%rcx),%xmm1 - movdqa 288-256(%rcx),%xmm2 - movdqa 304-256(%rcx),%xmm3 - paddd .Lfour(%rip),%xmm0 - -.Loop_enter4x: - movdqa %xmm6,32(%rsp) - movdqa %xmm7,48(%rsp) - movdqa (%r10),%xmm7 - movl $10,%eax - movdqa %xmm0,256-256(%rcx) - jmp .Loop4x - -.align 32 -.Loop4x: - paddd %xmm12,%xmm8 - paddd %xmm13,%xmm9 - pxor %xmm8,%xmm0 - pxor %xmm9,%xmm1 -.byte 102,15,56,0,199 -.byte 102,15,56,0,207 - paddd %xmm0,%xmm4 - paddd %xmm1,%xmm5 - pxor %xmm4,%xmm12 - pxor %xmm5,%xmm13 - movdqa %xmm12,%xmm6 - pslld $12,%xmm12 - psrld $20,%xmm6 - movdqa %xmm13,%xmm7 - pslld $12,%xmm13 - por %xmm6,%xmm12 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm13 - paddd %xmm12,%xmm8 - paddd %xmm13,%xmm9 - pxor %xmm8,%xmm0 - pxor %xmm9,%xmm1 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 - paddd %xmm0,%xmm4 - paddd %xmm1,%xmm5 - pxor %xmm4,%xmm12 - pxor %xmm5,%xmm13 - movdqa %xmm12,%xmm7 - pslld $7,%xmm12 - psrld $25,%xmm7 - movdqa %xmm13,%xmm6 - pslld $7,%xmm13 - por %xmm7,%xmm12 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm13 - movdqa %xmm4,0(%rsp) - movdqa %xmm5,16(%rsp) - movdqa 32(%rsp),%xmm4 - movdqa 48(%rsp),%xmm5 - 
paddd %xmm14,%xmm10 - paddd %xmm15,%xmm11 - pxor %xmm10,%xmm2 - pxor %xmm11,%xmm3 -.byte 102,15,56,0,215 -.byte 102,15,56,0,223 - paddd %xmm2,%xmm4 - paddd %xmm3,%xmm5 - pxor %xmm4,%xmm14 - pxor %xmm5,%xmm15 - movdqa %xmm14,%xmm6 - pslld $12,%xmm14 - psrld $20,%xmm6 - movdqa %xmm15,%xmm7 - pslld $12,%xmm15 - por %xmm6,%xmm14 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm15 - paddd %xmm14,%xmm10 - paddd %xmm15,%xmm11 - pxor %xmm10,%xmm2 - pxor %xmm11,%xmm3 -.byte 102,15,56,0,214 -.byte 102,15,56,0,222 - paddd %xmm2,%xmm4 - paddd %xmm3,%xmm5 - pxor %xmm4,%xmm14 - pxor %xmm5,%xmm15 - movdqa %xmm14,%xmm7 - pslld $7,%xmm14 - psrld $25,%xmm7 - movdqa %xmm15,%xmm6 - pslld $7,%xmm15 - por %xmm7,%xmm14 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm15 - paddd %xmm13,%xmm8 - paddd %xmm14,%xmm9 - pxor %xmm8,%xmm3 - pxor %xmm9,%xmm0 -.byte 102,15,56,0,223 -.byte 102,15,56,0,199 - paddd %xmm3,%xmm4 - paddd %xmm0,%xmm5 - pxor %xmm4,%xmm13 - pxor %xmm5,%xmm14 - movdqa %xmm13,%xmm6 - pslld $12,%xmm13 - psrld $20,%xmm6 - movdqa %xmm14,%xmm7 - pslld $12,%xmm14 - por %xmm6,%xmm13 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm14 - paddd %xmm13,%xmm8 - paddd %xmm14,%xmm9 - pxor %xmm8,%xmm3 - pxor %xmm9,%xmm0 -.byte 102,15,56,0,222 -.byte 102,15,56,0,198 - paddd %xmm3,%xmm4 - paddd %xmm0,%xmm5 - pxor %xmm4,%xmm13 - pxor %xmm5,%xmm14 - movdqa %xmm13,%xmm7 - pslld $7,%xmm13 - psrld $25,%xmm7 - movdqa %xmm14,%xmm6 - pslld $7,%xmm14 - por %xmm7,%xmm13 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm14 - movdqa %xmm4,32(%rsp) - movdqa %xmm5,48(%rsp) - movdqa 0(%rsp),%xmm4 - movdqa 16(%rsp),%xmm5 - paddd %xmm15,%xmm10 - paddd %xmm12,%xmm11 - pxor %xmm10,%xmm1 - pxor %xmm11,%xmm2 -.byte 102,15,56,0,207 -.byte 102,15,56,0,215 - paddd %xmm1,%xmm4 - paddd %xmm2,%xmm5 - pxor %xmm4,%xmm15 - pxor %xmm5,%xmm12 - movdqa %xmm15,%xmm6 - pslld $12,%xmm15 - psrld $20,%xmm6 - movdqa %xmm12,%xmm7 - pslld $12,%xmm12 - por %xmm6,%xmm15 - psrld $20,%xmm7 - movdqa 
(%r11),%xmm6 - por %xmm7,%xmm12 - paddd %xmm15,%xmm10 - paddd %xmm12,%xmm11 - pxor %xmm10,%xmm1 - pxor %xmm11,%xmm2 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - paddd %xmm1,%xmm4 - paddd %xmm2,%xmm5 - pxor %xmm4,%xmm15 - pxor %xmm5,%xmm12 - movdqa %xmm15,%xmm7 - pslld $7,%xmm15 - psrld $25,%xmm7 - movdqa %xmm12,%xmm6 - pslld $7,%xmm12 - por %xmm7,%xmm15 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm12 - decl %eax - jnz .Loop4x - - paddd 64(%rsp),%xmm8 - paddd 80(%rsp),%xmm9 - paddd 96(%rsp),%xmm10 - paddd 112(%rsp),%xmm11 - - movdqa %xmm8,%xmm6 - punpckldq %xmm9,%xmm8 - movdqa %xmm10,%xmm7 - punpckldq %xmm11,%xmm10 - punpckhdq %xmm9,%xmm6 - punpckhdq %xmm11,%xmm7 - movdqa %xmm8,%xmm9 - punpcklqdq %xmm10,%xmm8 - movdqa %xmm6,%xmm11 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm10,%xmm9 - punpckhqdq %xmm7,%xmm11 - paddd 128-256(%rcx),%xmm12 - paddd 144-256(%rcx),%xmm13 - paddd 160-256(%rcx),%xmm14 - paddd 176-256(%rcx),%xmm15 - - movdqa %xmm8,0(%rsp) - movdqa %xmm9,16(%rsp) - movdqa 32(%rsp),%xmm8 - movdqa 48(%rsp),%xmm9 - - movdqa %xmm12,%xmm10 - punpckldq %xmm13,%xmm12 - movdqa %xmm14,%xmm7 - punpckldq %xmm15,%xmm14 - punpckhdq %xmm13,%xmm10 - punpckhdq %xmm15,%xmm7 - movdqa %xmm12,%xmm13 - punpcklqdq %xmm14,%xmm12 - movdqa %xmm10,%xmm15 - punpcklqdq %xmm7,%xmm10 - punpckhqdq %xmm14,%xmm13 - punpckhqdq %xmm7,%xmm15 - paddd 192-256(%rcx),%xmm4 - paddd 208-256(%rcx),%xmm5 - paddd 224-256(%rcx),%xmm8 - paddd 240-256(%rcx),%xmm9 - - movdqa %xmm6,32(%rsp) - movdqa %xmm11,48(%rsp) - - movdqa %xmm4,%xmm14 - punpckldq %xmm5,%xmm4 - movdqa %xmm8,%xmm7 - punpckldq %xmm9,%xmm8 - punpckhdq %xmm5,%xmm14 - punpckhdq %xmm9,%xmm7 - movdqa %xmm4,%xmm5 - punpcklqdq %xmm8,%xmm4 - movdqa %xmm14,%xmm9 - punpcklqdq %xmm7,%xmm14 - punpckhqdq %xmm8,%xmm5 - punpckhqdq %xmm7,%xmm9 - paddd 256-256(%rcx),%xmm0 - paddd 272-256(%rcx),%xmm1 - paddd 288-256(%rcx),%xmm2 - paddd 304-256(%rcx),%xmm3 - - movdqa %xmm0,%xmm8 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 
- punpckhdq %xmm1,%xmm8 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm8,%xmm3 - punpcklqdq %xmm7,%xmm8 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - cmpq $256,%rdx - jb .Ltail4x - - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 16(%rsp),%xmm6 - pxor %xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm1,%xmm7 - - movdqu %xmm6,64(%rdi) - movdqu 0(%rsi),%xmm6 - movdqu %xmm11,80(%rdi) - movdqu 16(%rsi),%xmm11 - movdqu %xmm2,96(%rdi) - movdqu 32(%rsi),%xmm2 - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - movdqu 48(%rsi),%xmm7 - pxor 32(%rsp),%xmm6 - pxor %xmm10,%xmm11 - pxor %xmm14,%xmm2 - pxor %xmm8,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 48(%rsp),%xmm6 - pxor %xmm15,%xmm11 - pxor %xmm9,%xmm2 - pxor %xmm3,%xmm7 - movdqu %xmm6,64(%rdi) - movdqu %xmm11,80(%rdi) - movdqu %xmm2,96(%rdi) - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - - subq $256,%rdx - jnz .Loop_outer4x - - jmp .Ldone4x - -.Ltail4x: - cmpq $192,%rdx - jae .L192_or_more4x - cmpq $128,%rdx - jae .L128_or_more4x - cmpq $64,%rdx - jae .L64_or_more4x - - - xorq %r10,%r10 - - movdqa %xmm12,16(%rsp) - movdqa %xmm4,32(%rsp) - movdqa %xmm0,48(%rsp) - jmp .Loop_tail4x - -.align 32 -.L64_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - movdqu %xmm6,0(%rdi) - movdqu %xmm11,16(%rdi) - movdqu %xmm2,32(%rdi) - 
movdqu %xmm7,48(%rdi) - je .Ldone4x - - movdqa 16(%rsp),%xmm6 - leaq 64(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm13,16(%rsp) - leaq 64(%rdi),%rdi - movdqa %xmm5,32(%rsp) - subq $64,%rdx - movdqa %xmm1,48(%rsp) - jmp .Loop_tail4x - -.align 32 -.L128_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - pxor 16(%rsp),%xmm6 - pxor %xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm1,%xmm7 - movdqu %xmm6,64(%rdi) - movdqu %xmm11,80(%rdi) - movdqu %xmm2,96(%rdi) - movdqu %xmm7,112(%rdi) - je .Ldone4x - - movdqa 32(%rsp),%xmm6 - leaq 128(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm10,16(%rsp) - leaq 128(%rdi),%rdi - movdqa %xmm14,32(%rsp) - subq $128,%rdx - movdqa %xmm8,48(%rsp) - jmp .Loop_tail4x - -.align 32 -.L192_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 16(%rsp),%xmm6 - pxor %xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm1,%xmm7 - - movdqu %xmm6,64(%rdi) - movdqu 0(%rsi),%xmm6 - movdqu %xmm11,80(%rdi) - movdqu 16(%rsi),%xmm11 - movdqu %xmm2,96(%rdi) - movdqu 32(%rsi),%xmm2 - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - movdqu 48(%rsi),%xmm7 - pxor 32(%rsp),%xmm6 - pxor %xmm10,%xmm11 - pxor %xmm14,%xmm2 - pxor %xmm8,%xmm7 - movdqu %xmm6,0(%rdi) - movdqu %xmm11,16(%rdi) - movdqu %xmm2,32(%rdi) - movdqu %xmm7,48(%rdi) - je .Ldone4x - - movdqa 
48(%rsp),%xmm6 - leaq 64(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm15,16(%rsp) - leaq 64(%rdi),%rdi - movdqa %xmm9,32(%rsp) - subq $192,%rdx - movdqa %xmm3,48(%rsp) - -.Loop_tail4x: - movzbl (%rsi,%r10,1),%eax - movzbl (%rsp,%r10,1),%ecx - leaq 1(%r10),%r10 - xorl %ecx,%eax - movb %al,-1(%rdi,%r10,1) - decq %rdx - jnz .Loop_tail4x - -.Ldone4x: - leaq (%r9),%rsp -.cfi_def_cfa_register rsp -.L4x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ChaCha20_4x,.-ChaCha20_4x -.type ChaCha20_8x,@function -.align 32 -ChaCha20_8x: -.LChaCha20_8x: -.cfi_startproc - movq %rsp,%r9 -.cfi_def_cfa_register r9 - subq $0x280+8,%rsp - andq $-32,%rsp - vzeroupper - - - - - - - - - - - vbroadcasti128 .Lsigma(%rip),%ymm11 - vbroadcasti128 (%rcx),%ymm3 - vbroadcasti128 16(%rcx),%ymm15 - vbroadcasti128 (%r8),%ymm7 - leaq 256(%rsp),%rcx - leaq 512(%rsp),%rax - leaq .Lrot16(%rip),%r10 - leaq .Lrot24(%rip),%r11 - - vpshufd $0x00,%ymm11,%ymm8 - vpshufd $0x55,%ymm11,%ymm9 - vmovdqa %ymm8,128-256(%rcx) - vpshufd $0xaa,%ymm11,%ymm10 - vmovdqa %ymm9,160-256(%rcx) - vpshufd $0xff,%ymm11,%ymm11 - vmovdqa %ymm10,192-256(%rcx) - vmovdqa %ymm11,224-256(%rcx) - - vpshufd $0x00,%ymm3,%ymm0 - vpshufd $0x55,%ymm3,%ymm1 - vmovdqa %ymm0,256-256(%rcx) - vpshufd $0xaa,%ymm3,%ymm2 - vmovdqa %ymm1,288-256(%rcx) - vpshufd $0xff,%ymm3,%ymm3 - vmovdqa %ymm2,320-256(%rcx) - vmovdqa %ymm3,352-256(%rcx) - - vpshufd $0x00,%ymm15,%ymm12 - vpshufd $0x55,%ymm15,%ymm13 - vmovdqa %ymm12,384-512(%rax) - vpshufd $0xaa,%ymm15,%ymm14 - vmovdqa %ymm13,416-512(%rax) - vpshufd $0xff,%ymm15,%ymm15 - vmovdqa %ymm14,448-512(%rax) - vmovdqa %ymm15,480-512(%rax) - - vpshufd $0x00,%ymm7,%ymm4 - vpshufd $0x55,%ymm7,%ymm5 - vpaddd .Lincy(%rip),%ymm4,%ymm4 - vpshufd $0xaa,%ymm7,%ymm6 - vmovdqa %ymm5,544-512(%rax) - vpshufd $0xff,%ymm7,%ymm7 - vmovdqa %ymm6,576-512(%rax) - vmovdqa %ymm7,608-512(%rax) - - jmp .Loop_enter8x - -.align 32 -.Loop_outer8x: - vmovdqa 128-256(%rcx),%ymm8 - vmovdqa 160-256(%rcx),%ymm9 - vmovdqa 
192-256(%rcx),%ymm10 - vmovdqa 224-256(%rcx),%ymm11 - vmovdqa 256-256(%rcx),%ymm0 - vmovdqa 288-256(%rcx),%ymm1 - vmovdqa 320-256(%rcx),%ymm2 - vmovdqa 352-256(%rcx),%ymm3 - vmovdqa 384-512(%rax),%ymm12 - vmovdqa 416-512(%rax),%ymm13 - vmovdqa 448-512(%rax),%ymm14 - vmovdqa 480-512(%rax),%ymm15 - vmovdqa 512-512(%rax),%ymm4 - vmovdqa 544-512(%rax),%ymm5 - vmovdqa 576-512(%rax),%ymm6 - vmovdqa 608-512(%rax),%ymm7 - vpaddd .Leight(%rip),%ymm4,%ymm4 - -.Loop_enter8x: - vmovdqa %ymm14,64(%rsp) - vmovdqa %ymm15,96(%rsp) - vbroadcasti128 (%r10),%ymm15 - vmovdqa %ymm4,512-512(%rax) - movl $10,%eax - jmp .Loop8x - -.align 32 -.Loop8x: - vpaddd %ymm0,%ymm8,%ymm8 - vpxor %ymm4,%ymm8,%ymm4 - vpshufb %ymm15,%ymm4,%ymm4 - vpaddd %ymm1,%ymm9,%ymm9 - vpxor %ymm5,%ymm9,%ymm5 - vpshufb %ymm15,%ymm5,%ymm5 - vpaddd %ymm4,%ymm12,%ymm12 - vpxor %ymm0,%ymm12,%ymm0 - vpslld $12,%ymm0,%ymm14 - vpsrld $20,%ymm0,%ymm0 - vpor %ymm0,%ymm14,%ymm0 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm5,%ymm13,%ymm13 - vpxor %ymm1,%ymm13,%ymm1 - vpslld $12,%ymm1,%ymm15 - vpsrld $20,%ymm1,%ymm1 - vpor %ymm1,%ymm15,%ymm1 - vpaddd %ymm0,%ymm8,%ymm8 - vpxor %ymm4,%ymm8,%ymm4 - vpshufb %ymm14,%ymm4,%ymm4 - vpaddd %ymm1,%ymm9,%ymm9 - vpxor %ymm5,%ymm9,%ymm5 - vpshufb %ymm14,%ymm5,%ymm5 - vpaddd %ymm4,%ymm12,%ymm12 - vpxor %ymm0,%ymm12,%ymm0 - vpslld $7,%ymm0,%ymm15 - vpsrld $25,%ymm0,%ymm0 - vpor %ymm0,%ymm15,%ymm0 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm5,%ymm13,%ymm13 - vpxor %ymm1,%ymm13,%ymm1 - vpslld $7,%ymm1,%ymm14 - vpsrld $25,%ymm1,%ymm1 - vpor %ymm1,%ymm14,%ymm1 - vmovdqa %ymm12,0(%rsp) - vmovdqa %ymm13,32(%rsp) - vmovdqa 64(%rsp),%ymm12 - vmovdqa 96(%rsp),%ymm13 - vpaddd %ymm2,%ymm10,%ymm10 - vpxor %ymm6,%ymm10,%ymm6 - vpshufb %ymm15,%ymm6,%ymm6 - vpaddd %ymm3,%ymm11,%ymm11 - vpxor %ymm7,%ymm11,%ymm7 - vpshufb %ymm15,%ymm7,%ymm7 - vpaddd %ymm6,%ymm12,%ymm12 - vpxor %ymm2,%ymm12,%ymm2 - vpslld $12,%ymm2,%ymm14 - vpsrld $20,%ymm2,%ymm2 - vpor %ymm2,%ymm14,%ymm2 - vbroadcasti128 (%r11),%ymm14 - 
vpaddd %ymm7,%ymm13,%ymm13 - vpxor %ymm3,%ymm13,%ymm3 - vpslld $12,%ymm3,%ymm15 - vpsrld $20,%ymm3,%ymm3 - vpor %ymm3,%ymm15,%ymm3 - vpaddd %ymm2,%ymm10,%ymm10 - vpxor %ymm6,%ymm10,%ymm6 - vpshufb %ymm14,%ymm6,%ymm6 - vpaddd %ymm3,%ymm11,%ymm11 - vpxor %ymm7,%ymm11,%ymm7 - vpshufb %ymm14,%ymm7,%ymm7 - vpaddd %ymm6,%ymm12,%ymm12 - vpxor %ymm2,%ymm12,%ymm2 - vpslld $7,%ymm2,%ymm15 - vpsrld $25,%ymm2,%ymm2 - vpor %ymm2,%ymm15,%ymm2 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm7,%ymm13,%ymm13 - vpxor %ymm3,%ymm13,%ymm3 - vpslld $7,%ymm3,%ymm14 - vpsrld $25,%ymm3,%ymm3 - vpor %ymm3,%ymm14,%ymm3 - vpaddd %ymm1,%ymm8,%ymm8 - vpxor %ymm7,%ymm8,%ymm7 - vpshufb %ymm15,%ymm7,%ymm7 - vpaddd %ymm2,%ymm9,%ymm9 - vpxor %ymm4,%ymm9,%ymm4 - vpshufb %ymm15,%ymm4,%ymm4 - vpaddd %ymm7,%ymm12,%ymm12 - vpxor %ymm1,%ymm12,%ymm1 - vpslld $12,%ymm1,%ymm14 - vpsrld $20,%ymm1,%ymm1 - vpor %ymm1,%ymm14,%ymm1 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm4,%ymm13,%ymm13 - vpxor %ymm2,%ymm13,%ymm2 - vpslld $12,%ymm2,%ymm15 - vpsrld $20,%ymm2,%ymm2 - vpor %ymm2,%ymm15,%ymm2 - vpaddd %ymm1,%ymm8,%ymm8 - vpxor %ymm7,%ymm8,%ymm7 - vpshufb %ymm14,%ymm7,%ymm7 - vpaddd %ymm2,%ymm9,%ymm9 - vpxor %ymm4,%ymm9,%ymm4 - vpshufb %ymm14,%ymm4,%ymm4 - vpaddd %ymm7,%ymm12,%ymm12 - vpxor %ymm1,%ymm12,%ymm1 - vpslld $7,%ymm1,%ymm15 - vpsrld $25,%ymm1,%ymm1 - vpor %ymm1,%ymm15,%ymm1 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm4,%ymm13,%ymm13 - vpxor %ymm2,%ymm13,%ymm2 - vpslld $7,%ymm2,%ymm14 - vpsrld $25,%ymm2,%ymm2 - vpor %ymm2,%ymm14,%ymm2 - vmovdqa %ymm12,64(%rsp) - vmovdqa %ymm13,96(%rsp) - vmovdqa 0(%rsp),%ymm12 - vmovdqa 32(%rsp),%ymm13 - vpaddd %ymm3,%ymm10,%ymm10 - vpxor %ymm5,%ymm10,%ymm5 - vpshufb %ymm15,%ymm5,%ymm5 - vpaddd %ymm0,%ymm11,%ymm11 - vpxor %ymm6,%ymm11,%ymm6 - vpshufb %ymm15,%ymm6,%ymm6 - vpaddd %ymm5,%ymm12,%ymm12 - vpxor %ymm3,%ymm12,%ymm3 - vpslld $12,%ymm3,%ymm14 - vpsrld $20,%ymm3,%ymm3 - vpor %ymm3,%ymm14,%ymm3 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm6,%ymm13,%ymm13 - vpxor 
%ymm0,%ymm13,%ymm0 - vpslld $12,%ymm0,%ymm15 - vpsrld $20,%ymm0,%ymm0 - vpor %ymm0,%ymm15,%ymm0 - vpaddd %ymm3,%ymm10,%ymm10 - vpxor %ymm5,%ymm10,%ymm5 - vpshufb %ymm14,%ymm5,%ymm5 - vpaddd %ymm0,%ymm11,%ymm11 - vpxor %ymm6,%ymm11,%ymm6 - vpshufb %ymm14,%ymm6,%ymm6 - vpaddd %ymm5,%ymm12,%ymm12 - vpxor %ymm3,%ymm12,%ymm3 - vpslld $7,%ymm3,%ymm15 - vpsrld $25,%ymm3,%ymm3 - vpor %ymm3,%ymm15,%ymm3 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm6,%ymm13,%ymm13 - vpxor %ymm0,%ymm13,%ymm0 - vpslld $7,%ymm0,%ymm14 - vpsrld $25,%ymm0,%ymm0 - vpor %ymm0,%ymm14,%ymm0 - decl %eax - jnz .Loop8x - - leaq 512(%rsp),%rax - vpaddd 128-256(%rcx),%ymm8,%ymm8 - vpaddd 160-256(%rcx),%ymm9,%ymm9 - vpaddd 192-256(%rcx),%ymm10,%ymm10 - vpaddd 224-256(%rcx),%ymm11,%ymm11 - - vpunpckldq %ymm9,%ymm8,%ymm14 - vpunpckldq %ymm11,%ymm10,%ymm15 - vpunpckhdq %ymm9,%ymm8,%ymm8 - vpunpckhdq %ymm11,%ymm10,%ymm10 - vpunpcklqdq %ymm15,%ymm14,%ymm9 - vpunpckhqdq %ymm15,%ymm14,%ymm14 - vpunpcklqdq %ymm10,%ymm8,%ymm11 - vpunpckhqdq %ymm10,%ymm8,%ymm8 - vpaddd 256-256(%rcx),%ymm0,%ymm0 - vpaddd 288-256(%rcx),%ymm1,%ymm1 - vpaddd 320-256(%rcx),%ymm2,%ymm2 - vpaddd 352-256(%rcx),%ymm3,%ymm3 - - vpunpckldq %ymm1,%ymm0,%ymm10 - vpunpckldq %ymm3,%ymm2,%ymm15 - vpunpckhdq %ymm1,%ymm0,%ymm0 - vpunpckhdq %ymm3,%ymm2,%ymm2 - vpunpcklqdq %ymm15,%ymm10,%ymm1 - vpunpckhqdq %ymm15,%ymm10,%ymm10 - vpunpcklqdq %ymm2,%ymm0,%ymm3 - vpunpckhqdq %ymm2,%ymm0,%ymm0 - vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 - vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 - vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 - vperm2i128 $0x31,%ymm10,%ymm14,%ymm10 - vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 - vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 - vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 - vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 - vmovdqa %ymm15,0(%rsp) - vmovdqa %ymm9,32(%rsp) - vmovdqa 64(%rsp),%ymm15 - vmovdqa 96(%rsp),%ymm9 - - vpaddd 384-512(%rax),%ymm12,%ymm12 - vpaddd 416-512(%rax),%ymm13,%ymm13 - vpaddd 448-512(%rax),%ymm15,%ymm15 - vpaddd 480-512(%rax),%ymm9,%ymm9 - - vpunpckldq 
%ymm13,%ymm12,%ymm2 - vpunpckldq %ymm9,%ymm15,%ymm8 - vpunpckhdq %ymm13,%ymm12,%ymm12 - vpunpckhdq %ymm9,%ymm15,%ymm15 - vpunpcklqdq %ymm8,%ymm2,%ymm13 - vpunpckhqdq %ymm8,%ymm2,%ymm2 - vpunpcklqdq %ymm15,%ymm12,%ymm9 - vpunpckhqdq %ymm15,%ymm12,%ymm12 - vpaddd 512-512(%rax),%ymm4,%ymm4 - vpaddd 544-512(%rax),%ymm5,%ymm5 - vpaddd 576-512(%rax),%ymm6,%ymm6 - vpaddd 608-512(%rax),%ymm7,%ymm7 - - vpunpckldq %ymm5,%ymm4,%ymm15 - vpunpckldq %ymm7,%ymm6,%ymm8 - vpunpckhdq %ymm5,%ymm4,%ymm4 - vpunpckhdq %ymm7,%ymm6,%ymm6 - vpunpcklqdq %ymm8,%ymm15,%ymm5 - vpunpckhqdq %ymm8,%ymm15,%ymm15 - vpunpcklqdq %ymm6,%ymm4,%ymm7 - vpunpckhqdq %ymm6,%ymm4,%ymm4 - vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 - vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 - vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 - vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 - vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 - vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 - vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 - vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 - vmovdqa 0(%rsp),%ymm6 - vmovdqa 32(%rsp),%ymm12 - - cmpq $512,%rdx - jb .Ltail8x - - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - leaq 128(%rsi),%rsi - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm12,%ymm12 - vpxor 32(%rsi),%ymm13,%ymm13 - vpxor 64(%rsi),%ymm10,%ymm10 - vpxor 96(%rsi),%ymm15,%ymm15 - leaq 128(%rsi),%rsi - vmovdqu %ymm12,0(%rdi) - vmovdqu %ymm13,32(%rdi) - vmovdqu %ymm10,64(%rdi) - vmovdqu %ymm15,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm14,%ymm14 - vpxor 32(%rsi),%ymm2,%ymm2 - vpxor 64(%rsi),%ymm3,%ymm3 - vpxor 96(%rsi),%ymm7,%ymm7 - leaq 128(%rsi),%rsi - vmovdqu %ymm14,0(%rdi) - vmovdqu %ymm2,32(%rdi) - vmovdqu %ymm3,64(%rdi) - vmovdqu %ymm7,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm11,%ymm11 - vpxor 32(%rsi),%ymm9,%ymm9 - vpxor 64(%rsi),%ymm0,%ymm0 - vpxor 96(%rsi),%ymm4,%ymm4 - leaq 128(%rsi),%rsi - vmovdqu %ymm11,0(%rdi) 
- vmovdqu %ymm9,32(%rdi) - vmovdqu %ymm0,64(%rdi) - vmovdqu %ymm4,96(%rdi) - leaq 128(%rdi),%rdi - - subq $512,%rdx - jnz .Loop_outer8x - - jmp .Ldone8x - -.Ltail8x: - cmpq $448,%rdx - jae .L448_or_more8x - cmpq $384,%rdx - jae .L384_or_more8x - cmpq $320,%rdx - jae .L320_or_more8x - cmpq $256,%rdx - jae .L256_or_more8x - cmpq $192,%rdx - jae .L192_or_more8x - cmpq $128,%rdx - jae .L128_or_more8x - cmpq $64,%rdx - jae .L64_or_more8x - - xorq %r10,%r10 - vmovdqa %ymm6,0(%rsp) - vmovdqa %ymm8,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L64_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - je .Ldone8x - - leaq 64(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm1,0(%rsp) - leaq 64(%rdi),%rdi - subq $64,%rdx - vmovdqa %ymm5,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L128_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - je .Ldone8x - - leaq 128(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm12,0(%rsp) - leaq 128(%rdi),%rdi - subq $128,%rdx - vmovdqa %ymm13,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L192_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - je .Ldone8x - - leaq 192(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm10,0(%rsp) - leaq 192(%rdi),%rdi - subq $192,%rdx - vmovdqa %ymm15,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L256_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - 
vpxor 224(%rsi),%ymm15,%ymm15 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - je .Ldone8x - - leaq 256(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm14,0(%rsp) - leaq 256(%rdi),%rdi - subq $256,%rdx - vmovdqa %ymm2,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L320_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - je .Ldone8x - - leaq 320(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm3,0(%rsp) - leaq 320(%rdi),%rdi - subq $320,%rdx - vmovdqa %ymm7,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L384_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vpxor 320(%rsi),%ymm3,%ymm3 - vpxor 352(%rsi),%ymm7,%ymm7 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - vmovdqu %ymm3,320(%rdi) - vmovdqu %ymm7,352(%rdi) - je .Ldone8x - - leaq 384(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm11,0(%rsp) - leaq 384(%rdi),%rdi - subq $384,%rdx - vmovdqa 
%ymm9,32(%rsp) - jmp .Loop_tail8x - -.align 32 -.L448_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vpxor 320(%rsi),%ymm3,%ymm3 - vpxor 352(%rsi),%ymm7,%ymm7 - vpxor 384(%rsi),%ymm11,%ymm11 - vpxor 416(%rsi),%ymm9,%ymm9 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - vmovdqu %ymm3,320(%rdi) - vmovdqu %ymm7,352(%rdi) - vmovdqu %ymm11,384(%rdi) - vmovdqu %ymm9,416(%rdi) - je .Ldone8x - - leaq 448(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm0,0(%rsp) - leaq 448(%rdi),%rdi - subq $448,%rdx - vmovdqa %ymm4,32(%rsp) - -.Loop_tail8x: - movzbl (%rsi,%r10,1),%eax - movzbl (%rsp,%r10,1),%ecx - leaq 1(%r10),%r10 - xorl %ecx,%eax - movb %al,-1(%rdi,%r10,1) - decq %rdx - jnz .Loop_tail8x - -.Ldone8x: - vzeroall - leaq (%r9),%rsp -.cfi_def_cfa_register rsp -.L8x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ChaCha20_8x,.-ChaCha20_8x -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S deleted file mode 100644 index a22bee8fcf..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S +++ /dev/null @@ -1,3079 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.data - -.align 16 -one: -.quad 1,0 -two: -.quad 2,0 -three: -.quad 3,0 -four: -.quad 4,0 -five: -.quad 5,0 -six: -.quad 6,0 -seven: -.quad 7,0 -eight: -.quad 8,0 - -OR_MASK: -.long 0x00000000,0x00000000,0x00000000,0x80000000 -poly: -.quad 0x1, 0xc200000000000000 -mask: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -con1: -.long 1,1,1,1 -con2: -.long 0x1b,0x1b,0x1b,0x1b -con3: -.byte -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7 -and_mask: -.long 0,0xffffffff, 0xffffffff, 0xffffffff -.text -.type GFMUL,@function -.align 16 -GFMUL: -.cfi_startproc - vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 - vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 - vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 - vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm2,%xmm2 - vpxor %xmm3,%xmm5,%xmm5 - - vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 - vpshufd $78,%xmm2,%xmm4 - vpxor %xmm4,%xmm3,%xmm2 - - vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 - vpshufd $78,%xmm2,%xmm4 - vpxor %xmm4,%xmm3,%xmm2 - - vpxor %xmm5,%xmm2,%xmm0 - .byte 0xf3,0xc3 -.cfi_endproc -.size GFMUL, .-GFMUL -.globl aesgcmsiv_htable_init -.hidden aesgcmsiv_htable_init -.type aesgcmsiv_htable_init,@function -.align 16 -aesgcmsiv_htable_init: -.cfi_startproc - vmovdqa (%rsi),%xmm0 - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm0,(%rdi) - call GFMUL - vmovdqa %xmm0,16(%rdi) - call GFMUL - vmovdqa %xmm0,32(%rdi) - call GFMUL - vmovdqa %xmm0,48(%rdi) - call GFMUL - vmovdqa %xmm0,64(%rdi) - call GFMUL - vmovdqa %xmm0,80(%rdi) - call GFMUL - vmovdqa %xmm0,96(%rdi) - call GFMUL - vmovdqa %xmm0,112(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aesgcmsiv_htable_init, .-aesgcmsiv_htable_init -.globl aesgcmsiv_htable6_init -.hidden aesgcmsiv_htable6_init -.type 
aesgcmsiv_htable6_init,@function -.align 16 -aesgcmsiv_htable6_init: -.cfi_startproc - vmovdqa (%rsi),%xmm0 - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm0,(%rdi) - call GFMUL - vmovdqa %xmm0,16(%rdi) - call GFMUL - vmovdqa %xmm0,32(%rdi) - call GFMUL - vmovdqa %xmm0,48(%rdi) - call GFMUL - vmovdqa %xmm0,64(%rdi) - call GFMUL - vmovdqa %xmm0,80(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aesgcmsiv_htable6_init, .-aesgcmsiv_htable6_init -.globl aesgcmsiv_htable_polyval -.hidden aesgcmsiv_htable_polyval -.type aesgcmsiv_htable_polyval,@function -.align 16 -aesgcmsiv_htable_polyval: -.cfi_startproc - testq %rdx,%rdx - jnz .Lhtable_polyval_start - .byte 0xf3,0xc3 - -.Lhtable_polyval_start: - vzeroall - - - - movq %rdx,%r11 - andq $127,%r11 - - jz .Lhtable_polyval_no_prefix - - vpxor %xmm9,%xmm9,%xmm9 - vmovdqa (%rcx),%xmm1 - subq %r11,%rdx - - subq $16,%r11 - - - vmovdqu (%rsi),%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - - vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm5 - vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm3 - vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm4 - vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - leaq 16(%rsi),%rsi - testq %r11,%r11 - jnz .Lhtable_polyval_prefix_loop - jmp .Lhtable_polyval_prefix_complete - - -.align 64 -.Lhtable_polyval_prefix_loop: - subq $16,%r11 - - vmovdqu (%rsi),%xmm0 - - vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - testq %r11,%r11 - - leaq 16(%rsi),%rsi - - jnz .Lhtable_polyval_prefix_loop - -.Lhtable_polyval_prefix_complete: - vpsrldq $8,%xmm5,%xmm6 - vpslldq $8,%xmm5,%xmm5 - - vpxor %xmm6,%xmm4,%xmm9 - vpxor %xmm5,%xmm3,%xmm1 - - jmp .Lhtable_polyval_main_loop - -.Lhtable_polyval_no_prefix: - - - - - vpxor %xmm1,%xmm1,%xmm1 - vmovdqa (%rcx),%xmm9 - -.align 64 -.Lhtable_polyval_main_loop: - subq 
$0x80,%rdx - jb .Lhtable_polyval_out - - vmovdqu 112(%rsi),%xmm0 - - vpclmulqdq $0x01,(%rdi),%xmm0,%xmm5 - vpclmulqdq $0x00,(%rdi),%xmm0,%xmm3 - vpclmulqdq $0x11,(%rdi),%xmm0,%xmm4 - vpclmulqdq $0x10,(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 96(%rsi),%xmm0 - vpclmulqdq $0x01,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - - vmovdqu 80(%rsi),%xmm0 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 - vpalignr $8,%xmm1,%xmm1,%xmm1 - - vpclmulqdq $0x01,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm7,%xmm1,%xmm1 - - vmovdqu 64(%rsi),%xmm0 - - vpclmulqdq $0x01,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 48(%rsi),%xmm0 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 - vpalignr $8,%xmm1,%xmm1,%xmm1 - - vpclmulqdq $0x01,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm7,%xmm1,%xmm1 - - vmovdqu 32(%rsi),%xmm0 - - vpclmulqdq $0x01,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm9,%xmm1,%xmm1 - - vmovdqu 16(%rsi),%xmm0 - - vpclmulqdq 
$0x01,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 0(%rsi),%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - - vpclmulqdq $0x01,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpsrldq $8,%xmm5,%xmm6 - vpslldq $8,%xmm5,%xmm5 - - vpxor %xmm6,%xmm4,%xmm9 - vpxor %xmm5,%xmm3,%xmm1 - - leaq 128(%rsi),%rsi - jmp .Lhtable_polyval_main_loop - - - -.Lhtable_polyval_out: - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 - vpalignr $8,%xmm1,%xmm1,%xmm1 - vpxor %xmm6,%xmm1,%xmm1 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 - vpalignr $8,%xmm1,%xmm1,%xmm1 - vpxor %xmm6,%xmm1,%xmm1 - vpxor %xmm9,%xmm1,%xmm1 - - vmovdqu %xmm1,(%rcx) - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.size aesgcmsiv_htable_polyval,.-aesgcmsiv_htable_polyval -.globl aesgcmsiv_polyval_horner -.hidden aesgcmsiv_polyval_horner -.type aesgcmsiv_polyval_horner,@function -.align 16 -aesgcmsiv_polyval_horner: -.cfi_startproc - testq %rcx,%rcx - jnz .Lpolyval_horner_start - .byte 0xf3,0xc3 - -.Lpolyval_horner_start: - - - - xorq %r10,%r10 - shlq $4,%rcx - - vmovdqa (%rsi),%xmm1 - vmovdqa (%rdi),%xmm0 - -.Lpolyval_horner_loop: - vpxor (%rdx,%r10,1),%xmm0,%xmm0 - call GFMUL - - addq $16,%r10 - cmpq %r10,%rcx - jne .Lpolyval_horner_loop - - - vmovdqa %xmm0,(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aesgcmsiv_polyval_horner,.-aesgcmsiv_polyval_horner -.globl aes128gcmsiv_aes_ks -.hidden aes128gcmsiv_aes_ks -.type aes128gcmsiv_aes_ks,@function -.align 16 -aes128gcmsiv_aes_ks: -.cfi_startproc - vmovdqu (%rdi),%xmm1 - vmovdqa %xmm1,(%rsi) - - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - - movq $8,%rax - 
-.Lks128_loop: - addq $16,%rsi - subq $1,%rax - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - jne .Lks128_loop - - vmovdqa con2(%rip),%xmm0 - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,16(%rsi) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,32(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_aes_ks,.-aes128gcmsiv_aes_ks -.globl aes256gcmsiv_aes_ks -.hidden aes256gcmsiv_aes_ks -.type aes256gcmsiv_aes_ks,@function -.align 16 -aes256gcmsiv_aes_ks: -.cfi_startproc - vmovdqu (%rdi),%xmm1 - vmovdqu 16(%rdi),%xmm3 - vmovdqa %xmm1,(%rsi) - vmovdqa %xmm3,16(%rsi) - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - vpxor %xmm14,%xmm14,%xmm14 - movq $6,%rax - -.Lks256_loop: - addq $32,%rsi - subq $1,%rax - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpsllq $32,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpshufb con3(%rip),%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vmovdqa %xmm3,16(%rsi) - jne .Lks256_loop - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpsllq $32,%xmm1,%xmm4 - vpxor 
%xmm4,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,32(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.globl aes128gcmsiv_aes_ks_enc_x1 -.hidden aes128gcmsiv_aes_ks_enc_x1 -.type aes128gcmsiv_aes_ks_enc_x1,@function -.align 16 -aes128gcmsiv_aes_ks_enc_x1: -.cfi_startproc - vmovdqa (%rcx),%xmm1 - vmovdqa 0(%rdi),%xmm4 - - vmovdqa %xmm1,(%rdx) - vpxor %xmm1,%xmm4,%xmm4 - - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,16(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,32(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,48(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,64(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,80(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor 
%xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,96(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,112(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,128(%rdx) - - - vmovdqa con2(%rip),%xmm0 - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,144(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenclast %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,160(%rdx) - - - vmovdqa %xmm4,0(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_aes_ks_enc_x1,.-aes128gcmsiv_aes_ks_enc_x1 -.globl aes128gcmsiv_kdf -.hidden aes128gcmsiv_kdf -.type aes128gcmsiv_kdf,@function -.align 16 -aes128gcmsiv_kdf: -.cfi_startproc - - - - - vmovdqa (%rdx),%xmm1 - vmovdqa 0(%rdi),%xmm9 - vmovdqa and_mask(%rip),%xmm12 - vmovdqa one(%rip),%xmm13 - vpshufd $0x90,%xmm9,%xmm9 - vpand %xmm12,%xmm9,%xmm9 - vpaddd %xmm13,%xmm9,%xmm10 - vpaddd %xmm13,%xmm10,%xmm11 - vpaddd %xmm13,%xmm11,%xmm12 - - vpxor %xmm1,%xmm9,%xmm9 - vpxor %xmm1,%xmm10,%xmm10 - vpxor %xmm1,%xmm11,%xmm11 - vpxor %xmm1,%xmm12,%xmm12 - - vmovdqa 16(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - 
vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 32(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 48(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 64(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 80(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 96(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 112(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 128(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 144(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 160(%rdx),%xmm2 - vaesenclast %xmm2,%xmm9,%xmm9 - vaesenclast %xmm2,%xmm10,%xmm10 - vaesenclast %xmm2,%xmm11,%xmm11 - vaesenclast %xmm2,%xmm12,%xmm12 - - - vmovdqa %xmm9,0(%rsi) - vmovdqa %xmm10,16(%rsi) - vmovdqa %xmm11,32(%rsi) - vmovdqa %xmm12,48(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_kdf,.-aes128gcmsiv_kdf -.globl aes128gcmsiv_enc_msg_x4 -.hidden aes128gcmsiv_enc_msg_x4 -.type aes128gcmsiv_enc_msg_x4,@function -.align 16 -aes128gcmsiv_enc_msg_x4: -.cfi_startproc - testq %r8,%r8 - jnz .L128_enc_msg_x4_start - .byte 0xf3,0xc3 - -.L128_enc_msg_x4_start: - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-24 - - shrq $4,%r8 - movq %r8,%r10 - shlq $62,%r10 - shrq $62,%r10 
- - - vmovdqa (%rdx),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - - vmovdqu four(%rip),%xmm4 - vmovdqa %xmm15,%xmm0 - vpaddd one(%rip),%xmm15,%xmm1 - vpaddd two(%rip),%xmm15,%xmm2 - vpaddd three(%rip),%xmm15,%xmm3 - - shrq $2,%r8 - je .L128_enc_msg_x4_check_remainder - - subq $64,%rsi - subq $64,%rdi - -.L128_enc_msg_x4_loop1: - addq $64,%rsi - addq $64,%rdi - - vmovdqa %xmm0,%xmm5 - vmovdqa %xmm1,%xmm6 - vmovdqa %xmm2,%xmm7 - vmovdqa %xmm3,%xmm8 - - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqu 32(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm1,%xmm1 - vmovdqu 48(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm2,%xmm2 - vmovdqu 64(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm3,%xmm3 - - vmovdqu 80(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 96(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 112(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 128(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 144(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm12 - 
vaesenclast %xmm12,%xmm5,%xmm5 - vaesenclast %xmm12,%xmm6,%xmm6 - vaesenclast %xmm12,%xmm7,%xmm7 - vaesenclast %xmm12,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm5,%xmm5 - vpxor 16(%rdi),%xmm6,%xmm6 - vpxor 32(%rdi),%xmm7,%xmm7 - vpxor 48(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm5,0(%rsi) - vmovdqu %xmm6,16(%rsi) - vmovdqu %xmm7,32(%rsi) - vmovdqu %xmm8,48(%rsi) - - jne .L128_enc_msg_x4_loop1 - - addq $64,%rsi - addq $64,%rdi - -.L128_enc_msg_x4_check_remainder: - cmpq $0,%r10 - je .L128_enc_msg_x4_out - -.L128_enc_msg_x4_loop2: - - - vmovdqa %xmm0,%xmm5 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm5,%xmm5 - vaesenc 16(%rcx),%xmm5,%xmm5 - vaesenc 32(%rcx),%xmm5,%xmm5 - vaesenc 48(%rcx),%xmm5,%xmm5 - vaesenc 64(%rcx),%xmm5,%xmm5 - vaesenc 80(%rcx),%xmm5,%xmm5 - vaesenc 96(%rcx),%xmm5,%xmm5 - vaesenc 112(%rcx),%xmm5,%xmm5 - vaesenc 128(%rcx),%xmm5,%xmm5 - vaesenc 144(%rcx),%xmm5,%xmm5 - vaesenclast 160(%rcx),%xmm5,%xmm5 - - - vpxor (%rdi),%xmm5,%xmm5 - vmovdqu %xmm5,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - subq $1,%r10 - jne .L128_enc_msg_x4_loop2 - -.L128_enc_msg_x4_out: - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r13 - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r12 - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_enc_msg_x4,.-aes128gcmsiv_enc_msg_x4 -.globl aes128gcmsiv_enc_msg_x8 -.hidden aes128gcmsiv_enc_msg_x8 -.type aes128gcmsiv_enc_msg_x8,@function -.align 16 -aes128gcmsiv_enc_msg_x8: -.cfi_startproc - testq %r8,%r8 - jnz .L128_enc_msg_x8_start - .byte 0xf3,0xc3 - -.L128_enc_msg_x8_start: - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-24 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-32 - movq %rsp,%rbp -.cfi_def_cfa_register rbp - - - subq $128,%rsp - andq $-64,%rsp - - shrq $4,%r8 - movq %r8,%r10 - shlq $61,%r10 - shrq $61,%r10 - - - vmovdqu (%rdx),%xmm1 - vpor OR_MASK(%rip),%xmm1,%xmm1 - - - vpaddd seven(%rip),%xmm1,%xmm0 - vmovdqu 
%xmm0,(%rsp) - vpaddd one(%rip),%xmm1,%xmm9 - vpaddd two(%rip),%xmm1,%xmm10 - vpaddd three(%rip),%xmm1,%xmm11 - vpaddd four(%rip),%xmm1,%xmm12 - vpaddd five(%rip),%xmm1,%xmm13 - vpaddd six(%rip),%xmm1,%xmm14 - vmovdqa %xmm1,%xmm0 - - shrq $3,%r8 - je .L128_enc_msg_x8_check_remainder - - subq $128,%rsi - subq $128,%rdi - -.L128_enc_msg_x8_loop1: - addq $128,%rsi - addq $128,%rdi - - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm9,%xmm2 - vmovdqa %xmm10,%xmm3 - vmovdqa %xmm11,%xmm4 - vmovdqa %xmm12,%xmm5 - vmovdqa %xmm13,%xmm6 - vmovdqa %xmm14,%xmm7 - - vmovdqu (%rsp),%xmm8 - - vpxor (%rcx),%xmm1,%xmm1 - vpxor (%rcx),%xmm2,%xmm2 - vpxor (%rcx),%xmm3,%xmm3 - vpxor (%rcx),%xmm4,%xmm4 - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu (%rsp),%xmm14 - vpaddd eight(%rip),%xmm14,%xmm14 - vmovdqu %xmm14,(%rsp) - vmovdqu 32(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpsubd one(%rip),%xmm14,%xmm14 - vmovdqu 48(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm0,%xmm0 - vmovdqu 64(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd 
eight(%rip),%xmm9,%xmm9 - vmovdqu 80(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm10,%xmm10 - vmovdqu 96(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm11,%xmm11 - vmovdqu 112(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm12,%xmm12 - vmovdqu 128(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm13,%xmm13 - vmovdqu 144(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm15 - vaesenclast %xmm15,%xmm1,%xmm1 - vaesenclast %xmm15,%xmm2,%xmm2 - vaesenclast %xmm15,%xmm3,%xmm3 - vaesenclast %xmm15,%xmm4,%xmm4 - vaesenclast %xmm15,%xmm5,%xmm5 - vaesenclast %xmm15,%xmm6,%xmm6 - vaesenclast %xmm15,%xmm7,%xmm7 - vaesenclast %xmm15,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm1,%xmm1 - vpxor 16(%rdi),%xmm2,%xmm2 - vpxor 32(%rdi),%xmm3,%xmm3 - vpxor 48(%rdi),%xmm4,%xmm4 - vpxor 64(%rdi),%xmm5,%xmm5 - vpxor 80(%rdi),%xmm6,%xmm6 - vpxor 96(%rdi),%xmm7,%xmm7 - vpxor 112(%rdi),%xmm8,%xmm8 - - 
decq %r8 - - vmovdqu %xmm1,0(%rsi) - vmovdqu %xmm2,16(%rsi) - vmovdqu %xmm3,32(%rsi) - vmovdqu %xmm4,48(%rsi) - vmovdqu %xmm5,64(%rsi) - vmovdqu %xmm6,80(%rsi) - vmovdqu %xmm7,96(%rsi) - vmovdqu %xmm8,112(%rsi) - - jne .L128_enc_msg_x8_loop1 - - addq $128,%rsi - addq $128,%rdi - -.L128_enc_msg_x8_check_remainder: - cmpq $0,%r10 - je .L128_enc_msg_x8_out - -.L128_enc_msg_x8_loop2: - - - vmovdqa %xmm0,%xmm1 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm1,%xmm1 - vaesenc 16(%rcx),%xmm1,%xmm1 - vaesenc 32(%rcx),%xmm1,%xmm1 - vaesenc 48(%rcx),%xmm1,%xmm1 - vaesenc 64(%rcx),%xmm1,%xmm1 - vaesenc 80(%rcx),%xmm1,%xmm1 - vaesenc 96(%rcx),%xmm1,%xmm1 - vaesenc 112(%rcx),%xmm1,%xmm1 - vaesenc 128(%rcx),%xmm1,%xmm1 - vaesenc 144(%rcx),%xmm1,%xmm1 - vaesenclast 160(%rcx),%xmm1,%xmm1 - - - vpxor (%rdi),%xmm1,%xmm1 - - vmovdqu %xmm1,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - decq %r10 - jne .L128_enc_msg_x8_loop2 - -.L128_enc_msg_x8_out: - movq %rbp,%rsp -.cfi_def_cfa_register %rsp - popq %rbp -.cfi_adjust_cfa_offset -8 -.cfi_restore %rbp - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r13 - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r12 - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_enc_msg_x8,.-aes128gcmsiv_enc_msg_x8 -.globl aes128gcmsiv_dec -.hidden aes128gcmsiv_dec -.type aes128gcmsiv_dec,@function -.align 16 -aes128gcmsiv_dec: -.cfi_startproc - testq $~15,%r9 - jnz .L128_dec_start - .byte 0xf3,0xc3 - -.L128_dec_start: - vzeroupper - vmovdqa (%rdx),%xmm0 - movq %rdx,%rax - - leaq 32(%rax),%rax - leaq 32(%rcx),%rcx - - - vmovdqu (%rdi,%r9,1),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - andq $~15,%r9 - - - cmpq $96,%r9 - jb .L128_dec_loop2 - - - subq $96,%r9 - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vpxor (%r8),%xmm7,%xmm7 - vpxor (%r8),%xmm8,%xmm8 - vpxor (%r8),%xmm9,%xmm9 - 
vpxor (%r8),%xmm10,%xmm10 - vpxor (%r8),%xmm11,%xmm11 - vpxor (%r8),%xmm12,%xmm12 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vaesenclast %xmm4,%xmm8,%xmm8 - vaesenclast 
%xmm4,%xmm9,%xmm9 - vaesenclast %xmm4,%xmm10,%xmm10 - vaesenclast %xmm4,%xmm11,%xmm11 - vaesenclast %xmm4,%xmm12,%xmm12 - - - vpxor 0(%rdi),%xmm7,%xmm7 - vpxor 16(%rdi),%xmm8,%xmm8 - vpxor 32(%rdi),%xmm9,%xmm9 - vpxor 48(%rdi),%xmm10,%xmm10 - vpxor 64(%rdi),%xmm11,%xmm11 - vpxor 80(%rdi),%xmm12,%xmm12 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - addq $96,%rdi - addq $96,%rsi - jmp .L128_dec_loop1 - - -.align 64 -.L128_dec_loop1: - cmpq $96,%r9 - jb .L128_dec_finish_96 - subq $96,%r9 - - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vmovdqa (%r8),%xmm4 - vpxor %xmm4,%xmm7,%xmm7 - vpxor %xmm4,%xmm8,%xmm8 - vpxor %xmm4,%xmm9,%xmm9 - vpxor %xmm4,%xmm10,%xmm10 - vpxor %xmm4,%xmm11,%xmm11 - vpxor %xmm4,%xmm12,%xmm12 - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 
- vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - 
vaesenc %xmm4,%xmm12,%xmm12 - - - vmovdqa 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm6 - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor 0(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vpxor 16(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm8,%xmm8 - vpxor 32(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm9,%xmm9 - vpxor 48(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm10,%xmm10 - vpxor 64(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm11,%xmm11 - vpxor 80(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm12,%xmm12 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - vpxor %xmm5,%xmm0,%xmm0 - - leaq 96(%rdi),%rdi - leaq 96(%rsi),%rsi - jmp .L128_dec_loop1 - -.L128_dec_finish_96: - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 
- vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq 
$0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor %xmm5,%xmm0,%xmm0 - -.L128_dec_loop2: - - - - cmpq $16,%r9 - jb .L128_dec_out - subq $16,%r9 - - vmovdqa %xmm15,%xmm2 - vpaddd one(%rip),%xmm15,%xmm15 - - vpxor 0(%r8),%xmm2,%xmm2 - vaesenc 16(%r8),%xmm2,%xmm2 - vaesenc 32(%r8),%xmm2,%xmm2 - vaesenc 48(%r8),%xmm2,%xmm2 - vaesenc 64(%r8),%xmm2,%xmm2 - vaesenc 80(%r8),%xmm2,%xmm2 - vaesenc 96(%r8),%xmm2,%xmm2 - vaesenc 112(%r8),%xmm2,%xmm2 - vaesenc 128(%r8),%xmm2,%xmm2 - vaesenc 144(%r8),%xmm2,%xmm2 - vaesenclast 160(%r8),%xmm2,%xmm2 - vpxor (%rdi),%xmm2,%xmm2 - vmovdqu %xmm2,(%rsi) - addq $16,%rdi - addq $16,%rsi - - vpxor %xmm2,%xmm0,%xmm0 - vmovdqa -32(%rcx),%xmm1 - call GFMUL - - jmp .L128_dec_loop2 - -.L128_dec_out: - vmovdqu %xmm0,(%rdx) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_dec, .-aes128gcmsiv_dec -.globl aes128gcmsiv_ecb_enc_block -.hidden aes128gcmsiv_ecb_enc_block -.type aes128gcmsiv_ecb_enc_block,@function -.align 16 -aes128gcmsiv_ecb_enc_block: -.cfi_startproc - vmovdqa (%rdi),%xmm1 - - vpxor (%rdx),%xmm1,%xmm1 - vaesenc 16(%rdx),%xmm1,%xmm1 - vaesenc 32(%rdx),%xmm1,%xmm1 - vaesenc 48(%rdx),%xmm1,%xmm1 - vaesenc 64(%rdx),%xmm1,%xmm1 - vaesenc 80(%rdx),%xmm1,%xmm1 - vaesenc 96(%rdx),%xmm1,%xmm1 - vaesenc 112(%rdx),%xmm1,%xmm1 - vaesenc 128(%rdx),%xmm1,%xmm1 - vaesenc 144(%rdx),%xmm1,%xmm1 - vaesenclast 160(%rdx),%xmm1,%xmm1 - - vmovdqa %xmm1,(%rsi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size aes128gcmsiv_ecb_enc_block,.-aes128gcmsiv_ecb_enc_block -.globl aes256gcmsiv_aes_ks_enc_x1 -.hidden aes256gcmsiv_aes_ks_enc_x1 -.type aes256gcmsiv_aes_ks_enc_x1,@function -.align 16 -aes256gcmsiv_aes_ks_enc_x1: -.cfi_startproc - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - vmovdqa (%rdi),%xmm8 - vmovdqa (%rcx),%xmm1 - vmovdqa 16(%rcx),%xmm3 - vpxor %xmm1,%xmm8,%xmm8 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm1,(%rdx) - vmovdqu %xmm3,16(%rdx) - vpxor %xmm14,%xmm14,%xmm14 - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - 
vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,32(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,48(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,64(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,80(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,96(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,112(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor 
%xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,128(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,144(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,160(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,176(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,192(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,208(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenclast %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,224(%rdx) - - vmovdqa %xmm8,(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size 
aes256gcmsiv_aes_ks_enc_x1,.-aes256gcmsiv_aes_ks_enc_x1 -.globl aes256gcmsiv_ecb_enc_block -.hidden aes256gcmsiv_ecb_enc_block -.type aes256gcmsiv_ecb_enc_block,@function -.align 16 -aes256gcmsiv_ecb_enc_block: -.cfi_startproc - vmovdqa (%rdi),%xmm1 - vpxor (%rdx),%xmm1,%xmm1 - vaesenc 16(%rdx),%xmm1,%xmm1 - vaesenc 32(%rdx),%xmm1,%xmm1 - vaesenc 48(%rdx),%xmm1,%xmm1 - vaesenc 64(%rdx),%xmm1,%xmm1 - vaesenc 80(%rdx),%xmm1,%xmm1 - vaesenc 96(%rdx),%xmm1,%xmm1 - vaesenc 112(%rdx),%xmm1,%xmm1 - vaesenc 128(%rdx),%xmm1,%xmm1 - vaesenc 144(%rdx),%xmm1,%xmm1 - vaesenc 160(%rdx),%xmm1,%xmm1 - vaesenc 176(%rdx),%xmm1,%xmm1 - vaesenc 192(%rdx),%xmm1,%xmm1 - vaesenc 208(%rdx),%xmm1,%xmm1 - vaesenclast 224(%rdx),%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes256gcmsiv_ecb_enc_block,.-aes256gcmsiv_ecb_enc_block -.globl aes256gcmsiv_enc_msg_x4 -.hidden aes256gcmsiv_enc_msg_x4 -.type aes256gcmsiv_enc_msg_x4,@function -.align 16 -aes256gcmsiv_enc_msg_x4: -.cfi_startproc - testq %r8,%r8 - jnz .L256_enc_msg_x4_start - .byte 0xf3,0xc3 - -.L256_enc_msg_x4_start: - movq %r8,%r10 - shrq $4,%r8 - shlq $60,%r10 - jz .L256_enc_msg_x4_start2 - addq $1,%r8 - -.L256_enc_msg_x4_start2: - movq %r8,%r10 - shlq $62,%r10 - shrq $62,%r10 - - - vmovdqa (%rdx),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - - vmovdqa four(%rip),%xmm4 - vmovdqa %xmm15,%xmm0 - vpaddd one(%rip),%xmm15,%xmm1 - vpaddd two(%rip),%xmm15,%xmm2 - vpaddd three(%rip),%xmm15,%xmm3 - - shrq $2,%r8 - je .L256_enc_msg_x4_check_remainder - - subq $64,%rsi - subq $64,%rdi - -.L256_enc_msg_x4_loop1: - addq $64,%rsi - addq $64,%rdi - - vmovdqa %xmm0,%xmm5 - vmovdqa %xmm1,%xmm6 - vmovdqa %xmm2,%xmm7 - vmovdqa %xmm3,%xmm8 - - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqu 
32(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm1,%xmm1 - vmovdqu 48(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm2,%xmm2 - vmovdqu 64(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm3,%xmm3 - - vmovdqu 80(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 96(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 112(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 128(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 144(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 176(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 192(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 208(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 224(%rcx),%xmm12 - vaesenclast %xmm12,%xmm5,%xmm5 - vaesenclast %xmm12,%xmm6,%xmm6 - vaesenclast %xmm12,%xmm7,%xmm7 - vaesenclast %xmm12,%xmm8,%xmm8 - - - - vpxor 
0(%rdi),%xmm5,%xmm5 - vpxor 16(%rdi),%xmm6,%xmm6 - vpxor 32(%rdi),%xmm7,%xmm7 - vpxor 48(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm5,0(%rsi) - vmovdqu %xmm6,16(%rsi) - vmovdqu %xmm7,32(%rsi) - vmovdqu %xmm8,48(%rsi) - - jne .L256_enc_msg_x4_loop1 - - addq $64,%rsi - addq $64,%rdi - -.L256_enc_msg_x4_check_remainder: - cmpq $0,%r10 - je .L256_enc_msg_x4_out - -.L256_enc_msg_x4_loop2: - - - - vmovdqa %xmm0,%xmm5 - vpaddd one(%rip),%xmm0,%xmm0 - vpxor (%rcx),%xmm5,%xmm5 - vaesenc 16(%rcx),%xmm5,%xmm5 - vaesenc 32(%rcx),%xmm5,%xmm5 - vaesenc 48(%rcx),%xmm5,%xmm5 - vaesenc 64(%rcx),%xmm5,%xmm5 - vaesenc 80(%rcx),%xmm5,%xmm5 - vaesenc 96(%rcx),%xmm5,%xmm5 - vaesenc 112(%rcx),%xmm5,%xmm5 - vaesenc 128(%rcx),%xmm5,%xmm5 - vaesenc 144(%rcx),%xmm5,%xmm5 - vaesenc 160(%rcx),%xmm5,%xmm5 - vaesenc 176(%rcx),%xmm5,%xmm5 - vaesenc 192(%rcx),%xmm5,%xmm5 - vaesenc 208(%rcx),%xmm5,%xmm5 - vaesenclast 224(%rcx),%xmm5,%xmm5 - - - vpxor (%rdi),%xmm5,%xmm5 - - vmovdqu %xmm5,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - subq $1,%r10 - jne .L256_enc_msg_x4_loop2 - -.L256_enc_msg_x4_out: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes256gcmsiv_enc_msg_x4,.-aes256gcmsiv_enc_msg_x4 -.globl aes256gcmsiv_enc_msg_x8 -.hidden aes256gcmsiv_enc_msg_x8 -.type aes256gcmsiv_enc_msg_x8,@function -.align 16 -aes256gcmsiv_enc_msg_x8: -.cfi_startproc - testq %r8,%r8 - jnz .L256_enc_msg_x8_start - .byte 0xf3,0xc3 - -.L256_enc_msg_x8_start: - - movq %rsp,%r11 - subq $16,%r11 - andq $-64,%r11 - - movq %r8,%r10 - shrq $4,%r8 - shlq $60,%r10 - jz .L256_enc_msg_x8_start2 - addq $1,%r8 - -.L256_enc_msg_x8_start2: - movq %r8,%r10 - shlq $61,%r10 - shrq $61,%r10 - - - vmovdqa (%rdx),%xmm1 - vpor OR_MASK(%rip),%xmm1,%xmm1 - - - vpaddd seven(%rip),%xmm1,%xmm0 - vmovdqa %xmm0,(%r11) - vpaddd one(%rip),%xmm1,%xmm9 - vpaddd two(%rip),%xmm1,%xmm10 - vpaddd three(%rip),%xmm1,%xmm11 - vpaddd four(%rip),%xmm1,%xmm12 - vpaddd five(%rip),%xmm1,%xmm13 - vpaddd six(%rip),%xmm1,%xmm14 - vmovdqa %xmm1,%xmm0 - - shrq $3,%r8 - 
jz .L256_enc_msg_x8_check_remainder - - subq $128,%rsi - subq $128,%rdi - -.L256_enc_msg_x8_loop1: - addq $128,%rsi - addq $128,%rdi - - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm9,%xmm2 - vmovdqa %xmm10,%xmm3 - vmovdqa %xmm11,%xmm4 - vmovdqa %xmm12,%xmm5 - vmovdqa %xmm13,%xmm6 - vmovdqa %xmm14,%xmm7 - - vmovdqa (%r11),%xmm8 - - vpxor (%rcx),%xmm1,%xmm1 - vpxor (%rcx),%xmm2,%xmm2 - vpxor (%rcx),%xmm3,%xmm3 - vpxor (%rcx),%xmm4,%xmm4 - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqa (%r11),%xmm14 - vpaddd eight(%rip),%xmm14,%xmm14 - vmovdqa %xmm14,(%r11) - vmovdqu 32(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpsubd one(%rip),%xmm14,%xmm14 - vmovdqu 48(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm0,%xmm0 - vmovdqu 64(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm9,%xmm9 - vmovdqu 80(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc 
%xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm10,%xmm10 - vmovdqu 96(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm11,%xmm11 - vmovdqu 112(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm12,%xmm12 - vmovdqu 128(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm13,%xmm13 - vmovdqu 144(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 176(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 192(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 
208(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 224(%rcx),%xmm15 - vaesenclast %xmm15,%xmm1,%xmm1 - vaesenclast %xmm15,%xmm2,%xmm2 - vaesenclast %xmm15,%xmm3,%xmm3 - vaesenclast %xmm15,%xmm4,%xmm4 - vaesenclast %xmm15,%xmm5,%xmm5 - vaesenclast %xmm15,%xmm6,%xmm6 - vaesenclast %xmm15,%xmm7,%xmm7 - vaesenclast %xmm15,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm1,%xmm1 - vpxor 16(%rdi),%xmm2,%xmm2 - vpxor 32(%rdi),%xmm3,%xmm3 - vpxor 48(%rdi),%xmm4,%xmm4 - vpxor 64(%rdi),%xmm5,%xmm5 - vpxor 80(%rdi),%xmm6,%xmm6 - vpxor 96(%rdi),%xmm7,%xmm7 - vpxor 112(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm1,0(%rsi) - vmovdqu %xmm2,16(%rsi) - vmovdqu %xmm3,32(%rsi) - vmovdqu %xmm4,48(%rsi) - vmovdqu %xmm5,64(%rsi) - vmovdqu %xmm6,80(%rsi) - vmovdqu %xmm7,96(%rsi) - vmovdqu %xmm8,112(%rsi) - - jne .L256_enc_msg_x8_loop1 - - addq $128,%rsi - addq $128,%rdi - -.L256_enc_msg_x8_check_remainder: - cmpq $0,%r10 - je .L256_enc_msg_x8_out - -.L256_enc_msg_x8_loop2: - - - vmovdqa %xmm0,%xmm1 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm1,%xmm1 - vaesenc 16(%rcx),%xmm1,%xmm1 - vaesenc 32(%rcx),%xmm1,%xmm1 - vaesenc 48(%rcx),%xmm1,%xmm1 - vaesenc 64(%rcx),%xmm1,%xmm1 - vaesenc 80(%rcx),%xmm1,%xmm1 - vaesenc 96(%rcx),%xmm1,%xmm1 - vaesenc 112(%rcx),%xmm1,%xmm1 - vaesenc 128(%rcx),%xmm1,%xmm1 - vaesenc 144(%rcx),%xmm1,%xmm1 - vaesenc 160(%rcx),%xmm1,%xmm1 - vaesenc 176(%rcx),%xmm1,%xmm1 - vaesenc 192(%rcx),%xmm1,%xmm1 - vaesenc 208(%rcx),%xmm1,%xmm1 - vaesenclast 224(%rcx),%xmm1,%xmm1 - - - vpxor (%rdi),%xmm1,%xmm1 - - vmovdqu %xmm1,(%rsi) - - addq $16,%rdi - addq $16,%rsi - subq $1,%r10 - jnz .L256_enc_msg_x8_loop2 - -.L256_enc_msg_x8_out: - .byte 0xf3,0xc3 - -.cfi_endproc -.size aes256gcmsiv_enc_msg_x8,.-aes256gcmsiv_enc_msg_x8 -.globl aes256gcmsiv_dec -.hidden 
aes256gcmsiv_dec -.type aes256gcmsiv_dec,@function -.align 16 -aes256gcmsiv_dec: -.cfi_startproc - testq $~15,%r9 - jnz .L256_dec_start - .byte 0xf3,0xc3 - -.L256_dec_start: - vzeroupper - vmovdqa (%rdx),%xmm0 - movq %rdx,%rax - - leaq 32(%rax),%rax - leaq 32(%rcx),%rcx - - - vmovdqu (%rdi,%r9,1),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - andq $~15,%r9 - - - cmpq $96,%r9 - jb .L256_dec_loop2 - - - subq $96,%r9 - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vpxor (%r8),%xmm7,%xmm7 - vpxor (%r8),%xmm8,%xmm8 - vpxor (%r8),%xmm9,%xmm9 - vpxor (%r8),%xmm10,%xmm10 - vpxor (%r8),%xmm11,%xmm11 - vpxor (%r8),%xmm12,%xmm12 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - 
vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 176(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 192(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 208(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 224(%r8),%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vaesenclast %xmm4,%xmm8,%xmm8 - vaesenclast %xmm4,%xmm9,%xmm9 - vaesenclast %xmm4,%xmm10,%xmm10 - vaesenclast %xmm4,%xmm11,%xmm11 - vaesenclast %xmm4,%xmm12,%xmm12 - - - vpxor 0(%rdi),%xmm7,%xmm7 - vpxor 16(%rdi),%xmm8,%xmm8 - vpxor 32(%rdi),%xmm9,%xmm9 - vpxor 48(%rdi),%xmm10,%xmm10 - vpxor 64(%rdi),%xmm11,%xmm11 - vpxor 80(%rdi),%xmm12,%xmm12 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - addq $96,%rdi - addq $96,%rsi 
- jmp .L256_dec_loop1 - - -.align 64 -.L256_dec_loop1: - cmpq $96,%r9 - jb .L256_dec_finish_96 - subq $96,%r9 - - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vmovdqa (%r8),%xmm4 - vpxor %xmm4,%xmm7,%xmm7 - vpxor %xmm4,%xmm8,%xmm8 - vpxor %xmm4,%xmm9,%xmm9 - vpxor %xmm4,%xmm10,%xmm10 - vpxor %xmm4,%xmm11,%xmm11 - vpxor %xmm4,%xmm12,%xmm12 - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - 
vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vmovdqa 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - 
vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 176(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 192(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 208(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 224(%r8),%xmm6 - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor 0(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vpxor 16(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm8,%xmm8 - vpxor 32(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm9,%xmm9 - vpxor 48(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm10,%xmm10 - vpxor 64(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm11,%xmm11 - vpxor 80(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm12,%xmm12 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - vpxor 
%xmm5,%xmm0,%xmm0 - - leaq 96(%rdi),%rdi - leaq 96(%rsi),%rsi - jmp .L256_dec_loop1 - -.L256_dec_finish_96: - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - 
vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor %xmm5,%xmm0,%xmm0 - -.L256_dec_loop2: - - - - cmpq $16,%r9 - jb .L256_dec_out - subq $16,%r9 - - vmovdqa %xmm15,%xmm2 - vpaddd one(%rip),%xmm15,%xmm15 - - vpxor 0(%r8),%xmm2,%xmm2 - vaesenc 16(%r8),%xmm2,%xmm2 - vaesenc 32(%r8),%xmm2,%xmm2 - vaesenc 48(%r8),%xmm2,%xmm2 - vaesenc 64(%r8),%xmm2,%xmm2 - vaesenc 80(%r8),%xmm2,%xmm2 - vaesenc 96(%r8),%xmm2,%xmm2 - vaesenc 112(%r8),%xmm2,%xmm2 - vaesenc 128(%r8),%xmm2,%xmm2 - vaesenc 144(%r8),%xmm2,%xmm2 - vaesenc 160(%r8),%xmm2,%xmm2 - vaesenc 176(%r8),%xmm2,%xmm2 - vaesenc 192(%r8),%xmm2,%xmm2 - vaesenc 208(%r8),%xmm2,%xmm2 - vaesenclast 224(%r8),%xmm2,%xmm2 - vpxor (%rdi),%xmm2,%xmm2 - vmovdqu %xmm2,(%rsi) - addq $16,%rdi - addq $16,%rsi - - vpxor %xmm2,%xmm0,%xmm0 - vmovdqa -32(%rcx),%xmm1 - call GFMUL - - jmp .L256_dec_loop2 - -.L256_dec_out: - vmovdqu %xmm0,(%rdx) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes256gcmsiv_dec, .-aes256gcmsiv_dec -.globl aes256gcmsiv_kdf -.hidden aes256gcmsiv_kdf -.type aes256gcmsiv_kdf,@function -.align 16 -aes256gcmsiv_kdf: -.cfi_startproc - - - - - vmovdqa (%rdx),%xmm1 - vmovdqa 0(%rdi),%xmm4 - vmovdqa and_mask(%rip),%xmm11 - vmovdqa one(%rip),%xmm8 - vpshufd $0x90,%xmm4,%xmm4 - vpand %xmm11,%xmm4,%xmm4 - vpaddd %xmm8,%xmm4,%xmm6 - vpaddd %xmm8,%xmm6,%xmm7 - vpaddd %xmm8,%xmm7,%xmm11 - vpaddd %xmm8,%xmm11,%xmm12 - vpaddd %xmm8,%xmm12,%xmm13 - - vpxor %xmm1,%xmm4,%xmm4 - vpxor %xmm1,%xmm6,%xmm6 - vpxor %xmm1,%xmm7,%xmm7 - vpxor %xmm1,%xmm11,%xmm11 - vpxor %xmm1,%xmm12,%xmm12 - vpxor %xmm1,%xmm13,%xmm13 - - vmovdqa 16(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc 
%xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 32(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 48(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 64(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 80(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 96(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 112(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 128(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 144(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 160(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 176(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc 
%xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 192(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 208(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 224(%rdx),%xmm2 - vaesenclast %xmm2,%xmm4,%xmm4 - vaesenclast %xmm2,%xmm6,%xmm6 - vaesenclast %xmm2,%xmm7,%xmm7 - vaesenclast %xmm2,%xmm11,%xmm11 - vaesenclast %xmm2,%xmm12,%xmm12 - vaesenclast %xmm2,%xmm13,%xmm13 - - - vmovdqa %xmm4,0(%rsi) - vmovdqa %xmm6,16(%rsi) - vmovdqa %xmm7,32(%rsi) - vmovdqa %xmm11,48(%rsi) - vmovdqa %xmm12,64(%rsi) - vmovdqa %xmm13,80(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size aes256gcmsiv_kdf, .-aes256gcmsiv_kdf -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S deleted file mode 100644 index e313348808..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S +++ /dev/null @@ -1,8987 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -chacha20_poly1305_constants: - -.align 64 -.chacha20_consts: -.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' -.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' -.rol8: -.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 -.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 -.rol16: -.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 -.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 -.avx2_init: -.long 0,0,0,0 -.sse_inc: -.long 1,0,0,0 -.avx2_inc: -.long 2,0,0,0,2,0,0,0 -.clamp: -.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC -.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF -.align 16 -.and_masks: -.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 -.byte 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff - -.type poly_hash_ad_internal,@function -.align 64 -poly_hash_ad_internal: -.cfi_startproc - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - cmpq $13,%r8 - jne hash_ad_loop -poly_fast_tls_ad: - - movq (%rcx),%r10 - movq 5(%rcx),%r11 - shrq $24,%r11 - movq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - .byte 0xf3,0xc3 -hash_ad_loop: - - cmpq $16,%r8 - jb hash_ad_tail - addq 0(%rcx),%r10 - adcq 8+0(%rcx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 
- - leaq 16(%rcx),%rcx - subq $16,%r8 - jmp hash_ad_loop -hash_ad_tail: - cmpq $0,%r8 - je 1f - - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - addq %r8,%rcx -hash_ad_tail_loop: - shldq $8,%r13,%r14 - shlq $8,%r13 - movzbq -1(%rcx),%r15 - xorq %r15,%r13 - decq %rcx - decq %r8 - jne hash_ad_tail_loop - - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -1: - .byte 0xf3,0xc3 -.cfi_endproc -.size poly_hash_ad_internal, .-poly_hash_ad_internal - -.globl chacha20_poly1305_open -.hidden chacha20_poly1305_open -.type chacha20_poly1305_open,@function -.align 64 -chacha20_poly1305_open: -.cfi_startproc - pushq %rbp -.cfi_adjust_cfa_offset 8 - pushq %rbx -.cfi_adjust_cfa_offset 8 - pushq %r12 -.cfi_adjust_cfa_offset 8 - pushq %r13 -.cfi_adjust_cfa_offset 8 - pushq %r14 -.cfi_adjust_cfa_offset 8 - pushq %r15 -.cfi_adjust_cfa_offset 8 - - - pushq %r9 -.cfi_adjust_cfa_offset 8 - subq $288 + 32,%rsp -.cfi_adjust_cfa_offset 288 + 32 -.cfi_offset rbp, -16 -.cfi_offset rbx, -24 -.cfi_offset r12, -32 -.cfi_offset r13, -40 -.cfi_offset r14, -48 -.cfi_offset r15, -56 - leaq 32(%rsp),%rbp - andq $-32,%rbp - movq %rdx,8+32(%rbp) - movq %r8,0+32(%rbp) - movq %rdx,%rbx - - movl OPENSSL_ia32cap_P+8(%rip),%eax - andl $288,%eax - xorl $288,%eax - jz chacha20_poly1305_open_avx2 - -1: - cmpq $128,%rbx - jbe open_sse_128 - - movdqa .chacha20_consts(%rip),%xmm0 - 
movdqu 0(%r9),%xmm4 - movdqu 16(%r9),%xmm8 - movdqu 32(%r9),%xmm12 - movdqa %xmm12,%xmm7 - - movdqa %xmm4,48(%rbp) - movdqa %xmm8,64(%rbp) - movdqa %xmm12,96(%rbp) - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %r10 - jne 1b - - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - - pand .clamp(%rip),%xmm0 - movdqa %xmm0,0(%rbp) - movdqa %xmm4,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal -open_sse_main_loop: - cmpq $256,%rbx - jb 2f - - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd .sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - - - - movq $4,%rcx - movq %rsi,%r8 -1: - movdqa 
%xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - - leaq 16(%r8),%r8 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa 
%xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - 
pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %rcx - jge 1b - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - cmpq $-6,%rcx - jg 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqa %xmm12,80(%rbp) - movdqu 0 + 
0(%rsi),%xmm12 - pxor %xmm3,%xmm12 - movdqu %xmm12,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm12 - pxor %xmm7,%xmm12 - movdqu %xmm12,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm12 - pxor %xmm11,%xmm12 - movdqu %xmm12,32 + 0(%rdi) - movdqu 48 + 0(%rsi),%xmm12 - pxor %xmm15,%xmm12 - movdqu %xmm12,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - movdqu 0 + 192(%rsi),%xmm3 - movdqu 16 + 192(%rsi),%xmm7 - movdqu 32 + 192(%rsi),%xmm11 - movdqu 48 + 192(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor 80(%rbp),%xmm15 - movdqu %xmm0,0 + 192(%rdi) - movdqu %xmm4,16 + 192(%rdi) - movdqu %xmm8,32 + 192(%rdi) - movdqu %xmm15,48 + 192(%rdi) - - leaq 256(%rsi),%rsi - leaq 256(%rdi),%rdi - subq $256,%rbx - jmp open_sse_main_loop -2: - - testq %rbx,%rbx - jz open_sse_finalize - cmpq $64,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa 96(%rbp),%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - - xorq %r8,%r8 - movq %rbx,%rcx - cmpq $16,%rcx - jb 2f -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 
8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx -2: - addq $16,%r8 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - cmpq $16,%rcx - jae 1b - cmpq $160,%r8 - jne 2b - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - jmp open_sse_tail_64_dec_loop -3: - cmpq $128,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa 96(%rbp),%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - - movq %rbx,%rcx - andq $-16,%rcx - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq 
%r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor 
%xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 0(%rdi) - movdqu %xmm5,16 + 0(%rdi) - movdqu %xmm9,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - - subq $64,%rbx - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - jmp open_sse_tail_64_dec_loop -3: - cmpq $192,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa 96(%rbp),%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - - movq %rbx,%rcx - movq $160,%r8 - cmpq $160,%rcx - cmovgq %r8,%rcx - andq $-16,%rcx - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - 
movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb 
.rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - cmpq $176,%rbx - jb 1f - addq 160(%rsi),%r10 - adcq 8+160(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - cmpq $192,%rbx - jb 1f - addq 176(%rsi),%r10 - adcq 8+176(%rsi),%r11 - adcq $1,%r12 - movq 
0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -1: - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu %xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - subq $128,%rbx - leaq 128(%rsi),%rsi - leaq 128(%rdi),%rdi - jmp open_sse_tail_64_dec_loop -3: - - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd 
.sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movdqa %xmm11,80(%rbp) - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm4 - pxor %xmm11,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm4 - pxor %xmm11,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm5 - pxor %xmm11,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm5 - pxor %xmm11,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm6 - pxor %xmm11,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm6 - pxor %xmm11,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - movdqa 80(%rbp),%xmm11 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa %xmm9,80(%rbp) - 
paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol16(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $12,%xmm9 - psrld $20,%xmm7 - pxor %xmm9,%xmm7 - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol8(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $7,%xmm9 - psrld $25,%xmm7 - pxor %xmm9,%xmm7 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 - movdqa 80(%rbp),%xmm9 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - movdqa %xmm11,80(%rbp) - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm4 - pxor %xmm11,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm4 - pxor %xmm11,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm5 - pxor %xmm11,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm5 - pxor %xmm11,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm6 - pxor %xmm11,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm6 - 
pxor %xmm11,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - movdqa 80(%rbp),%xmm11 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - movdqa %xmm9,80(%rbp) - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol16(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $12,%xmm9 - psrld $20,%xmm7 - pxor %xmm9,%xmm7 - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol8(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $7,%xmm9 - psrld $25,%xmm7 - pxor %xmm9,%xmm7 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 - movdqa 80(%rbp),%xmm9 - - addq $16,%r8 - cmpq $160,%r8 - jb 1b - movq %rbx,%rcx - andq $-16,%rcx -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - addq $16,%r8 - cmpq %rcx,%r8 - jb 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 
- paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqa %xmm12,80(%rbp) - movdqu 0 + 0(%rsi),%xmm12 - pxor %xmm3,%xmm12 - movdqu %xmm12,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm12 - pxor %xmm7,%xmm12 - movdqu %xmm12,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm12 - pxor %xmm11,%xmm12 - movdqu %xmm12,32 + 0(%rdi) - movdqu 48 + 0(%rsi),%xmm12 - pxor %xmm15,%xmm12 - movdqu %xmm12,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - movdqa 80(%rbp),%xmm12 - subq $192,%rbx - leaq 192(%rsi),%rsi - leaq 192(%rdi),%rdi - - -open_sse_tail_64_dec_loop: - cmpq $16,%rbx - jb 1f - subq $16,%rbx - movdqu (%rsi),%xmm3 - pxor %xmm3,%xmm0 - movdqu %xmm0,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movdqa %xmm4,%xmm0 - movdqa %xmm8,%xmm4 - movdqa %xmm12,%xmm8 - jmp open_sse_tail_64_dec_loop -1: - movdqa %xmm0,%xmm1 - - -open_sse_tail_16: - testq %rbx,%rbx - jz open_sse_finalize - - - - pxor %xmm3,%xmm3 - leaq -1(%rsi,%rbx), %rsi - movq %rbx,%r8 -2: - pslldq $1,%xmm3 - pinsrb $0,(%rsi),%xmm3 - subq $1,%rsi - subq $1,%r8 - jnz 2b - -3: -.byte 102,73,15,126,221 - pextrq $1,%xmm3,%r14 - - pxor %xmm1,%xmm3 - - -2: - pextrb $0,%xmm3,(%rdi) - psrldq $1,%xmm3 - addq $1,%rdi - subq $1,%rbx - jne 2b - - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq 
%rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -open_sse_finalize: - addq 32(%rbp),%r10 - adcq 8+32(%rbp),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movq %r10,%r13 - movq %r11,%r14 - movq %r12,%r15 - subq $-5,%r10 - sbbq $-1,%r11 - sbbq $3,%r12 - cmovcq %r13,%r10 - cmovcq %r14,%r11 - cmovcq %r15,%r12 - - addq 0+16(%rbp),%r10 - adcq 8+16(%rbp),%r11 - - addq $288 + 32,%rsp -.cfi_adjust_cfa_offset -(288 + 32) - popq %r9 -.cfi_adjust_cfa_offset -8 - movq %r10,(%r9) - movq %r11,8(%r9) - - popq %r15 -.cfi_adjust_cfa_offset -8 - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - popq %rbx -.cfi_adjust_cfa_offset -8 - popq %rbp -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_adjust_cfa_offset (8 * 6) + 288 + 32 - -open_sse_128: - movdqu .chacha20_consts(%rip),%xmm0 - movdqa %xmm0,%xmm1 - movdqa 
%xmm0,%xmm2 - movdqu 0(%r9),%xmm4 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqu 16(%r9),%xmm8 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqu 32(%r9),%xmm12 - movdqa %xmm12,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa %xmm13,%xmm15 - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - 
pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm0 - paddd .chacha20_consts(%rip),%xmm1 - paddd .chacha20_consts(%rip),%xmm2 - paddd %xmm7,%xmm4 - paddd %xmm7,%xmm5 - paddd %xmm7,%xmm6 - paddd %xmm11,%xmm9 - paddd %xmm11,%xmm10 - paddd %xmm15,%xmm13 - paddd .sse_inc(%rip),%xmm15 - paddd %xmm15,%xmm14 - - pand .clamp(%rip),%xmm0 - movdqa %xmm0,0(%rbp) - movdqa %xmm4,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal -1: - cmpq $16,%rbx - jb open_sse_tail_16 - subq $16,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - - - movdqu 0(%rsi),%xmm3 - pxor %xmm3,%xmm1 - movdqu %xmm1,0(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - 
movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movdqa %xmm5,%xmm1 - movdqa %xmm9,%xmm5 - movdqa %xmm13,%xmm9 - movdqa %xmm2,%xmm13 - movdqa %xmm6,%xmm2 - movdqa %xmm10,%xmm6 - movdqa %xmm14,%xmm10 - jmp 1b - jmp open_sse_tail_16 -.size chacha20_poly1305_open, .-chacha20_poly1305_open -.cfi_endproc - - - - -.globl chacha20_poly1305_seal -.hidden chacha20_poly1305_seal -.type chacha20_poly1305_seal,@function -.align 64 -chacha20_poly1305_seal: -.cfi_startproc - pushq %rbp -.cfi_adjust_cfa_offset 8 - pushq %rbx -.cfi_adjust_cfa_offset 8 - pushq %r12 -.cfi_adjust_cfa_offset 8 - pushq %r13 -.cfi_adjust_cfa_offset 8 - pushq %r14 -.cfi_adjust_cfa_offset 8 - pushq %r15 -.cfi_adjust_cfa_offset 8 - - - pushq %r9 -.cfi_adjust_cfa_offset 8 - subq $288 + 32,%rsp -.cfi_adjust_cfa_offset 288 + 32 -.cfi_offset rbp, -16 -.cfi_offset rbx, -24 -.cfi_offset r12, -32 -.cfi_offset r13, -40 -.cfi_offset r14, -48 -.cfi_offset r15, -56 - leaq 32(%rsp),%rbp - andq $-32,%rbp - movq 56(%r9),%rbx - addq %rdx,%rbx - movq %rbx,8+32(%rbp) - movq %r8,0+32(%rbp) - movq %rdx,%rbx - - movl OPENSSL_ia32cap_P+8(%rip),%eax - andl $288,%eax - xorl $288,%eax - jz chacha20_poly1305_seal_avx2 - - cmpq $128,%rbx - jbe seal_sse_128 - - movdqa .chacha20_consts(%rip),%xmm0 - movdqu 0(%r9),%xmm4 - movdqu 16(%r9),%xmm8 - movdqu 32(%r9),%xmm12 - movdqa %xmm0,%xmm1 - movdqa %xmm0,%xmm2 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqa %xmm8,%xmm11 - movdqa %xmm12,%xmm15 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm14 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm13 - paddd 
.sse_inc(%rip),%xmm12 - - movdqa %xmm4,48(%rbp) - movdqa %xmm8,64(%rbp) - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - movq $10,%r10 -1: - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 
102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 
-.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - - pand .clamp(%rip),%xmm3 - movdqa %xmm3,0(%rbp) - movdqa %xmm7,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu %xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - cmpq $192,%rbx - ja 1f - movq $128,%rcx - subq $128,%rbx - leaq 128(%rsi),%rsi - jmp seal_sse_128_seal_hash -1: - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor %xmm12,%xmm15 - movdqu %xmm0,0 + 128(%rdi) - movdqu %xmm4,16 + 128(%rdi) - movdqu %xmm8,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - movq $192,%rcx - subq $192,%rbx - leaq 192(%rsi),%rsi - movq $2,%rcx - movq $8,%r8 - cmpq $64,%rbx - jbe 
seal_sse_tail_64 - cmpq $128,%rbx - jbe seal_sse_tail_128 - cmpq $192,%rbx - jbe seal_sse_tail_192 - -1: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd .sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - -2: - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 
102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld 
$32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - leaq 16(%rdi),%rdi - decq %r8 - jge 2b - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq 
$3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - decq %rcx - jg 2b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - movdqa %xmm14,80(%rbp) - movdqa %xmm14,80(%rbp) - movdqu 0 + 0(%rsi),%xmm14 - pxor %xmm3,%xmm14 - movdqu %xmm14,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm14 - pxor %xmm7,%xmm14 - movdqu %xmm14,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm14 - pxor %xmm11,%xmm14 - movdqu %xmm14,32 + 0(%rdi) - movdqu 48 + 0(%rsi),%xmm14 - pxor %xmm15,%xmm14 - movdqu %xmm14,48 + 0(%rdi) - - movdqa 80(%rbp),%xmm14 - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - cmpq $256,%rbx - ja 3f - - movq $192,%rcx - subq $192,%rbx - leaq 192(%rsi),%rsi - jmp seal_sse_128_seal_hash -3: - movdqu 0 + 192(%rsi),%xmm3 - movdqu 16 + 192(%rsi),%xmm7 - movdqu 32 + 192(%rsi),%xmm11 - movdqu 48 + 192(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor 
%xmm12,%xmm15 - movdqu %xmm0,0 + 192(%rdi) - movdqu %xmm4,16 + 192(%rdi) - movdqu %xmm8,32 + 192(%rdi) - movdqu %xmm15,48 + 192(%rdi) - - leaq 256(%rsi),%rsi - subq $256,%rbx - movq $6,%rcx - movq $4,%r8 - cmpq $192,%rbx - jg 1b - movq %rbx,%rcx - testq %rbx,%rbx - je seal_sse_128_seal_hash - movq $6,%rcx - cmpq $64,%rbx - jg 3f - -seal_sse_tail_64: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa 96(%rbp),%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa 
%xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - jmp seal_sse_128_seal -3: - cmpq $128,%rbx - jg 3f - -seal_sse_tail_128: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa 96(%rbp),%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq 
%r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor 
%xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 0(%rdi) - movdqu %xmm5,16 + 0(%rdi) - movdqu %xmm9,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - - movq $64,%rcx - subq $64,%rbx - leaq 64(%rsi),%rsi - jmp seal_sse_128_seal_hash -3: - -seal_sse_tail_192: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa 96(%rbp),%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq 
%rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq 
%rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd 
.chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu %xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - movq $128,%rcx - subq $128,%rbx - leaq 128(%rsi),%rsi - -seal_sse_128_seal_hash: - cmpq $16,%rcx - jb seal_sse_128_seal - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx - leaq 16(%rdi),%rdi - jmp seal_sse_128_seal_hash - -seal_sse_128_seal: - cmpq $16,%rbx - jb seal_sse_tail_16 - subq $16,%rbx - - movdqu 0(%rsi),%xmm3 - pxor %xmm3,%xmm0 - movdqu %xmm0,0(%rdi) - - addq 0(%rdi),%r10 - adcq 8(%rdi),%r11 
- adcq $1,%r12 - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movdqa %xmm4,%xmm0 - movdqa %xmm8,%xmm4 - movdqa %xmm12,%xmm8 - movdqa %xmm1,%xmm12 - movdqa %xmm5,%xmm1 - movdqa %xmm9,%xmm5 - movdqa %xmm13,%xmm9 - jmp seal_sse_128_seal - -seal_sse_tail_16: - testq %rbx,%rbx - jz process_blocks_of_extra_in - - movq %rbx,%r8 - movq %rbx,%rcx - leaq -1(%rsi,%rbx), %rsi - pxor %xmm15,%xmm15 -1: - pslldq $1,%xmm15 - pinsrb $0,(%rsi),%xmm15 - leaq -1(%rsi),%rsi - decq %rcx - jne 1b - - - pxor %xmm0,%xmm15 - - - movq %rbx,%rcx - movdqu %xmm15,%xmm0 -2: - pextrb $0,%xmm0,(%rdi) - psrldq $1,%xmm0 - addq $1,%rdi - subq $1,%rcx - jnz 2b - - - - - - - - - movq 288+32(%rsp),%r9 - movq 56(%r9),%r14 - movq 48(%r9),%r13 - testq %r14,%r14 - jz process_partial_block - - movq $16,%r15 - subq %rbx,%r15 - cmpq %r15,%r14 - - jge load_extra_in - movq %r14,%r15 - -load_extra_in: - - - leaq -1(%r13,%r15), %rsi - - - addq %r15,%r13 - subq %r15,%r14 - movq %r13,48(%r9) - movq %r14,56(%r9) - - - - addq %r15,%r8 - - - pxor %xmm11,%xmm11 -3: - pslldq $1,%xmm11 - pinsrb $0,(%rsi),%xmm11 - leaq -1(%rsi),%rsi - subq $1,%r15 - jnz 3b - - - - - movq %rbx,%r15 - -4: - pslldq $1,%xmm11 - subq $1,%r15 - jnz 4b - - - - - leaq .and_masks(%rip),%r15 - shlq $4,%rbx - pand -16(%r15,%rbx), %xmm15 - - - por %xmm11,%xmm15 - - - -.byte 102,77,15,126,253 - pextrq $1,%xmm15,%r14 - addq 
%r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -process_blocks_of_extra_in: - - movq 288+32(%rsp),%r9 - movq 48(%r9),%rsi - movq 56(%r9),%r8 - movq %r8,%rcx - shrq $4,%r8 - -5: - jz process_extra_in_trailer - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rsi),%rsi - subq $1,%r8 - jmp 5b - -process_extra_in_trailer: - andq $15,%rcx - movq %rcx,%rbx - jz do_length_block - leaq -1(%rsi,%rcx), %rsi - -6: - pslldq $1,%xmm15 - pinsrb $0,(%rsi),%xmm15 - leaq -1(%rsi),%rsi - subq $1,%rcx - jnz 6b - -process_partial_block: - - leaq .and_masks(%rip),%r15 - shlq $4,%rbx - pand -16(%r15,%rbx), %xmm15 -.byte 102,77,15,126,253 - pextrq $1,%xmm15,%r14 - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 
0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -do_length_block: - addq 32(%rbp),%r10 - adcq 8+32(%rbp),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movq %r10,%r13 - movq %r11,%r14 - movq %r12,%r15 - subq $-5,%r10 - sbbq $-1,%r11 - sbbq $3,%r12 - cmovcq %r13,%r10 - cmovcq %r14,%r11 - cmovcq %r15,%r12 - - addq 0+16(%rbp),%r10 - adcq 8+16(%rbp),%r11 - - addq $288 + 32,%rsp -.cfi_adjust_cfa_offset -(288 + 32) - popq %r9 -.cfi_adjust_cfa_offset -8 - movq %r10,0(%r9) - movq %r11,8(%r9) - - popq %r15 -.cfi_adjust_cfa_offset -8 - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - popq %rbx -.cfi_adjust_cfa_offset -8 - popq %rbp -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_adjust_cfa_offset (8 * 6) + 288 + 32 - -seal_sse_128: - 
movdqu .chacha20_consts(%rip),%xmm0 - movdqa %xmm0,%xmm1 - movdqa %xmm0,%xmm2 - movdqu 0(%r9),%xmm4 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqu 16(%r9),%xmm8 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqu 32(%r9),%xmm14 - movdqa %xmm14,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa %xmm12,%xmm15 - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor 
%xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm0 - paddd .chacha20_consts(%rip),%xmm1 - paddd .chacha20_consts(%rip),%xmm2 - paddd %xmm7,%xmm4 - paddd %xmm7,%xmm5 - paddd %xmm7,%xmm6 - paddd %xmm11,%xmm8 - paddd %xmm11,%xmm9 - paddd %xmm15,%xmm12 - paddd .sse_inc(%rip),%xmm15 - paddd %xmm15,%xmm13 - - pand .clamp(%rip),%xmm2 - movdqa %xmm2,0(%rbp) - movdqa %xmm6,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal - jmp seal_sse_128_seal -.size chacha20_poly1305_seal, .-chacha20_poly1305_seal - - -.type chacha20_poly1305_open_avx2,@function -.align 64 -chacha20_poly1305_open_avx2: - vzeroupper - vmovdqa .chacha20_consts(%rip),%ymm0 - vbroadcasti128 0(%r9),%ymm4 - vbroadcasti128 16(%r9),%ymm8 - vbroadcasti128 32(%r9),%ymm12 - vpaddd .avx2_init(%rip),%ymm12,%ymm12 - cmpq $192,%rbx - jbe open_avx2_192 - cmpq $320,%rbx - jbe 
open_avx2_320 - - vmovdqa %ymm4,64(%rbp) - vmovdqa %ymm8,96(%rbp) - vmovdqa %ymm12,160(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - - movq %r8,%r8 - call poly_hash_ad_internal - xorq %rcx,%rcx - -1: - addq 0(%rsi,%rcx), %r10 - adcq 8+0(%rsi,%rcx), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq 
$0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - addq $16,%rcx - cmpq $64,%rcx - jne 1b - - vpxor 0(%rsi),%ymm0,%ymm0 - vpxor 32(%rsi),%ymm4,%ymm4 - vmovdqu %ymm0,0(%rdi) - vmovdqu %ymm4,32(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - subq $64,%rbx -1: - - cmpq $512,%rbx - jb 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - xorq %rcx,%rcx -2: - addq 0*8(%rsi,%rcx), %r10 - adcq 8+0*8(%rsi,%rcx), %r11 - adcq $1,%r12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx 
- vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - addq %rax,%r15 - adcq %rdx,%r9 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - addq 2*8(%rsi,%rcx), %r10 - adcq 8+2*8(%rsi,%rcx), %r11 - adcq $1,%r12 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr 
$12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - addq %rax,%r15 - adcq %rdx,%r9 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - addq 4*8(%rsi,%rcx), %r10 - adcq 8+4*8(%rsi,%rcx), %r11 - adcq $1,%r12 - - leaq 48(%rcx),%rcx - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor 
%ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - addq %rax,%r15 - adcq %rdx,%r9 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - cmpq $60*8,%rcx - jne 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 
64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - addq 60*8(%rsi),%r10 - adcq 8+60*8(%rsi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - addq 60*8+16(%rsi),%r10 - adcq 8+60*8+16(%rsi),%r11 - adcq $1,%r12 - vperm2i128 
$0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 - vpxor 0+384(%rsi),%ymm3,%ymm3 - vpxor 32+384(%rsi),%ymm0,%ymm0 - vpxor 64+384(%rsi),%ymm4,%ymm4 - vpxor 96+384(%rsi),%ymm8,%ymm8 - vmovdqu %ymm3,0+384(%rdi) - vmovdqu %ymm0,32+384(%rdi) - vmovdqu %ymm4,64+384(%rdi) - vmovdqu %ymm8,96+384(%rdi) - - leaq 512(%rsi),%rsi - leaq 512(%rdi),%rdi - subq $512,%rbx - jmp 1b -3: - testq %rbx,%rbx - vzeroupper - je open_sse_finalize -3: - cmpq $128,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - - xorq %r8,%r8 - movq %rbx,%rcx - andq $-16,%rcx - testq %rcx,%rcx - je 2f -1: - addq 0*8(%rsi,%r8), %r10 - adcq 8+0*8(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 
0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa 
%ymm3,%ymm8 - - jmp open_avx2_tail_loop -3: - cmpq $256,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - - movq %rbx,128(%rbp) - movq %rbx,%rcx - subq $128,%rcx - shrq $4,%rcx - movq $10,%r8 - cmpq $10,%rcx - cmovgq %r8,%rcx - movq %rsi,%rbx - xorq %r8,%r8 -1: - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb 
.rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - - incq %r8 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - cmpq %rcx,%r8 - jb 1b - cmpq $10,%r8 - jne 2b - movq %rbx,%r8 - subq %rsi,%rbx - movq 
%rbx,%rcx - movq 128(%rbp),%rbx -1: - addq $16,%rcx - cmpq %rbx,%rcx - jg 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm1,%ymm1 - vpxor 64+0(%rsi),%ymm5,%ymm5 - vpxor 96+0(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm1,32+0(%rdi) - vmovdqu %ymm5,64+0(%rdi) - vmovdqu %ymm9,96+0(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 128(%rsi),%rsi - leaq 128(%rdi),%rdi - subq $128,%rbx - jmp open_avx2_tail_loop -3: - cmpq $384,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd 
%ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - - movq %rbx,128(%rbp) - movq %rbx,%rcx - subq $256,%rcx - shrq $4,%rcx - addq $6,%rcx - movq $10,%r8 - cmpq $10,%rcx - cmovgq %r8,%rcx - movq %rsi,%rbx - xorq %r8,%r8 -1: - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx -2: - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr 
$4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx - incq %r8 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld 
$20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - cmpq %rcx,%r8 - jb 1b - cmpq $10,%r8 - jne 2b - movq %rbx,%r8 - subq %rsi,%rbx - movq %rbx,%rcx - movq 128(%rbp),%rbx -1: - addq $16,%rcx - cmpq %rbx,%rcx - jg 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 
192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm2,%ymm2 - vpxor 64+0(%rsi),%ymm6,%ymm6 - vpxor 96+0(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm2,32+0(%rdi) - vmovdqu %ymm6,64+0(%rdi) - vmovdqu %ymm10,96+0(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm1,%ymm1 - vpxor 64+128(%rsi),%ymm5,%ymm5 - vpxor 96+128(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm1,32+128(%rdi) - vmovdqu %ymm5,64+128(%rdi) - vmovdqu %ymm9,96+128(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 256(%rsi),%rsi - leaq 256(%rdi),%rdi - subq $256,%rbx - jmp open_avx2_tail_loop -3: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - xorq %rcx,%rcx - movq %rsi,%r8 -1: - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq 
%rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 -2: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 
- shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - addq 16(%r8),%r10 - adcq 8+16(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - 
shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%r8),%r8 - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld 
$25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - incq %rcx - cmpq $4,%rcx - jl 1b - cmpq $10,%rcx - jne 2b - movq %rbx,%rcx - subq $384,%rcx - andq $-16,%rcx -1: - testq %rcx,%rcx - je 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - subq $16,%rcx - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - 
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 384(%rsi),%rsi - leaq 384(%rdi),%rdi - subq $384,%rbx -open_avx2_tail_loop: - cmpq $32,%rbx - jb open_avx2_tail - subq $32,%rbx - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - leaq 32(%rdi),%rdi - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - jmp open_avx2_tail_loop -open_avx2_tail: - cmpq $16,%rbx - vmovdqa %xmm0,%xmm1 - jb 1f - subq $16,%rbx - - vpxor (%rsi),%xmm0,%xmm1 - vmovdqu %xmm1,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 - vmovdqa %xmm0,%xmm1 -1: - vzeroupper - jmp open_sse_tail_16 - -open_avx2_192: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - 
vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vmovdqa %ymm12,%ymm11 - vmovdqa %ymm13,%ymm15 - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd 
%ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - - decq %r10 - jne 1b - vpaddd %ymm2,%ymm0,%ymm0 - vpaddd %ymm2,%ymm1,%ymm1 - vpaddd %ymm6,%ymm4,%ymm4 - vpaddd %ymm6,%ymm5,%ymm5 - vpaddd %ymm10,%ymm8,%ymm8 - vpaddd %ymm10,%ymm9,%ymm9 - vpaddd %ymm11,%ymm12,%ymm12 - vpaddd %ymm15,%ymm13,%ymm13 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 -open_avx2_short: - movq %r8,%r8 - call poly_hash_ad_internal -open_avx2_hash_and_xor_loop: - cmpq $32,%rbx - jb open_avx2_short_tail_32 - subq $32,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rsi),%r10 - adcq 8+16(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq 
%r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - leaq 32(%rdi),%rdi - - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - vmovdqa %ymm1,%ymm12 - vmovdqa %ymm5,%ymm1 - vmovdqa %ymm9,%ymm5 - vmovdqa %ymm13,%ymm9 - vmovdqa %ymm2,%ymm13 - vmovdqa %ymm6,%ymm2 - jmp open_avx2_hash_and_xor_loop -open_avx2_short_tail_32: - cmpq $16,%rbx - vmovdqa %xmm0,%xmm1 - jb 1f - subq $16,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - vpxor (%rsi),%xmm0,%xmm3 - vmovdqu %xmm3,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - vextracti128 $1,%ymm0,%xmm1 -1: - vzeroupper - jmp open_sse_tail_16 - -open_avx2_320: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm14 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm12,160(%rbp) - 
vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb 
.rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd %ymm7,%ymm4,%ymm4 - vpaddd %ymm7,%ymm5,%ymm5 - vpaddd %ymm7,%ymm6,%ymm6 - vpaddd %ymm11,%ymm8,%ymm8 - vpaddd %ymm11,%ymm9,%ymm9 - vpaddd %ymm11,%ymm10,%ymm10 - vpaddd 160(%rbp),%ymm12,%ymm12 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd 224(%rbp),%ymm14,%ymm14 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 
$0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 - jmp open_avx2_short -.size chacha20_poly1305_open_avx2, .-chacha20_poly1305_open_avx2 - - -.type chacha20_poly1305_seal_avx2,@function -.align 64 -chacha20_poly1305_seal_avx2: - vzeroupper - vmovdqa .chacha20_consts(%rip),%ymm0 - vbroadcasti128 0(%r9),%ymm4 - vbroadcasti128 16(%r9),%ymm8 - vbroadcasti128 32(%r9),%ymm12 - vpaddd .avx2_init(%rip),%ymm12,%ymm12 - cmpq $192,%rbx - jbe seal_avx2_192 - cmpq $320,%rbx - jbe seal_avx2_320 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm4,64(%rbp) - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm8,96(%rbp) - vmovdqa %ymm12,%ymm15 - vpaddd .avx2_inc(%rip),%ymm15,%ymm14 - vpaddd .avx2_inc(%rip),%ymm14,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm15,256(%rbp) - movq $10,%r10 -1: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld 
$32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 
128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - decq %r10 - jnz 1b - 
vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 - vpand .clamp(%rip),%ymm15,%ymm15 - vmovdqa %ymm15,0(%rbp) - movq %r8,%r8 - call poly_hash_ad_internal - - vpxor 0(%rsi),%ymm3,%ymm3 - vpxor 32(%rsi),%ymm11,%ymm11 - vmovdqu %ymm3,0(%rdi) - vmovdqu %ymm11,32(%rdi) - vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+64(%rsi),%ymm15,%ymm15 - vpxor 32+64(%rsi),%ymm2,%ymm2 - vpxor 64+64(%rsi),%ymm6,%ymm6 - vpxor 96+64(%rsi),%ymm10,%ymm10 - vmovdqu %ymm15,0+64(%rdi) - vmovdqu %ymm2,32+64(%rdi) - vmovdqu %ymm6,64+64(%rdi) - vmovdqu %ymm10,96+64(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+192(%rsi),%ymm15,%ymm15 - vpxor 32+192(%rsi),%ymm1,%ymm1 - vpxor 64+192(%rsi),%ymm5,%ymm5 - vpxor 96+192(%rsi),%ymm9,%ymm9 - vmovdqu %ymm15,0+192(%rdi) - vmovdqu %ymm1,32+192(%rdi) - vmovdqu %ymm5,64+192(%rdi) - vmovdqu %ymm9,96+192(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm15,%ymm8 - - leaq 320(%rsi),%rsi - subq $320,%rbx - movq $320,%rcx - cmpq $128,%rbx - jbe seal_avx2_hash - vpxor 0(%rsi),%ymm0,%ymm0 - vpxor 
32(%rsi),%ymm4,%ymm4 - vpxor 64(%rsi),%ymm8,%ymm8 - vpxor 96(%rsi),%ymm12,%ymm12 - vmovdqu %ymm0,320(%rdi) - vmovdqu %ymm4,352(%rdi) - vmovdqu %ymm8,384(%rdi) - vmovdqu %ymm12,416(%rdi) - leaq 128(%rsi),%rsi - subq $128,%rbx - movq $8,%rcx - movq $2,%r8 - cmpq $128,%rbx - jbe seal_avx2_tail_128 - cmpq $256,%rbx - jbe seal_avx2_tail_256 - cmpq $384,%rbx - jbe seal_avx2_tail_384 - cmpq $512,%rbx - jbe seal_avx2_tail_512 - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd 
%ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - 
vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor 
%ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - - subq $16,%rdi - movq $9,%rcx - jmp 4f -1: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - movq $10,%rcx -2: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb 
%ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - addq %rax,%r15 - adcq %rdx,%r9 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -4: - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor 
%ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - addq %rax,%r15 - adcq %rdx,%r9 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - addq 32(%rdi),%r10 - adcq 8+32(%rdi),%r11 - adcq $1,%r12 - - leaq 48(%rdi),%rdi - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor 
%ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - addq %rax,%r15 - adcq %rdx,%r9 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr 
$4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - decq %rcx - jne 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - leaq 32(%rdi),%rdi - vmovdqa %ymm0,128(%rbp) - addq -32(%rdi),%r10 - adcq 8+-32(%rdi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 
$0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - addq -16(%rdi),%r10 - adcq 8+-16(%rdi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 - vpxor 0+384(%rsi),%ymm3,%ymm3 - vpxor 32+384(%rsi),%ymm0,%ymm0 - vpxor 64+384(%rsi),%ymm4,%ymm4 - vpxor 96+384(%rsi),%ymm8,%ymm8 - vmovdqu %ymm3,0+384(%rdi) - vmovdqu %ymm0,32+384(%rdi) - vmovdqu %ymm4,64+384(%rdi) - vmovdqu %ymm8,96+384(%rdi) - - leaq 512(%rsi),%rsi - subq $512,%rbx - cmpq $512,%rbx - jg 1b - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq 
%r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - movq $10,%rcx - xorq %r8,%r8 - cmpq $128,%rbx - ja 3f - -seal_avx2_tail_128: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq 
$3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - addq 
16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - jmp seal_avx2_short_loop -3: - cmpq $256,%rbx - ja 3f - -seal_avx2_tail_256: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq 
%r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - 
vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - 
vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm1,%ymm1 - vpxor 64+0(%rsi),%ymm5,%ymm5 - vpxor 96+0(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm1,32+0(%rdi) - vmovdqu %ymm5,64+0(%rdi) - vmovdqu %ymm9,96+0(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $128,%rcx - leaq 128(%rsi),%rsi - subq $128,%rbx - jmp seal_avx2_hash -3: - cmpq $384,%rbx - ja seal_avx2_tail_512 - -seal_avx2_tail_384: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb 
.rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb 
.rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr 
$8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm2,%ymm2 - vpxor 64+0(%rsi),%ymm6,%ymm6 - vpxor 96+0(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm2,32+0(%rdi) - vmovdqu %ymm6,64+0(%rdi) - vmovdqu %ymm10,96+0(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm1,%ymm1 - vpxor 64+128(%rsi),%ymm5,%ymm5 - vpxor 96+128(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm1,32+128(%rdi) - vmovdqu %ymm5,64+128(%rdi) - vmovdqu %ymm9,96+128(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 
$0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $256,%rcx - leaq 256(%rsi),%rsi - subq $256,%rbx - jmp seal_avx2_hash - -seal_avx2_tail_512: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - vpxor %ymm10,%ymm6,%ymm6 - vpxor 
%ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - addq %rax,%r15 - adcq %rdx,%r9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr 
$4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq 
%r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - - - - - - - - - - - - addq %rax,%r15 - adcq %rdx,%r9 - - - - - - - - - - - - - - - - - - - - - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 
160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $384,%rcx - leaq 384(%rsi),%rsi - subq $384,%rbx - jmp seal_avx2_hash - -seal_avx2_320: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm14 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd 
%ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr 
$4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd %ymm7,%ymm4,%ymm4 - vpaddd %ymm7,%ymm5,%ymm5 - vpaddd %ymm7,%ymm6,%ymm6 - vpaddd %ymm11,%ymm8,%ymm8 - vpaddd %ymm11,%ymm9,%ymm9 - vpaddd %ymm11,%ymm10,%ymm10 - vpaddd 160(%rbp),%ymm12,%ymm12 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd 224(%rbp),%ymm14,%ymm14 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 - vperm2i128 
$0x02,%ymm10,%ymm14,%ymm13 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 - jmp seal_avx2_short - -seal_avx2_192: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vmovdqa %ymm12,%ymm11 - vmovdqa %ymm13,%ymm15 - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr 
$12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - - decq %r10 - jne 1b - vpaddd %ymm2,%ymm0,%ymm0 - vpaddd %ymm2,%ymm1,%ymm1 - vpaddd %ymm6,%ymm4,%ymm4 - vpaddd %ymm6,%ymm5,%ymm5 - vpaddd %ymm10,%ymm8,%ymm8 - vpaddd %ymm10,%ymm9,%ymm9 - vpaddd %ymm11,%ymm12,%ymm12 - vpaddd %ymm15,%ymm13,%ymm13 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 -seal_avx2_short: - movq %r8,%r8 - call poly_hash_ad_internal - xorq %rcx,%rcx -seal_avx2_hash: - cmpq $16,%rcx - jb seal_avx2_short_loop - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx - addq $16,%rdi - 
jmp seal_avx2_hash -seal_avx2_short_loop: - cmpq $32,%rbx - jb seal_avx2_short_tail - subq $32,%rbx - - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - vmovdqa %ymm1,%ymm12 - vmovdqa %ymm5,%ymm1 - vmovdqa %ymm9,%ymm5 - vmovdqa %ymm13,%ymm9 - vmovdqa %ymm2,%ymm13 - vmovdqa %ymm6,%ymm2 - jmp seal_avx2_short_loop -seal_avx2_short_tail: - cmpq $16,%rbx - jb 1f - subq $16,%rbx - vpxor (%rsi),%xmm0,%xmm3 - vmovdqu %xmm3,(%rdi) - leaq 16(%rsi),%rsi - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 
- movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - vextracti128 $1,%ymm0,%xmm0 -1: - vzeroupper - jmp seal_sse_tail_16 -.cfi_endproc -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aes-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aes-x86_64.S deleted file mode 100644 index 47a69ec862..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aes-x86_64.S +++ /dev/null @@ -1,2665 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.type _x86_64_AES_encrypt,@function -.align 16 -_x86_64_AES_encrypt: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - - movl 240(%r15),%r13d - subl $1,%r13d - jmp .Lenc_loop -.align 16 -.Lenc_loop: - - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movl 0(%r14,%rsi,8),%r10d - movl 0(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r12d - - movzbl %bh,%esi - movzbl %ch,%edi - movzbl %dl,%ebp - xorl 3(%r14,%rsi,8),%r10d - xorl 3(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r8d - - movzbl %dh,%esi - shrl $16,%ecx - movzbl %ah,%ebp - xorl 3(%r14,%rsi,8),%r12d - shrl $16,%edx - xorl 3(%r14,%rbp,8),%r8d - - shrl $16,%ebx - leaq 16(%r15),%r15 - shrl $16,%eax - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - xorl 2(%r14,%rsi,8),%r10d - xorl 2(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r12d - - movzbl %dh,%esi - movzbl %ah,%edi - movzbl %bl,%ebp - xorl 1(%r14,%rsi,8),%r10d - xorl 1(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r8d - - movl 12(%r15),%edx - movzbl %bh,%edi - movzbl %ch,%ebp - movl 0(%r15),%eax - xorl 1(%r14,%rdi,8),%r12d - xorl 1(%r14,%rbp,8),%r8d - - movl 4(%r15),%ebx - movl 8(%r15),%ecx - xorl %r10d,%eax - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - subl $1,%r13d - jnz .Lenc_loop - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movzbl 2(%r14,%rsi,8),%r10d - movzbl 2(%r14,%rdi,8),%r11d - movzbl 2(%r14,%rbp,8),%r12d - - movzbl %dl,%esi - movzbl %bh,%edi - movzbl %ch,%ebp - movzbl 2(%r14,%rsi,8),%r8d - movl 0(%r14,%rdi,8),%edi - movl 0(%r14,%rbp,8),%ebp - - andl $0x0000ff00,%edi - andl $0x0000ff00,%ebp - - xorl %edi,%r10d - xorl %ebp,%r11d - shrl $16,%ecx - - movzbl %dh,%esi - movzbl %ah,%edi - shrl $16,%edx - movl 0(%r14,%rsi,8),%esi - movl 0(%r14,%rdi,8),%edi - - 
andl $0x0000ff00,%esi - andl $0x0000ff00,%edi - shrl $16,%ebx - xorl %esi,%r12d - xorl %edi,%r8d - shrl $16,%eax - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - movl 0(%r14,%rsi,8),%esi - movl 0(%r14,%rdi,8),%edi - movl 0(%r14,%rbp,8),%ebp - - andl $0x00ff0000,%esi - andl $0x00ff0000,%edi - andl $0x00ff0000,%ebp - - xorl %esi,%r10d - xorl %edi,%r11d - xorl %ebp,%r12d - - movzbl %bl,%esi - movzbl %dh,%edi - movzbl %ah,%ebp - movl 0(%r14,%rsi,8),%esi - movl 2(%r14,%rdi,8),%edi - movl 2(%r14,%rbp,8),%ebp - - andl $0x00ff0000,%esi - andl $0xff000000,%edi - andl $0xff000000,%ebp - - xorl %esi,%r8d - xorl %edi,%r10d - xorl %ebp,%r11d - - movzbl %bh,%esi - movzbl %ch,%edi - movl 16+12(%r15),%edx - movl 2(%r14,%rsi,8),%esi - movl 2(%r14,%rdi,8),%edi - movl 16+0(%r15),%eax - - andl $0xff000000,%esi - andl $0xff000000,%edi - - xorl %esi,%r12d - xorl %edi,%r8d - - movl 16+4(%r15),%ebx - movl 16+8(%r15),%ecx - xorl %r10d,%eax - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx -.byte 0xf3,0xc3 -.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt -.type _x86_64_AES_encrypt_compact,@function -.align 16 -_x86_64_AES_encrypt_compact: -.cfi_startproc - leaq 128(%r14),%r8 - movl 0-128(%r8),%edi - movl 32-128(%r8),%ebp - movl 64-128(%r8),%r10d - movl 96-128(%r8),%r11d - movl 128-128(%r8),%edi - movl 160-128(%r8),%ebp - movl 192-128(%r8),%r10d - movl 224-128(%r8),%r11d - jmp .Lenc_loop_compact -.align 16 -.Lenc_loop_compact: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - leaq 16(%r15),%r15 - movzbl %al,%r10d - movzbl %bl,%r11d - movzbl %cl,%r12d - movzbl %dl,%r8d - movzbl %bh,%esi - movzbl %ch,%edi - shrl $16,%ecx - movzbl %dh,%ebp - movzbl (%r14,%r10,1),%r10d - movzbl (%r14,%r11,1),%r11d - movzbl (%r14,%r12,1),%r12d - movzbl (%r14,%r8,1),%r8d - - movzbl (%r14,%rsi,1),%r9d - movzbl %ah,%esi - movzbl (%r14,%rdi,1),%r13d - movzbl %cl,%edi - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - - shll $8,%r9d - shrl $16,%edx - shll $8,%r13d - 
xorl %r9d,%r10d - shrl $16,%eax - movzbl %dl,%r9d - shrl $16,%ebx - xorl %r13d,%r11d - shll $8,%ebp - movzbl %al,%r13d - movzbl (%r14,%rdi,1),%edi - xorl %ebp,%r12d - - shll $8,%esi - movzbl %bl,%ebp - shll $16,%edi - xorl %esi,%r8d - movzbl (%r14,%r9,1),%r9d - movzbl %dh,%esi - movzbl (%r14,%r13,1),%r13d - xorl %edi,%r10d - - shrl $8,%ecx - movzbl %ah,%edi - shll $16,%r9d - shrl $8,%ebx - shll $16,%r13d - xorl %r9d,%r11d - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rcx,1),%edx - movzbl (%r14,%rbx,1),%ecx - - shll $16,%ebp - xorl %r13d,%r12d - shll $24,%esi - xorl %ebp,%r8d - shll $24,%edi - xorl %esi,%r10d - shll $24,%edx - xorl %edi,%r11d - shll $24,%ecx - movl %r10d,%eax - movl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - cmpq 16(%rsp),%r15 - je .Lenc_compact_done - movl $0x80808080,%r10d - movl $0x80808080,%r11d - andl %eax,%r10d - andl %ebx,%r11d - movl %r10d,%esi - movl %r11d,%edi - shrl $7,%r10d - leal (%rax,%rax,1),%r8d - shrl $7,%r11d - leal (%rbx,%rbx,1),%r9d - subl %r10d,%esi - subl %r11d,%edi - andl $0xfefefefe,%r8d - andl $0xfefefefe,%r9d - andl $0x1b1b1b1b,%esi - andl $0x1b1b1b1b,%edi - movl %eax,%r10d - movl %ebx,%r11d - xorl %esi,%r8d - xorl %edi,%r9d - - xorl %r8d,%eax - xorl %r9d,%ebx - movl $0x80808080,%r12d - roll $24,%eax - movl $0x80808080,%ebp - roll $24,%ebx - andl %ecx,%r12d - andl %edx,%ebp - xorl %r8d,%eax - xorl %r9d,%ebx - movl %r12d,%esi - rorl $16,%r10d - movl %ebp,%edi - rorl $16,%r11d - leal (%rcx,%rcx,1),%r8d - shrl $7,%r12d - xorl %r10d,%eax - shrl $7,%ebp - xorl %r11d,%ebx - rorl $8,%r10d - leal (%rdx,%rdx,1),%r9d - rorl $8,%r11d - subl %r12d,%esi - subl %ebp,%edi - xorl %r10d,%eax - xorl %r11d,%ebx - - andl $0xfefefefe,%r8d - andl $0xfefefefe,%r9d - andl $0x1b1b1b1b,%esi - andl $0x1b1b1b1b,%edi - movl %ecx,%r12d - movl %edx,%ebp - xorl %esi,%r8d - xorl %edi,%r9d - - rorl $16,%r12d - xorl %r8d,%ecx - rorl $16,%ebp - xorl %r9d,%edx - roll $24,%ecx - movl 0(%r14),%esi - roll 
$24,%edx - xorl %r8d,%ecx - movl 64(%r14),%edi - xorl %r9d,%edx - movl 128(%r14),%r8d - xorl %r12d,%ecx - rorl $8,%r12d - xorl %ebp,%edx - rorl $8,%ebp - xorl %r12d,%ecx - movl 192(%r14),%r9d - xorl %ebp,%edx - jmp .Lenc_loop_compact -.align 16 -.Lenc_compact_done: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx -.byte 0xf3,0xc3 -.cfi_endproc -.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact -.align 16 -.globl aes_nohw_encrypt -.hidden aes_nohw_encrypt -.type aes_nohw_encrypt,@function -.hidden aes_nohw_encrypt -aes_nohw_encrypt: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - - - leaq -63(%rdx),%rcx - andq $-64,%rsp - subq %rsp,%rcx - negq %rcx - andq $0x3c0,%rcx - subq %rcx,%rsp - subq $32,%rsp - - movq %rsi,16(%rsp) - movq %rax,24(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x18,0x06,0x23,0x08 -.Lenc_prologue: - - movq %rdx,%r15 - movl 240(%r15),%r13d - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - - shll $4,%r13d - leaq (%r15,%r13,1),%rbp - movq %r15,(%rsp) - movq %rbp,8(%rsp) - - - leaq .LAES_Te+2048(%rip),%r14 - leaq 768(%rsp),%rbp - subq %r14,%rbp - andq $0x300,%rbp - leaq (%r14,%rbp,1),%r14 - - call _x86_64_AES_encrypt_compact - - movq 16(%rsp),%r9 - movq 24(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lenc_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_nohw_encrypt,.-aes_nohw_encrypt -.type 
_x86_64_AES_decrypt,@function -.align 16 -_x86_64_AES_decrypt: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - - movl 240(%r15),%r13d - subl $1,%r13d - jmp .Ldec_loop -.align 16 -.Ldec_loop: - - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movl 0(%r14,%rsi,8),%r10d - movl 0(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r12d - - movzbl %dh,%esi - movzbl %ah,%edi - movzbl %dl,%ebp - xorl 3(%r14,%rsi,8),%r10d - xorl 3(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r8d - - movzbl %bh,%esi - shrl $16,%eax - movzbl %ch,%ebp - xorl 3(%r14,%rsi,8),%r12d - shrl $16,%edx - xorl 3(%r14,%rbp,8),%r8d - - shrl $16,%ebx - leaq 16(%r15),%r15 - shrl $16,%ecx - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - xorl 2(%r14,%rsi,8),%r10d - xorl 2(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r12d - - movzbl %bh,%esi - movzbl %ch,%edi - movzbl %bl,%ebp - xorl 1(%r14,%rsi,8),%r10d - xorl 1(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r8d - - movzbl %dh,%esi - movl 12(%r15),%edx - movzbl %ah,%ebp - xorl 1(%r14,%rsi,8),%r12d - movl 0(%r15),%eax - xorl 1(%r14,%rbp,8),%r8d - - xorl %r10d,%eax - movl 4(%r15),%ebx - movl 8(%r15),%ecx - xorl %r12d,%ecx - xorl %r11d,%ebx - xorl %r8d,%edx - subl $1,%r13d - jnz .Ldec_loop - leaq 2048(%r14),%r14 - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movzbl (%r14,%rsi,1),%r10d - movzbl (%r14,%rdi,1),%r11d - movzbl (%r14,%rbp,1),%r12d - - movzbl %dl,%esi - movzbl %dh,%edi - movzbl %ah,%ebp - movzbl (%r14,%rsi,1),%r8d - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll $8,%edi - shll $8,%ebp - - xorl %edi,%r10d - xorl %ebp,%r11d - shrl $16,%edx - - movzbl %bh,%esi - movzbl %ch,%edi - shrl $16,%eax - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - - shll $8,%esi - shll $8,%edi - shrl $16,%ebx - xorl %esi,%r12d - xorl %edi,%r8d - shrl $16,%ecx - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll 
$16,%esi - shll $16,%edi - shll $16,%ebp - - xorl %esi,%r10d - xorl %edi,%r11d - xorl %ebp,%r12d - - movzbl %bl,%esi - movzbl %bh,%edi - movzbl %ch,%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll $16,%esi - shll $24,%edi - shll $24,%ebp - - xorl %esi,%r8d - xorl %edi,%r10d - xorl %ebp,%r11d - - movzbl %dh,%esi - movzbl %ah,%edi - movl 16+12(%r15),%edx - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movl 16+0(%r15),%eax - - shll $24,%esi - shll $24,%edi - - xorl %esi,%r12d - xorl %edi,%r8d - - movl 16+4(%r15),%ebx - movl 16+8(%r15),%ecx - leaq -2048(%r14),%r14 - xorl %r10d,%eax - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx -.byte 0xf3,0xc3 -.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt -.type _x86_64_AES_decrypt_compact,@function -.align 16 -_x86_64_AES_decrypt_compact: -.cfi_startproc - leaq 128(%r14),%r8 - movl 0-128(%r8),%edi - movl 32-128(%r8),%ebp - movl 64-128(%r8),%r10d - movl 96-128(%r8),%r11d - movl 128-128(%r8),%edi - movl 160-128(%r8),%ebp - movl 192-128(%r8),%r10d - movl 224-128(%r8),%r11d - jmp .Ldec_loop_compact - -.align 16 -.Ldec_loop_compact: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - leaq 16(%r15),%r15 - movzbl %al,%r10d - movzbl %bl,%r11d - movzbl %cl,%r12d - movzbl %dl,%r8d - movzbl %dh,%esi - movzbl %ah,%edi - shrl $16,%edx - movzbl %bh,%ebp - movzbl (%r14,%r10,1),%r10d - movzbl (%r14,%r11,1),%r11d - movzbl (%r14,%r12,1),%r12d - movzbl (%r14,%r8,1),%r8d - - movzbl (%r14,%rsi,1),%r9d - movzbl %ch,%esi - movzbl (%r14,%rdi,1),%r13d - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - - shrl $16,%ecx - shll $8,%r13d - shll $8,%r9d - movzbl %cl,%edi - shrl $16,%eax - xorl %r9d,%r10d - shrl $16,%ebx - movzbl %dl,%r9d - - shll $8,%ebp - xorl %r13d,%r11d - shll $8,%esi - movzbl %al,%r13d - movzbl (%r14,%rdi,1),%edi - xorl %ebp,%r12d - movzbl %bl,%ebp - - shll $16,%edi - xorl %esi,%r8d - movzbl (%r14,%r9,1),%r9d - movzbl %bh,%esi - movzbl 
(%r14,%rbp,1),%ebp - xorl %edi,%r10d - movzbl (%r14,%r13,1),%r13d - movzbl %ch,%edi - - shll $16,%ebp - shll $16,%r9d - shll $16,%r13d - xorl %ebp,%r8d - movzbl %dh,%ebp - xorl %r9d,%r11d - shrl $8,%eax - xorl %r13d,%r12d - - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%ebx - movzbl (%r14,%rbp,1),%ecx - movzbl (%r14,%rax,1),%edx - - movl %r10d,%eax - shll $24,%esi - shll $24,%ebx - shll $24,%ecx - xorl %esi,%eax - shll $24,%edx - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - cmpq 16(%rsp),%r15 - je .Ldec_compact_done - - movq 256+0(%r14),%rsi - shlq $32,%rbx - shlq $32,%rdx - movq 256+8(%r14),%rdi - orq %rbx,%rax - orq %rdx,%rcx - movq 256+16(%r14),%rbp - movq %rsi,%r9 - movq %rsi,%r12 - andq %rax,%r9 - andq %rcx,%r12 - movq %r9,%rbx - movq %r12,%rdx - shrq $7,%r9 - leaq (%rax,%rax,1),%r8 - shrq $7,%r12 - leaq (%rcx,%rcx,1),%r11 - subq %r9,%rbx - subq %r12,%rdx - andq %rdi,%r8 - andq %rdi,%r11 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r8 - xorq %rdx,%r11 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r8,%r10 - andq %r11,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - leaq (%r8,%r8,1),%r9 - shrq $7,%r13 - leaq (%r11,%r11,1),%r12 - subq %r10,%rbx - subq %r13,%rdx - andq %rdi,%r9 - andq %rdi,%r12 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r9 - xorq %rdx,%r12 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r9,%r10 - andq %r12,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - xorq %rax,%r8 - shrq $7,%r13 - xorq %rcx,%r11 - subq %r10,%rbx - subq %r13,%rdx - leaq (%r9,%r9,1),%r10 - leaq (%r12,%r12,1),%r13 - xorq %rax,%r9 - xorq %rcx,%r12 - andq %rdi,%r10 - andq %rdi,%r13 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r10 - xorq %rdx,%r13 - - xorq %r10,%rax - xorq %r13,%rcx - xorq %r10,%r8 - xorq %r13,%r11 - movq %rax,%rbx - movq %rcx,%rdx - xorq %r10,%r9 - shrq $32,%rbx - xorq %r13,%r12 - shrq $32,%rdx - xorq %r8,%r10 - roll $8,%eax - xorq %r11,%r13 - roll $8,%ecx - xorq %r9,%r10 - roll $8,%ebx - xorq %r12,%r13 - - roll $8,%edx - xorl 
%r10d,%eax - shrq $32,%r10 - xorl %r13d,%ecx - shrq $32,%r13 - xorl %r10d,%ebx - xorl %r13d,%edx - - movq %r8,%r10 - roll $24,%r8d - movq %r11,%r13 - roll $24,%r11d - shrq $32,%r10 - xorl %r8d,%eax - shrq $32,%r13 - xorl %r11d,%ecx - roll $24,%r10d - movq %r9,%r8 - roll $24,%r13d - movq %r12,%r11 - shrq $32,%r8 - xorl %r10d,%ebx - shrq $32,%r11 - xorl %r13d,%edx - - movq 0(%r14),%rsi - roll $16,%r9d - movq 64(%r14),%rdi - roll $16,%r12d - movq 128(%r14),%rbp - roll $16,%r8d - movq 192(%r14),%r10 - xorl %r9d,%eax - roll $16,%r11d - xorl %r12d,%ecx - movq 256(%r14),%r13 - xorl %r8d,%ebx - xorl %r11d,%edx - jmp .Ldec_loop_compact -.align 16 -.Ldec_compact_done: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx -.byte 0xf3,0xc3 -.cfi_endproc -.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact -.align 16 -.globl aes_nohw_decrypt -.hidden aes_nohw_decrypt -.type aes_nohw_decrypt,@function -.hidden aes_nohw_decrypt -aes_nohw_decrypt: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - - - leaq -63(%rdx),%rcx - andq $-64,%rsp - subq %rsp,%rcx - negq %rcx - andq $0x3c0,%rcx - subq %rcx,%rsp - subq $32,%rsp - - movq %rsi,16(%rsp) - movq %rax,24(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x18,0x06,0x23,0x08 -.Ldec_prologue: - - movq %rdx,%r15 - movl 240(%r15),%r13d - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - - shll $4,%r13d - leaq (%r15,%r13,1),%rbp - movq %r15,(%rsp) - movq %rbp,8(%rsp) - - - leaq .LAES_Td+2048(%rip),%r14 - leaq 768(%rsp),%rbp - subq %r14,%rbp - andq $0x300,%rbp - leaq (%r14,%rbp,1),%r14 - shrq $3,%rbp - addq %rbp,%r14 - - call _x86_64_AES_decrypt_compact - - movq 16(%rsp),%r9 - movq 24(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl 
%ecx,8(%r9) - movl %edx,12(%r9) - - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Ldec_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_nohw_decrypt,.-aes_nohw_decrypt -.align 16 -.globl aes_nohw_set_encrypt_key -.hidden aes_nohw_set_encrypt_key -.type aes_nohw_set_encrypt_key,@function -aes_nohw_set_encrypt_key: -.cfi_startproc - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $8,%rsp -.cfi_adjust_cfa_offset 8 -.Lenc_key_prologue: - - call _x86_64_AES_set_encrypt_key - - movq 40(%rsp),%rbp -.cfi_restore %rbp - movq 48(%rsp),%rbx -.cfi_restore %rbx - addq $56,%rsp -.cfi_adjust_cfa_offset -56 -.Lenc_key_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_nohw_set_encrypt_key,.-aes_nohw_set_encrypt_key - -.type _x86_64_AES_set_encrypt_key,@function -.align 16 -_x86_64_AES_set_encrypt_key: -.cfi_startproc - movl %esi,%ecx - movq %rdi,%rsi - movq %rdx,%rdi - - testq $-1,%rsi - jz .Lbadpointer - testq $-1,%rdi - jz .Lbadpointer - - leaq .LAES_Te(%rip),%rbp - leaq 2048+128(%rbp),%rbp - - - movl 0-128(%rbp),%eax - movl 32-128(%rbp),%ebx - movl 64-128(%rbp),%r8d - movl 96-128(%rbp),%edx - movl 128-128(%rbp),%eax - movl 160-128(%rbp),%ebx - movl 192-128(%rbp),%r8d - movl 224-128(%rbp),%edx - - cmpl $128,%ecx - je .L10rounds - cmpl $192,%ecx - je .L12rounds - cmpl $256,%ecx - je .L14rounds - movq $-2,%rax - jmp .Lexit - -.L10rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rdx - movq %rax,0(%rdi) - movq %rdx,8(%rdi) 
- - shrq $32,%rdx - xorl %ecx,%ecx - jmp .L10shortcut -.align 4 -.L10loop: - movl 0(%rdi),%eax - movl 12(%rdi),%edx -.L10shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,16(%rdi) - xorl 4(%rdi),%eax - movl %eax,20(%rdi) - xorl 8(%rdi),%eax - movl %eax,24(%rdi) - xorl 12(%rdi),%eax - movl %eax,28(%rdi) - addl $1,%ecx - leaq 16(%rdi),%rdi - cmpl $10,%ecx - jl .L10loop - - movl $10,80(%rdi) - xorq %rax,%rax - jmp .Lexit - -.L12rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 16(%rsi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rdx,16(%rdi) - - shrq $32,%rdx - xorl %ecx,%ecx - jmp .L12shortcut -.align 4 -.L12loop: - movl 0(%rdi),%eax - movl 20(%rdi),%edx -.L12shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,24(%rdi) - xorl 4(%rdi),%eax - movl %eax,28(%rdi) - xorl 8(%rdi),%eax - movl %eax,32(%rdi) - xorl 12(%rdi),%eax - movl %eax,36(%rdi) - - cmpl $7,%ecx - je .L12break - addl $1,%ecx - - xorl 16(%rdi),%eax - movl %eax,40(%rdi) - xorl 20(%rdi),%eax - movl %eax,44(%rdi) - - leaq 24(%rdi),%rdi - jmp .L12loop -.L12break: - movl $12,72(%rdi) - xorq %rax,%rax - jmp .Lexit - -.L14rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 16(%rsi),%rcx - movq 24(%rsi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - - shrq $32,%rdx - xorl %ecx,%ecx 
- jmp .L14shortcut -.align 4 -.L14loop: - movl 0(%rdi),%eax - movl 28(%rdi),%edx -.L14shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,32(%rdi) - xorl 4(%rdi),%eax - movl %eax,36(%rdi) - xorl 8(%rdi),%eax - movl %eax,40(%rdi) - xorl 12(%rdi),%eax - movl %eax,44(%rdi) - - cmpl $6,%ecx - je .L14break - addl $1,%ecx - - movl %eax,%edx - movl 16(%rdi),%eax - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - shll $8,%ebx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $16,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $24,%ebx - xorl %ebx,%eax - - movl %eax,48(%rdi) - xorl 20(%rdi),%eax - movl %eax,52(%rdi) - xorl 24(%rdi),%eax - movl %eax,56(%rdi) - xorl 28(%rdi),%eax - movl %eax,60(%rdi) - - leaq 32(%rdi),%rdi - jmp .L14loop -.L14break: - movl $14,48(%rdi) - xorq %rax,%rax - jmp .Lexit - -.Lbadpointer: - movq $-1,%rax -.Lexit: -.byte 0xf3,0xc3 -.cfi_endproc -.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key -.align 16 -.globl aes_nohw_set_decrypt_key -.hidden aes_nohw_set_decrypt_key -.type aes_nohw_set_decrypt_key,@function -aes_nohw_set_decrypt_key: -.cfi_startproc - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - pushq %rdx -.cfi_adjust_cfa_offset 8 
-.Ldec_key_prologue: - - call _x86_64_AES_set_encrypt_key - movq (%rsp),%r8 - cmpl $0,%eax - jne .Labort - - movl 240(%r8),%r14d - xorq %rdi,%rdi - leaq (%rdi,%r14,4),%rcx - movq %r8,%rsi - leaq (%r8,%rcx,4),%rdi -.align 4 -.Linvert: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 0(%rdi),%rcx - movq 8(%rdi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,0(%rsi) - movq %rdx,8(%rsi) - leaq 16(%rsi),%rsi - leaq -16(%rdi),%rdi - cmpq %rsi,%rdi - jne .Linvert - - leaq .LAES_Te+2048+1024(%rip),%rax - - movq 40(%rax),%rsi - movq 48(%rax),%rdi - movq 56(%rax),%rbp - - movq %r8,%r15 - subl $1,%r14d -.align 4 -.Lpermute: - leaq 16(%r15),%r15 - movq 0(%r15),%rax - movq 8(%r15),%rcx - movq %rsi,%r9 - movq %rsi,%r12 - andq %rax,%r9 - andq %rcx,%r12 - movq %r9,%rbx - movq %r12,%rdx - shrq $7,%r9 - leaq (%rax,%rax,1),%r8 - shrq $7,%r12 - leaq (%rcx,%rcx,1),%r11 - subq %r9,%rbx - subq %r12,%rdx - andq %rdi,%r8 - andq %rdi,%r11 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r8 - xorq %rdx,%r11 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r8,%r10 - andq %r11,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - leaq (%r8,%r8,1),%r9 - shrq $7,%r13 - leaq (%r11,%r11,1),%r12 - subq %r10,%rbx - subq %r13,%rdx - andq %rdi,%r9 - andq %rdi,%r12 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r9 - xorq %rdx,%r12 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r9,%r10 - andq %r12,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - xorq %rax,%r8 - shrq $7,%r13 - xorq %rcx,%r11 - subq %r10,%rbx - subq %r13,%rdx - leaq (%r9,%r9,1),%r10 - leaq (%r12,%r12,1),%r13 - xorq %rax,%r9 - xorq %rcx,%r12 - andq %rdi,%r10 - andq %rdi,%r13 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r10 - xorq %rdx,%r13 - - xorq %r10,%rax - xorq %r13,%rcx - xorq %r10,%r8 - xorq %r13,%r11 - movq %rax,%rbx - movq %rcx,%rdx - xorq %r10,%r9 - shrq $32,%rbx - xorq %r13,%r12 - shrq $32,%rdx - xorq %r8,%r10 - roll $8,%eax - xorq %r11,%r13 - roll $8,%ecx - xorq %r9,%r10 - roll $8,%ebx - xorq %r12,%r13 - - roll $8,%edx 
- xorl %r10d,%eax - shrq $32,%r10 - xorl %r13d,%ecx - shrq $32,%r13 - xorl %r10d,%ebx - xorl %r13d,%edx - - movq %r8,%r10 - roll $24,%r8d - movq %r11,%r13 - roll $24,%r11d - shrq $32,%r10 - xorl %r8d,%eax - shrq $32,%r13 - xorl %r11d,%ecx - roll $24,%r10d - movq %r9,%r8 - roll $24,%r13d - movq %r12,%r11 - shrq $32,%r8 - xorl %r10d,%ebx - shrq $32,%r11 - xorl %r13d,%edx - - - roll $16,%r9d - - roll $16,%r12d - - roll $16,%r8d - - xorl %r9d,%eax - roll $16,%r11d - xorl %r12d,%ecx - - xorl %r8d,%ebx - xorl %r11d,%edx - movl %eax,0(%r15) - movl %ebx,4(%r15) - movl %ecx,8(%r15) - movl %edx,12(%r15) - subl $1,%r14d - jnz .Lpermute - - xorq %rax,%rax -.Labort: - movq 8(%rsp),%r15 -.cfi_restore %r15 - movq 16(%rsp),%r14 -.cfi_restore %r14 - movq 24(%rsp),%r13 -.cfi_restore %r13 - movq 32(%rsp),%r12 -.cfi_restore %r12 - movq 40(%rsp),%rbp -.cfi_restore %rbp - movq 48(%rsp),%rbx -.cfi_restore %rbx - addq $56,%rsp -.cfi_adjust_cfa_offset -56 -.Ldec_key_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_nohw_set_decrypt_key,.-aes_nohw_set_decrypt_key -.align 16 -.globl aes_nohw_cbc_encrypt -.hidden aes_nohw_cbc_encrypt -.type aes_nohw_cbc_encrypt,@function -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.hidden aes_nohw_cbc_encrypt -aes_nohw_cbc_encrypt: -.cfi_startproc - cmpq $0,%rdx - je .Lcbc_epilogue - pushfq - - -.cfi_adjust_cfa_offset 8 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-32 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-40 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-48 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-56 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-64 -.Lcbc_prologue: - - cld - movl %r9d,%r9d - - leaq .LAES_Te(%rip),%r14 - leaq .LAES_Td(%rip),%r10 - cmpq $0,%r9 - cmoveq %r10,%r14 - -.cfi_remember_state - leaq OPENSSL_ia32cap_P(%rip),%r10 - movl (%r10),%r10d - cmpq $512,%rdx - jb .Lcbc_slow_prologue - testq $15,%rdx - 
jnz .Lcbc_slow_prologue - btl $28,%r10d - jc .Lcbc_slow_prologue - - - leaq -88-248(%rsp),%r15 - andq $-64,%r15 - - - movq %r14,%r10 - leaq 2304(%r14),%r11 - movq %r15,%r12 - andq $0xFFF,%r10 - andq $0xFFF,%r11 - andq $0xFFF,%r12 - - cmpq %r11,%r12 - jb .Lcbc_te_break_out - subq %r11,%r12 - subq %r12,%r15 - jmp .Lcbc_te_ok -.Lcbc_te_break_out: - subq %r10,%r12 - andq $0xFFF,%r12 - addq $320,%r12 - subq %r12,%r15 -.align 4 -.Lcbc_te_ok: - - xchgq %rsp,%r15 -.cfi_def_cfa_register %r15 - - movq %r15,16(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x40 -.Lcbc_fast_body: - movq %rdi,24(%rsp) - movq %rsi,32(%rsp) - movq %rdx,40(%rsp) - movq %rcx,48(%rsp) - movq %r8,56(%rsp) - movl $0,80+240(%rsp) - movq %r8,%rbp - movq %r9,%rbx - movq %rsi,%r9 - movq %rdi,%r8 - movq %rcx,%r15 - - movl 240(%r15),%eax - - movq %r15,%r10 - subq %r14,%r10 - andq $0xfff,%r10 - cmpq $2304,%r10 - jb .Lcbc_do_ecopy - cmpq $4096-248,%r10 - jb .Lcbc_skip_ecopy -.align 4 -.Lcbc_do_ecopy: - movq %r15,%rsi - leaq 80(%rsp),%rdi - leaq 80(%rsp),%r15 - movl $30,%ecx -.long 0x90A548F3 - movl %eax,(%rdi) -.Lcbc_skip_ecopy: - movq %r15,0(%rsp) - - movl $18,%ecx -.align 4 -.Lcbc_prefetch_te: - movq 0(%r14),%r10 - movq 32(%r14),%r11 - movq 64(%r14),%r12 - movq 96(%r14),%r13 - leaq 128(%r14),%r14 - subl $1,%ecx - jnz .Lcbc_prefetch_te - leaq -2304(%r14),%r14 - - cmpq $0,%rbx - je .LFAST_DECRYPT - - - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - -.align 4 -.Lcbc_fast_enc_loop: - xorl 0(%r8),%eax - xorl 4(%r8),%ebx - xorl 8(%r8),%ecx - xorl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_encrypt - - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - subq $16,%r10 - testq $-16,%r10 - movq %r10,40(%rsp) - jnz .Lcbc_fast_enc_loop - movq 56(%rsp),%rbp - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - jmp 
.Lcbc_fast_cleanup - - -.align 16 -.LFAST_DECRYPT: - cmpq %r8,%r9 - je .Lcbc_fast_dec_in_place - - movq %rbp,64(%rsp) -.align 4 -.Lcbc_fast_dec_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_decrypt - - movq 64(%rsp),%rbp - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - xorl 0(%rbp),%eax - xorl 4(%rbp),%ebx - xorl 8(%rbp),%ecx - xorl 12(%rbp),%edx - movq %r8,%rbp - - subq $16,%r10 - movq %r10,40(%rsp) - movq %rbp,64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - jnz .Lcbc_fast_dec_loop - movq 56(%rsp),%r12 - movq 0(%rbp),%r10 - movq 8(%rbp),%r11 - movq %r10,0(%r12) - movq %r11,8(%r12) - jmp .Lcbc_fast_cleanup - -.align 16 -.Lcbc_fast_dec_in_place: - movq 0(%rbp),%r10 - movq 8(%rbp),%r11 - movq %r10,0+64(%rsp) - movq %r11,8+64(%rsp) -.align 4 -.Lcbc_fast_dec_in_place_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_decrypt - - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - xorl 0+64(%rsp),%eax - xorl 4+64(%rsp),%ebx - xorl 8+64(%rsp),%ecx - xorl 12+64(%rsp),%edx - - movq 0(%r8),%r11 - movq 8(%r8),%r12 - subq $16,%r10 - jz .Lcbc_fast_dec_in_place_done - - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - movq %r10,40(%rsp) - jmp .Lcbc_fast_dec_in_place_loop -.Lcbc_fast_dec_in_place_done: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - -.align 4 -.Lcbc_fast_cleanup: - cmpl $0,80+240(%rsp) - leaq 80(%rsp),%rdi - je .Lcbc_exit - movl $30,%ecx - xorq %rax,%rax -.long 0x90AB48F3 - - jmp .Lcbc_exit - - -.align 16 -.Lcbc_slow_prologue: -.cfi_restore_state - - leaq -88(%rsp),%rbp - andq $-64,%rbp - - leaq 
-88-63(%rcx),%r10 - subq %rbp,%r10 - negq %r10 - andq $0x3c0,%r10 - subq %r10,%rbp - - xchgq %rsp,%rbp -.cfi_def_cfa_register %rbp - - movq %rbp,16(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x40 -.Lcbc_slow_body: - - - - - movq %r8,56(%rsp) - movq %r8,%rbp - movq %r9,%rbx - movq %rsi,%r9 - movq %rdi,%r8 - movq %rcx,%r15 - movq %rdx,%r10 - - movl 240(%r15),%eax - movq %r15,0(%rsp) - shll $4,%eax - leaq (%r15,%rax,1),%rax - movq %rax,8(%rsp) - - - leaq 2048(%r14),%r14 - leaq 768-8(%rsp),%rax - subq %r14,%rax - andq $0x300,%rax - leaq (%r14,%rax,1),%r14 - - cmpq $0,%rbx - je .LSLOW_DECRYPT - - - testq $-16,%r10 - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - jz .Lcbc_slow_enc_tail - -.align 4 -.Lcbc_slow_enc_loop: - xorl 0(%r8),%eax - xorl 4(%r8),%ebx - xorl 8(%r8),%ecx - xorl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - movq %r9,32(%rsp) - movq %r10,40(%rsp) - - call _x86_64_AES_encrypt_compact - - movq 24(%rsp),%r8 - movq 32(%rsp),%r9 - movq 40(%rsp),%r10 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - subq $16,%r10 - testq $-16,%r10 - jnz .Lcbc_slow_enc_loop - testq $15,%r10 - jnz .Lcbc_slow_enc_tail - movq 56(%rsp),%rbp - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - jmp .Lcbc_exit - -.align 4 -.Lcbc_slow_enc_tail: - movq %rax,%r11 - movq %rcx,%r12 - movq %r10,%rcx - movq %r8,%rsi - movq %r9,%rdi -.long 0x9066A4F3 - movq $16,%rcx - subq %r10,%rcx - xorq %rax,%rax -.long 0x9066AAF3 - movq %r9,%r8 - movq $16,%r10 - movq %r11,%rax - movq %r12,%rcx - jmp .Lcbc_slow_enc_loop - -.align 16 -.LSLOW_DECRYPT: - shrq $3,%rax - addq %rax,%r14 - - movq 0(%rbp),%r11 - movq 8(%rbp),%r12 - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - -.align 4 -.Lcbc_slow_dec_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - movq %r9,32(%rsp) - movq %r10,40(%rsp) - 
- call _x86_64_AES_decrypt_compact - - movq 24(%rsp),%r8 - movq 32(%rsp),%r9 - movq 40(%rsp),%r10 - xorl 0+64(%rsp),%eax - xorl 4+64(%rsp),%ebx - xorl 8+64(%rsp),%ecx - xorl 12+64(%rsp),%edx - - movq 0(%r8),%r11 - movq 8(%r8),%r12 - subq $16,%r10 - jc .Lcbc_slow_dec_partial - jz .Lcbc_slow_dec_done - - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - jmp .Lcbc_slow_dec_loop -.Lcbc_slow_dec_done: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - jmp .Lcbc_exit - -.align 4 -.Lcbc_slow_dec_partial: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0+64(%rsp) - movl %ebx,4+64(%rsp) - movl %ecx,8+64(%rsp) - movl %edx,12+64(%rsp) - - movq %r9,%rdi - leaq 64(%rsp),%rsi - leaq 16(%r10),%rcx -.long 0x9066A4F3 - jmp .Lcbc_exit - -.align 16 -.Lcbc_exit: - movq 16(%rsp),%rsi -.cfi_def_cfa %rsi,64 - movq (%rsi),%r15 -.cfi_restore %r15 - movq 8(%rsi),%r14 -.cfi_restore %r14 - movq 16(%rsi),%r13 -.cfi_restore %r13 - movq 24(%rsi),%r12 -.cfi_restore %r12 - movq 32(%rsi),%rbp -.cfi_restore %rbp - movq 40(%rsi),%rbx -.cfi_restore %rbx - leaq 48(%rsi),%rsp -.cfi_def_cfa %rsp,16 -.Lcbc_popfq: - popfq - - -.cfi_adjust_cfa_offset -8 -.Lcbc_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_nohw_cbc_encrypt,.-aes_nohw_cbc_encrypt -.align 64 -.LAES_Te: -.long 0xa56363c6,0xa56363c6 -.long 0x847c7cf8,0x847c7cf8 -.long 0x997777ee,0x997777ee -.long 0x8d7b7bf6,0x8d7b7bf6 -.long 0x0df2f2ff,0x0df2f2ff -.long 0xbd6b6bd6,0xbd6b6bd6 -.long 0xb16f6fde,0xb16f6fde -.long 0x54c5c591,0x54c5c591 -.long 0x50303060,0x50303060 -.long 0x03010102,0x03010102 -.long 0xa96767ce,0xa96767ce -.long 0x7d2b2b56,0x7d2b2b56 -.long 0x19fefee7,0x19fefee7 -.long 0x62d7d7b5,0x62d7d7b5 -.long 0xe6abab4d,0xe6abab4d -.long 0x9a7676ec,0x9a7676ec -.long 0x45caca8f,0x45caca8f -.long 
0x9d82821f,0x9d82821f -.long 0x40c9c989,0x40c9c989 -.long 0x877d7dfa,0x877d7dfa -.long 0x15fafaef,0x15fafaef -.long 0xeb5959b2,0xeb5959b2 -.long 0xc947478e,0xc947478e -.long 0x0bf0f0fb,0x0bf0f0fb -.long 0xecadad41,0xecadad41 -.long 0x67d4d4b3,0x67d4d4b3 -.long 0xfda2a25f,0xfda2a25f -.long 0xeaafaf45,0xeaafaf45 -.long 0xbf9c9c23,0xbf9c9c23 -.long 0xf7a4a453,0xf7a4a453 -.long 0x967272e4,0x967272e4 -.long 0x5bc0c09b,0x5bc0c09b -.long 0xc2b7b775,0xc2b7b775 -.long 0x1cfdfde1,0x1cfdfde1 -.long 0xae93933d,0xae93933d -.long 0x6a26264c,0x6a26264c -.long 0x5a36366c,0x5a36366c -.long 0x413f3f7e,0x413f3f7e -.long 0x02f7f7f5,0x02f7f7f5 -.long 0x4fcccc83,0x4fcccc83 -.long 0x5c343468,0x5c343468 -.long 0xf4a5a551,0xf4a5a551 -.long 0x34e5e5d1,0x34e5e5d1 -.long 0x08f1f1f9,0x08f1f1f9 -.long 0x937171e2,0x937171e2 -.long 0x73d8d8ab,0x73d8d8ab -.long 0x53313162,0x53313162 -.long 0x3f15152a,0x3f15152a -.long 0x0c040408,0x0c040408 -.long 0x52c7c795,0x52c7c795 -.long 0x65232346,0x65232346 -.long 0x5ec3c39d,0x5ec3c39d -.long 0x28181830,0x28181830 -.long 0xa1969637,0xa1969637 -.long 0x0f05050a,0x0f05050a -.long 0xb59a9a2f,0xb59a9a2f -.long 0x0907070e,0x0907070e -.long 0x36121224,0x36121224 -.long 0x9b80801b,0x9b80801b -.long 0x3de2e2df,0x3de2e2df -.long 0x26ebebcd,0x26ebebcd -.long 0x6927274e,0x6927274e -.long 0xcdb2b27f,0xcdb2b27f -.long 0x9f7575ea,0x9f7575ea -.long 0x1b090912,0x1b090912 -.long 0x9e83831d,0x9e83831d -.long 0x742c2c58,0x742c2c58 -.long 0x2e1a1a34,0x2e1a1a34 -.long 0x2d1b1b36,0x2d1b1b36 -.long 0xb26e6edc,0xb26e6edc -.long 0xee5a5ab4,0xee5a5ab4 -.long 0xfba0a05b,0xfba0a05b -.long 0xf65252a4,0xf65252a4 -.long 0x4d3b3b76,0x4d3b3b76 -.long 0x61d6d6b7,0x61d6d6b7 -.long 0xceb3b37d,0xceb3b37d -.long 0x7b292952,0x7b292952 -.long 0x3ee3e3dd,0x3ee3e3dd -.long 0x712f2f5e,0x712f2f5e -.long 0x97848413,0x97848413 -.long 0xf55353a6,0xf55353a6 -.long 0x68d1d1b9,0x68d1d1b9 -.long 0x00000000,0x00000000 -.long 0x2cededc1,0x2cededc1 -.long 0x60202040,0x60202040 -.long 0x1ffcfce3,0x1ffcfce3 
-.long 0xc8b1b179,0xc8b1b179 -.long 0xed5b5bb6,0xed5b5bb6 -.long 0xbe6a6ad4,0xbe6a6ad4 -.long 0x46cbcb8d,0x46cbcb8d -.long 0xd9bebe67,0xd9bebe67 -.long 0x4b393972,0x4b393972 -.long 0xde4a4a94,0xde4a4a94 -.long 0xd44c4c98,0xd44c4c98 -.long 0xe85858b0,0xe85858b0 -.long 0x4acfcf85,0x4acfcf85 -.long 0x6bd0d0bb,0x6bd0d0bb -.long 0x2aefefc5,0x2aefefc5 -.long 0xe5aaaa4f,0xe5aaaa4f -.long 0x16fbfbed,0x16fbfbed -.long 0xc5434386,0xc5434386 -.long 0xd74d4d9a,0xd74d4d9a -.long 0x55333366,0x55333366 -.long 0x94858511,0x94858511 -.long 0xcf45458a,0xcf45458a -.long 0x10f9f9e9,0x10f9f9e9 -.long 0x06020204,0x06020204 -.long 0x817f7ffe,0x817f7ffe -.long 0xf05050a0,0xf05050a0 -.long 0x443c3c78,0x443c3c78 -.long 0xba9f9f25,0xba9f9f25 -.long 0xe3a8a84b,0xe3a8a84b -.long 0xf35151a2,0xf35151a2 -.long 0xfea3a35d,0xfea3a35d -.long 0xc0404080,0xc0404080 -.long 0x8a8f8f05,0x8a8f8f05 -.long 0xad92923f,0xad92923f -.long 0xbc9d9d21,0xbc9d9d21 -.long 0x48383870,0x48383870 -.long 0x04f5f5f1,0x04f5f5f1 -.long 0xdfbcbc63,0xdfbcbc63 -.long 0xc1b6b677,0xc1b6b677 -.long 0x75dadaaf,0x75dadaaf -.long 0x63212142,0x63212142 -.long 0x30101020,0x30101020 -.long 0x1affffe5,0x1affffe5 -.long 0x0ef3f3fd,0x0ef3f3fd -.long 0x6dd2d2bf,0x6dd2d2bf -.long 0x4ccdcd81,0x4ccdcd81 -.long 0x140c0c18,0x140c0c18 -.long 0x35131326,0x35131326 -.long 0x2fececc3,0x2fececc3 -.long 0xe15f5fbe,0xe15f5fbe -.long 0xa2979735,0xa2979735 -.long 0xcc444488,0xcc444488 -.long 0x3917172e,0x3917172e -.long 0x57c4c493,0x57c4c493 -.long 0xf2a7a755,0xf2a7a755 -.long 0x827e7efc,0x827e7efc -.long 0x473d3d7a,0x473d3d7a -.long 0xac6464c8,0xac6464c8 -.long 0xe75d5dba,0xe75d5dba -.long 0x2b191932,0x2b191932 -.long 0x957373e6,0x957373e6 -.long 0xa06060c0,0xa06060c0 -.long 0x98818119,0x98818119 -.long 0xd14f4f9e,0xd14f4f9e -.long 0x7fdcdca3,0x7fdcdca3 -.long 0x66222244,0x66222244 -.long 0x7e2a2a54,0x7e2a2a54 -.long 0xab90903b,0xab90903b -.long 0x8388880b,0x8388880b -.long 0xca46468c,0xca46468c -.long 0x29eeeec7,0x29eeeec7 -.long 
0xd3b8b86b,0xd3b8b86b -.long 0x3c141428,0x3c141428 -.long 0x79dedea7,0x79dedea7 -.long 0xe25e5ebc,0xe25e5ebc -.long 0x1d0b0b16,0x1d0b0b16 -.long 0x76dbdbad,0x76dbdbad -.long 0x3be0e0db,0x3be0e0db -.long 0x56323264,0x56323264 -.long 0x4e3a3a74,0x4e3a3a74 -.long 0x1e0a0a14,0x1e0a0a14 -.long 0xdb494992,0xdb494992 -.long 0x0a06060c,0x0a06060c -.long 0x6c242448,0x6c242448 -.long 0xe45c5cb8,0xe45c5cb8 -.long 0x5dc2c29f,0x5dc2c29f -.long 0x6ed3d3bd,0x6ed3d3bd -.long 0xefacac43,0xefacac43 -.long 0xa66262c4,0xa66262c4 -.long 0xa8919139,0xa8919139 -.long 0xa4959531,0xa4959531 -.long 0x37e4e4d3,0x37e4e4d3 -.long 0x8b7979f2,0x8b7979f2 -.long 0x32e7e7d5,0x32e7e7d5 -.long 0x43c8c88b,0x43c8c88b -.long 0x5937376e,0x5937376e -.long 0xb76d6dda,0xb76d6dda -.long 0x8c8d8d01,0x8c8d8d01 -.long 0x64d5d5b1,0x64d5d5b1 -.long 0xd24e4e9c,0xd24e4e9c -.long 0xe0a9a949,0xe0a9a949 -.long 0xb46c6cd8,0xb46c6cd8 -.long 0xfa5656ac,0xfa5656ac -.long 0x07f4f4f3,0x07f4f4f3 -.long 0x25eaeacf,0x25eaeacf -.long 0xaf6565ca,0xaf6565ca -.long 0x8e7a7af4,0x8e7a7af4 -.long 0xe9aeae47,0xe9aeae47 -.long 0x18080810,0x18080810 -.long 0xd5baba6f,0xd5baba6f -.long 0x887878f0,0x887878f0 -.long 0x6f25254a,0x6f25254a -.long 0x722e2e5c,0x722e2e5c -.long 0x241c1c38,0x241c1c38 -.long 0xf1a6a657,0xf1a6a657 -.long 0xc7b4b473,0xc7b4b473 -.long 0x51c6c697,0x51c6c697 -.long 0x23e8e8cb,0x23e8e8cb -.long 0x7cdddda1,0x7cdddda1 -.long 0x9c7474e8,0x9c7474e8 -.long 0x211f1f3e,0x211f1f3e -.long 0xdd4b4b96,0xdd4b4b96 -.long 0xdcbdbd61,0xdcbdbd61 -.long 0x868b8b0d,0x868b8b0d -.long 0x858a8a0f,0x858a8a0f -.long 0x907070e0,0x907070e0 -.long 0x423e3e7c,0x423e3e7c -.long 0xc4b5b571,0xc4b5b571 -.long 0xaa6666cc,0xaa6666cc -.long 0xd8484890,0xd8484890 -.long 0x05030306,0x05030306 -.long 0x01f6f6f7,0x01f6f6f7 -.long 0x120e0e1c,0x120e0e1c -.long 0xa36161c2,0xa36161c2 -.long 0x5f35356a,0x5f35356a -.long 0xf95757ae,0xf95757ae -.long 0xd0b9b969,0xd0b9b969 -.long 0x91868617,0x91868617 -.long 0x58c1c199,0x58c1c199 -.long 0x271d1d3a,0x271d1d3a 
-.long 0xb99e9e27,0xb99e9e27 -.long 0x38e1e1d9,0x38e1e1d9 -.long 0x13f8f8eb,0x13f8f8eb -.long 0xb398982b,0xb398982b -.long 0x33111122,0x33111122 -.long 0xbb6969d2,0xbb6969d2 -.long 0x70d9d9a9,0x70d9d9a9 -.long 0x898e8e07,0x898e8e07 -.long 0xa7949433,0xa7949433 -.long 0xb69b9b2d,0xb69b9b2d -.long 0x221e1e3c,0x221e1e3c -.long 0x92878715,0x92878715 -.long 0x20e9e9c9,0x20e9e9c9 -.long 0x49cece87,0x49cece87 -.long 0xff5555aa,0xff5555aa -.long 0x78282850,0x78282850 -.long 0x7adfdfa5,0x7adfdfa5 -.long 0x8f8c8c03,0x8f8c8c03 -.long 0xf8a1a159,0xf8a1a159 -.long 0x80898909,0x80898909 -.long 0x170d0d1a,0x170d0d1a -.long 0xdabfbf65,0xdabfbf65 -.long 0x31e6e6d7,0x31e6e6d7 -.long 0xc6424284,0xc6424284 -.long 0xb86868d0,0xb86868d0 -.long 0xc3414182,0xc3414182 -.long 0xb0999929,0xb0999929 -.long 0x772d2d5a,0x772d2d5a -.long 0x110f0f1e,0x110f0f1e -.long 0xcbb0b07b,0xcbb0b07b -.long 0xfc5454a8,0xfc5454a8 -.long 0xd6bbbb6d,0xd6bbbb6d -.long 0x3a16162c,0x3a16162c -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 
0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.long 0x00000001, 0x00000002, 0x00000004, 0x00000008 -.long 0x00000010, 0x00000020, 0x00000040, 0x00000080 -.long 0x0000001b, 0x00000036, 0x80808080, 0x80808080 -.long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b -.align 64 -.LAES_Td: -.long 0x50a7f451,0x50a7f451 -.long 0x5365417e,0x5365417e -.long 0xc3a4171a,0xc3a4171a -.long 0x965e273a,0x965e273a -.long 0xcb6bab3b,0xcb6bab3b -.long 0xf1459d1f,0xf1459d1f -.long 0xab58faac,0xab58faac -.long 0x9303e34b,0x9303e34b -.long 0x55fa3020,0x55fa3020 -.long 0xf66d76ad,0xf66d76ad -.long 0x9176cc88,0x9176cc88 -.long 0x254c02f5,0x254c02f5 -.long 0xfcd7e54f,0xfcd7e54f -.long 0xd7cb2ac5,0xd7cb2ac5 -.long 0x80443526,0x80443526 -.long 0x8fa362b5,0x8fa362b5 -.long 0x495ab1de,0x495ab1de -.long 0x671bba25,0x671bba25 -.long 0x980eea45,0x980eea45 -.long 0xe1c0fe5d,0xe1c0fe5d -.long 0x02752fc3,0x02752fc3 -.long 0x12f04c81,0x12f04c81 -.long 0xa397468d,0xa397468d -.long 0xc6f9d36b,0xc6f9d36b -.long 0xe75f8f03,0xe75f8f03 -.long 
0x959c9215,0x959c9215 -.long 0xeb7a6dbf,0xeb7a6dbf -.long 0xda595295,0xda595295 -.long 0x2d83bed4,0x2d83bed4 -.long 0xd3217458,0xd3217458 -.long 0x2969e049,0x2969e049 -.long 0x44c8c98e,0x44c8c98e -.long 0x6a89c275,0x6a89c275 -.long 0x78798ef4,0x78798ef4 -.long 0x6b3e5899,0x6b3e5899 -.long 0xdd71b927,0xdd71b927 -.long 0xb64fe1be,0xb64fe1be -.long 0x17ad88f0,0x17ad88f0 -.long 0x66ac20c9,0x66ac20c9 -.long 0xb43ace7d,0xb43ace7d -.long 0x184adf63,0x184adf63 -.long 0x82311ae5,0x82311ae5 -.long 0x60335197,0x60335197 -.long 0x457f5362,0x457f5362 -.long 0xe07764b1,0xe07764b1 -.long 0x84ae6bbb,0x84ae6bbb -.long 0x1ca081fe,0x1ca081fe -.long 0x942b08f9,0x942b08f9 -.long 0x58684870,0x58684870 -.long 0x19fd458f,0x19fd458f -.long 0x876cde94,0x876cde94 -.long 0xb7f87b52,0xb7f87b52 -.long 0x23d373ab,0x23d373ab -.long 0xe2024b72,0xe2024b72 -.long 0x578f1fe3,0x578f1fe3 -.long 0x2aab5566,0x2aab5566 -.long 0x0728ebb2,0x0728ebb2 -.long 0x03c2b52f,0x03c2b52f -.long 0x9a7bc586,0x9a7bc586 -.long 0xa50837d3,0xa50837d3 -.long 0xf2872830,0xf2872830 -.long 0xb2a5bf23,0xb2a5bf23 -.long 0xba6a0302,0xba6a0302 -.long 0x5c8216ed,0x5c8216ed -.long 0x2b1ccf8a,0x2b1ccf8a -.long 0x92b479a7,0x92b479a7 -.long 0xf0f207f3,0xf0f207f3 -.long 0xa1e2694e,0xa1e2694e -.long 0xcdf4da65,0xcdf4da65 -.long 0xd5be0506,0xd5be0506 -.long 0x1f6234d1,0x1f6234d1 -.long 0x8afea6c4,0x8afea6c4 -.long 0x9d532e34,0x9d532e34 -.long 0xa055f3a2,0xa055f3a2 -.long 0x32e18a05,0x32e18a05 -.long 0x75ebf6a4,0x75ebf6a4 -.long 0x39ec830b,0x39ec830b -.long 0xaaef6040,0xaaef6040 -.long 0x069f715e,0x069f715e -.long 0x51106ebd,0x51106ebd -.long 0xf98a213e,0xf98a213e -.long 0x3d06dd96,0x3d06dd96 -.long 0xae053edd,0xae053edd -.long 0x46bde64d,0x46bde64d -.long 0xb58d5491,0xb58d5491 -.long 0x055dc471,0x055dc471 -.long 0x6fd40604,0x6fd40604 -.long 0xff155060,0xff155060 -.long 0x24fb9819,0x24fb9819 -.long 0x97e9bdd6,0x97e9bdd6 -.long 0xcc434089,0xcc434089 -.long 0x779ed967,0x779ed967 -.long 0xbd42e8b0,0xbd42e8b0 -.long 0x888b8907,0x888b8907 
-.long 0x385b19e7,0x385b19e7 -.long 0xdbeec879,0xdbeec879 -.long 0x470a7ca1,0x470a7ca1 -.long 0xe90f427c,0xe90f427c -.long 0xc91e84f8,0xc91e84f8 -.long 0x00000000,0x00000000 -.long 0x83868009,0x83868009 -.long 0x48ed2b32,0x48ed2b32 -.long 0xac70111e,0xac70111e -.long 0x4e725a6c,0x4e725a6c -.long 0xfbff0efd,0xfbff0efd -.long 0x5638850f,0x5638850f -.long 0x1ed5ae3d,0x1ed5ae3d -.long 0x27392d36,0x27392d36 -.long 0x64d90f0a,0x64d90f0a -.long 0x21a65c68,0x21a65c68 -.long 0xd1545b9b,0xd1545b9b -.long 0x3a2e3624,0x3a2e3624 -.long 0xb1670a0c,0xb1670a0c -.long 0x0fe75793,0x0fe75793 -.long 0xd296eeb4,0xd296eeb4 -.long 0x9e919b1b,0x9e919b1b -.long 0x4fc5c080,0x4fc5c080 -.long 0xa220dc61,0xa220dc61 -.long 0x694b775a,0x694b775a -.long 0x161a121c,0x161a121c -.long 0x0aba93e2,0x0aba93e2 -.long 0xe52aa0c0,0xe52aa0c0 -.long 0x43e0223c,0x43e0223c -.long 0x1d171b12,0x1d171b12 -.long 0x0b0d090e,0x0b0d090e -.long 0xadc78bf2,0xadc78bf2 -.long 0xb9a8b62d,0xb9a8b62d -.long 0xc8a91e14,0xc8a91e14 -.long 0x8519f157,0x8519f157 -.long 0x4c0775af,0x4c0775af -.long 0xbbdd99ee,0xbbdd99ee -.long 0xfd607fa3,0xfd607fa3 -.long 0x9f2601f7,0x9f2601f7 -.long 0xbcf5725c,0xbcf5725c -.long 0xc53b6644,0xc53b6644 -.long 0x347efb5b,0x347efb5b -.long 0x7629438b,0x7629438b -.long 0xdcc623cb,0xdcc623cb -.long 0x68fcedb6,0x68fcedb6 -.long 0x63f1e4b8,0x63f1e4b8 -.long 0xcadc31d7,0xcadc31d7 -.long 0x10856342,0x10856342 -.long 0x40229713,0x40229713 -.long 0x2011c684,0x2011c684 -.long 0x7d244a85,0x7d244a85 -.long 0xf83dbbd2,0xf83dbbd2 -.long 0x1132f9ae,0x1132f9ae -.long 0x6da129c7,0x6da129c7 -.long 0x4b2f9e1d,0x4b2f9e1d -.long 0xf330b2dc,0xf330b2dc -.long 0xec52860d,0xec52860d -.long 0xd0e3c177,0xd0e3c177 -.long 0x6c16b32b,0x6c16b32b -.long 0x99b970a9,0x99b970a9 -.long 0xfa489411,0xfa489411 -.long 0x2264e947,0x2264e947 -.long 0xc48cfca8,0xc48cfca8 -.long 0x1a3ff0a0,0x1a3ff0a0 -.long 0xd82c7d56,0xd82c7d56 -.long 0xef903322,0xef903322 -.long 0xc74e4987,0xc74e4987 -.long 0xc1d138d9,0xc1d138d9 -.long 
0xfea2ca8c,0xfea2ca8c -.long 0x360bd498,0x360bd498 -.long 0xcf81f5a6,0xcf81f5a6 -.long 0x28de7aa5,0x28de7aa5 -.long 0x268eb7da,0x268eb7da -.long 0xa4bfad3f,0xa4bfad3f -.long 0xe49d3a2c,0xe49d3a2c -.long 0x0d927850,0x0d927850 -.long 0x9bcc5f6a,0x9bcc5f6a -.long 0x62467e54,0x62467e54 -.long 0xc2138df6,0xc2138df6 -.long 0xe8b8d890,0xe8b8d890 -.long 0x5ef7392e,0x5ef7392e -.long 0xf5afc382,0xf5afc382 -.long 0xbe805d9f,0xbe805d9f -.long 0x7c93d069,0x7c93d069 -.long 0xa92dd56f,0xa92dd56f -.long 0xb31225cf,0xb31225cf -.long 0x3b99acc8,0x3b99acc8 -.long 0xa77d1810,0xa77d1810 -.long 0x6e639ce8,0x6e639ce8 -.long 0x7bbb3bdb,0x7bbb3bdb -.long 0x097826cd,0x097826cd -.long 0xf418596e,0xf418596e -.long 0x01b79aec,0x01b79aec -.long 0xa89a4f83,0xa89a4f83 -.long 0x656e95e6,0x656e95e6 -.long 0x7ee6ffaa,0x7ee6ffaa -.long 0x08cfbc21,0x08cfbc21 -.long 0xe6e815ef,0xe6e815ef -.long 0xd99be7ba,0xd99be7ba -.long 0xce366f4a,0xce366f4a -.long 0xd4099fea,0xd4099fea -.long 0xd67cb029,0xd67cb029 -.long 0xafb2a431,0xafb2a431 -.long 0x31233f2a,0x31233f2a -.long 0x3094a5c6,0x3094a5c6 -.long 0xc066a235,0xc066a235 -.long 0x37bc4e74,0x37bc4e74 -.long 0xa6ca82fc,0xa6ca82fc -.long 0xb0d090e0,0xb0d090e0 -.long 0x15d8a733,0x15d8a733 -.long 0x4a9804f1,0x4a9804f1 -.long 0xf7daec41,0xf7daec41 -.long 0x0e50cd7f,0x0e50cd7f -.long 0x2ff69117,0x2ff69117 -.long 0x8dd64d76,0x8dd64d76 -.long 0x4db0ef43,0x4db0ef43 -.long 0x544daacc,0x544daacc -.long 0xdf0496e4,0xdf0496e4 -.long 0xe3b5d19e,0xe3b5d19e -.long 0x1b886a4c,0x1b886a4c -.long 0xb81f2cc1,0xb81f2cc1 -.long 0x7f516546,0x7f516546 -.long 0x04ea5e9d,0x04ea5e9d -.long 0x5d358c01,0x5d358c01 -.long 0x737487fa,0x737487fa -.long 0x2e410bfb,0x2e410bfb -.long 0x5a1d67b3,0x5a1d67b3 -.long 0x52d2db92,0x52d2db92 -.long 0x335610e9,0x335610e9 -.long 0x1347d66d,0x1347d66d -.long 0x8c61d79a,0x8c61d79a -.long 0x7a0ca137,0x7a0ca137 -.long 0x8e14f859,0x8e14f859 -.long 0x893c13eb,0x893c13eb -.long 0xee27a9ce,0xee27a9ce -.long 0x35c961b7,0x35c961b7 -.long 0xede51ce1,0xede51ce1 
-.long 0x3cb1477a,0x3cb1477a -.long 0x59dfd29c,0x59dfd29c -.long 0x3f73f255,0x3f73f255 -.long 0x79ce1418,0x79ce1418 -.long 0xbf37c773,0xbf37c773 -.long 0xeacdf753,0xeacdf753 -.long 0x5baafd5f,0x5baafd5f -.long 0x146f3ddf,0x146f3ddf -.long 0x86db4478,0x86db4478 -.long 0x81f3afca,0x81f3afca -.long 0x3ec468b9,0x3ec468b9 -.long 0x2c342438,0x2c342438 -.long 0x5f40a3c2,0x5f40a3c2 -.long 0x72c31d16,0x72c31d16 -.long 0x0c25e2bc,0x0c25e2bc -.long 0x8b493c28,0x8b493c28 -.long 0x41950dff,0x41950dff -.long 0x7101a839,0x7101a839 -.long 0xdeb30c08,0xdeb30c08 -.long 0x9ce4b4d8,0x9ce4b4d8 -.long 0x90c15664,0x90c15664 -.long 0x6184cb7b,0x6184cb7b -.long 0x70b632d5,0x70b632d5 -.long 0x745c6c48,0x745c6c48 -.long 0x4257b8d0,0x4257b8d0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d 
-.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 65,69,83,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S deleted file mode 100644 index 65ab5c78fe..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S +++ /dev/null @@ -1,852 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.type _aesni_ctr32_ghash_6x,@function -.align 32 -_aesni_ctr32_ghash_6x: -.cfi_startproc - vmovdqu 32(%r11),%xmm2 - subq $6,%rdx - vpxor %xmm4,%xmm4,%xmm4 - vmovdqu 0-128(%rcx),%xmm15 - vpaddb %xmm2,%xmm1,%xmm10 - vpaddb %xmm2,%xmm10,%xmm11 - vpaddb %xmm2,%xmm11,%xmm12 - vpaddb %xmm2,%xmm12,%xmm13 - vpaddb %xmm2,%xmm13,%xmm14 - vpxor %xmm15,%xmm1,%xmm9 - vmovdqu %xmm4,16+8(%rsp) - jmp .Loop6x - -.align 32 -.Loop6x: - addl $100663296,%ebx - jc .Lhandle_ctr32 - vmovdqu 0-32(%r9),%xmm3 - vpaddb %xmm2,%xmm14,%xmm1 - vpxor %xmm15,%xmm10,%xmm10 - vpxor %xmm15,%xmm11,%xmm11 - -.Lresume_ctr32: - vmovdqu %xmm1,(%r8) - vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 - vpxor %xmm15,%xmm12,%xmm12 - vmovups 16-128(%rcx),%xmm2 - vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 - - - - - - - - - - - - - - - - - - xorq %r12,%r12 - cmpq %r14,%r15 - - vaesenc %xmm2,%xmm9,%xmm9 - vmovdqu 48+8(%rsp),%xmm0 - vpxor %xmm15,%xmm13,%xmm13 - vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 - vaesenc %xmm2,%xmm10,%xmm10 - vpxor %xmm15,%xmm14,%xmm14 - setnc %r12b - vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vmovdqu 16-32(%r9),%xmm3 - negq %r12 - vaesenc %xmm2,%xmm12,%xmm12 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 - vpxor %xmm4,%xmm8,%xmm8 - vaesenc %xmm2,%xmm13,%xmm13 - vpxor %xmm5,%xmm1,%xmm4 - andq $0x60,%r12 - vmovups 32-128(%rcx),%xmm15 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 - vaesenc %xmm2,%xmm14,%xmm14 - - vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 - leaq (%r14,%r12,1),%r14 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor 16+8(%rsp),%xmm8,%xmm8 - vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 - vmovdqu 64+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 88(%r14),%r13 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 80(%r14),%r12 - vaesenc %xmm15,%xmm12,%xmm12 - movq 
%r13,32+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,40+8(%rsp) - vmovdqu 48-32(%r9),%xmm5 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 48-128(%rcx),%xmm15 - vpxor %xmm1,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm2,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor %xmm3,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 - vaesenc %xmm15,%xmm11,%xmm11 - vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 - vmovdqu 80+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor %xmm1,%xmm4,%xmm4 - vmovdqu 64-32(%r9),%xmm1 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 64-128(%rcx),%xmm15 - vpxor %xmm2,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm3,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 72(%r14),%r13 - vpxor %xmm5,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 64(%r14),%r12 - vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 - vmovdqu 96+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,48+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,56+8(%rsp) - vpxor %xmm2,%xmm4,%xmm4 - vmovdqu 96-32(%r9),%xmm2 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 80-128(%rcx),%xmm15 - vpxor %xmm3,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 56(%r14),%r13 - vpxor %xmm1,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 - vpxor 112+8(%rsp),%xmm8,%xmm8 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 48(%r14),%r12 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,64+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,72+8(%rsp) - vpxor %xmm3,%xmm4,%xmm4 - vmovdqu 112-32(%r9),%xmm3 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 96-128(%rcx),%xmm15 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 - vaesenc 
%xmm15,%xmm9,%xmm9 - vpxor %xmm1,%xmm6,%xmm6 - vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 40(%r14),%r13 - vpxor %xmm2,%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 32(%r14),%r12 - vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,80+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,88+8(%rsp) - vpxor %xmm5,%xmm6,%xmm6 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor %xmm1,%xmm6,%xmm6 - - vmovups 112-128(%rcx),%xmm15 - vpslldq $8,%xmm6,%xmm5 - vpxor %xmm2,%xmm4,%xmm4 - vmovdqu 16(%r11),%xmm3 - - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm8,%xmm7,%xmm7 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor %xmm5,%xmm4,%xmm4 - movbeq 24(%r14),%r13 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 16(%r14),%r12 - vpalignr $8,%xmm4,%xmm4,%xmm0 - vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 - movq %r13,96+8(%rsp) - vaesenc %xmm15,%xmm12,%xmm12 - movq %r12,104+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - vmovups 128-128(%rcx),%xmm1 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vmovups 144-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm10,%xmm10 - vpsrldq $8,%xmm6,%xmm6 - vaesenc %xmm1,%xmm11,%xmm11 - vpxor %xmm6,%xmm7,%xmm7 - vaesenc %xmm1,%xmm12,%xmm12 - vpxor %xmm0,%xmm4,%xmm4 - movbeq 8(%r14),%r13 - vaesenc %xmm1,%xmm13,%xmm13 - movbeq 0(%r14),%r12 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 160-128(%rcx),%xmm1 - cmpl $11,%ebp - jb .Lenc_tail - - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - vmovups 176-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 192-128(%rcx),%xmm1 - je .Lenc_tail - - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc 
%xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - vmovups 208-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 224-128(%rcx),%xmm1 - jmp .Lenc_tail - -.align 32 -.Lhandle_ctr32: - vmovdqu (%r11),%xmm0 - vpshufb %xmm0,%xmm1,%xmm6 - vmovdqu 48(%r11),%xmm5 - vpaddd 64(%r11),%xmm6,%xmm10 - vpaddd %xmm5,%xmm6,%xmm11 - vmovdqu 0-32(%r9),%xmm3 - vpaddd %xmm5,%xmm10,%xmm12 - vpshufb %xmm0,%xmm10,%xmm10 - vpaddd %xmm5,%xmm11,%xmm13 - vpshufb %xmm0,%xmm11,%xmm11 - vpxor %xmm15,%xmm10,%xmm10 - vpaddd %xmm5,%xmm12,%xmm14 - vpshufb %xmm0,%xmm12,%xmm12 - vpxor %xmm15,%xmm11,%xmm11 - vpaddd %xmm5,%xmm13,%xmm1 - vpshufb %xmm0,%xmm13,%xmm13 - vpshufb %xmm0,%xmm14,%xmm14 - vpshufb %xmm0,%xmm1,%xmm1 - jmp .Lresume_ctr32 - -.align 32 -.Lenc_tail: - vaesenc %xmm15,%xmm9,%xmm9 - vmovdqu %xmm7,16+8(%rsp) - vpalignr $8,%xmm4,%xmm4,%xmm8 - vaesenc %xmm15,%xmm10,%xmm10 - vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 - vpxor 0(%rdi),%xmm1,%xmm2 - vaesenc %xmm15,%xmm11,%xmm11 - vpxor 16(%rdi),%xmm1,%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - vpxor 32(%rdi),%xmm1,%xmm5 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor 48(%rdi),%xmm1,%xmm6 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor 64(%rdi),%xmm1,%xmm7 - vpxor 80(%rdi),%xmm1,%xmm3 - vmovdqu (%r8),%xmm1 - - vaesenclast %xmm2,%xmm9,%xmm9 - vmovdqu 32(%r11),%xmm2 - vaesenclast %xmm0,%xmm10,%xmm10 - vpaddb %xmm2,%xmm1,%xmm0 - movq %r13,112+8(%rsp) - leaq 96(%rdi),%rdi - vaesenclast %xmm5,%xmm11,%xmm11 - vpaddb %xmm2,%xmm0,%xmm5 - movq %r12,120+8(%rsp) - leaq 96(%rsi),%rsi - vmovdqu 0-128(%rcx),%xmm15 - vaesenclast %xmm6,%xmm12,%xmm12 - vpaddb %xmm2,%xmm5,%xmm6 - vaesenclast %xmm7,%xmm13,%xmm13 - vpaddb %xmm2,%xmm6,%xmm7 - vaesenclast %xmm3,%xmm14,%xmm14 - vpaddb %xmm2,%xmm7,%xmm3 - - addq $0x60,%r10 - subq $0x6,%rdx - jc .L6x_done - - vmovups %xmm9,-96(%rsi) - vpxor %xmm15,%xmm1,%xmm9 - vmovups %xmm10,-80(%rsi) - 
vmovdqa %xmm0,%xmm10 - vmovups %xmm11,-64(%rsi) - vmovdqa %xmm5,%xmm11 - vmovups %xmm12,-48(%rsi) - vmovdqa %xmm6,%xmm12 - vmovups %xmm13,-32(%rsi) - vmovdqa %xmm7,%xmm13 - vmovups %xmm14,-16(%rsi) - vmovdqa %xmm3,%xmm14 - vmovdqu 32+8(%rsp),%xmm7 - jmp .Loop6x - -.L6x_done: - vpxor 16+8(%rsp),%xmm8,%xmm8 - vpxor %xmm4,%xmm8,%xmm8 - - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x -.globl aesni_gcm_decrypt -.hidden aesni_gcm_decrypt -.type aesni_gcm_decrypt,@function -.align 32 -aesni_gcm_decrypt: -.cfi_startproc - xorq %r10,%r10 - - - - cmpq $0x60,%rdx - jb .Lgcm_dec_abort - - leaq (%rsp),%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - vzeroupper - - vmovdqu (%r8),%xmm1 - addq $-128,%rsp - movl 12(%r8),%ebx - leaq .Lbswap_mask(%rip),%r11 - leaq -128(%rcx),%r14 - movq $0xf80,%r15 - vmovdqu (%r9),%xmm8 - andq $-128,%rsp - vmovdqu (%r11),%xmm0 - leaq 128(%rcx),%rcx - leaq 32+32(%r9),%r9 - movl 240-128(%rcx),%ebp - vpshufb %xmm0,%xmm8,%xmm8 - - andq %r15,%r14 - andq %rsp,%r15 - subq %r14,%r15 - jc .Ldec_no_key_aliasing - cmpq $768,%r15 - jnc .Ldec_no_key_aliasing - subq %r15,%rsp -.Ldec_no_key_aliasing: - - vmovdqu 80(%rdi),%xmm7 - leaq (%rdi),%r14 - vmovdqu 64(%rdi),%xmm4 - - - - - - - - leaq -192(%rdi,%rdx,1),%r15 - - vmovdqu 48(%rdi),%xmm5 - shrq $4,%rdx - xorq %r10,%r10 - vmovdqu 32(%rdi),%xmm6 - vpshufb %xmm0,%xmm7,%xmm7 - vmovdqu 16(%rdi),%xmm2 - vpshufb %xmm0,%xmm4,%xmm4 - vmovdqu (%rdi),%xmm3 - vpshufb %xmm0,%xmm5,%xmm5 - vmovdqu %xmm4,48(%rsp) - vpshufb %xmm0,%xmm6,%xmm6 - vmovdqu %xmm5,64(%rsp) - vpshufb %xmm0,%xmm2,%xmm2 - vmovdqu %xmm6,80(%rsp) - vpshufb %xmm0,%xmm3,%xmm3 - vmovdqu %xmm2,96(%rsp) - vmovdqu %xmm3,112(%rsp) - - call _aesni_ctr32_ghash_6x - - vmovups %xmm9,-96(%rsi) - vmovups %xmm10,-80(%rsi) - vmovups 
%xmm11,-64(%rsi) - vmovups %xmm12,-48(%rsi) - vmovups %xmm13,-32(%rsi) - vmovups %xmm14,-16(%rsi) - - vpshufb (%r11),%xmm8,%xmm8 - vmovdqu %xmm8,-64(%r9) - - vzeroupper - movq -48(%rax),%r15 -.cfi_restore %r15 - movq -40(%rax),%r14 -.cfi_restore %r14 - movq -32(%rax),%r13 -.cfi_restore %r13 - movq -24(%rax),%r12 -.cfi_restore %r12 - movq -16(%rax),%rbp -.cfi_restore %rbp - movq -8(%rax),%rbx -.cfi_restore %rbx - leaq (%rax),%rsp -.cfi_def_cfa_register %rsp -.Lgcm_dec_abort: - movq %r10,%rax - .byte 0xf3,0xc3 -.cfi_endproc -.size aesni_gcm_decrypt,.-aesni_gcm_decrypt -.type _aesni_ctr32_6x,@function -.align 32 -_aesni_ctr32_6x: -.cfi_startproc - vmovdqu 0-128(%rcx),%xmm4 - vmovdqu 32(%r11),%xmm2 - leaq -1(%rbp),%r13 - vmovups 16-128(%rcx),%xmm15 - leaq 32-128(%rcx),%r12 - vpxor %xmm4,%xmm1,%xmm9 - addl $100663296,%ebx - jc .Lhandle_ctr32_2 - vpaddb %xmm2,%xmm1,%xmm10 - vpaddb %xmm2,%xmm10,%xmm11 - vpxor %xmm4,%xmm10,%xmm10 - vpaddb %xmm2,%xmm11,%xmm12 - vpxor %xmm4,%xmm11,%xmm11 - vpaddb %xmm2,%xmm12,%xmm13 - vpxor %xmm4,%xmm12,%xmm12 - vpaddb %xmm2,%xmm13,%xmm14 - vpxor %xmm4,%xmm13,%xmm13 - vpaddb %xmm2,%xmm14,%xmm1 - vpxor %xmm4,%xmm14,%xmm14 - jmp .Loop_ctr32 - -.align 16 -.Loop_ctr32: - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - vmovups (%r12),%xmm15 - leaq 16(%r12),%r12 - decl %r13d - jnz .Loop_ctr32 - - vmovdqu (%r12),%xmm3 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor 0(%rdi),%xmm3,%xmm4 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor 16(%rdi),%xmm3,%xmm5 - vaesenc %xmm15,%xmm11,%xmm11 - vpxor 32(%rdi),%xmm3,%xmm6 - vaesenc %xmm15,%xmm12,%xmm12 - vpxor 48(%rdi),%xmm3,%xmm8 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor 64(%rdi),%xmm3,%xmm2 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor 80(%rdi),%xmm3,%xmm3 - leaq 96(%rdi),%rdi - - vaesenclast %xmm4,%xmm9,%xmm9 - vaesenclast %xmm5,%xmm10,%xmm10 - vaesenclast %xmm6,%xmm11,%xmm11 - vaesenclast 
%xmm8,%xmm12,%xmm12 - vaesenclast %xmm2,%xmm13,%xmm13 - vaesenclast %xmm3,%xmm14,%xmm14 - vmovups %xmm9,0(%rsi) - vmovups %xmm10,16(%rsi) - vmovups %xmm11,32(%rsi) - vmovups %xmm12,48(%rsi) - vmovups %xmm13,64(%rsi) - vmovups %xmm14,80(%rsi) - leaq 96(%rsi),%rsi - - .byte 0xf3,0xc3 -.align 32 -.Lhandle_ctr32_2: - vpshufb %xmm0,%xmm1,%xmm6 - vmovdqu 48(%r11),%xmm5 - vpaddd 64(%r11),%xmm6,%xmm10 - vpaddd %xmm5,%xmm6,%xmm11 - vpaddd %xmm5,%xmm10,%xmm12 - vpshufb %xmm0,%xmm10,%xmm10 - vpaddd %xmm5,%xmm11,%xmm13 - vpshufb %xmm0,%xmm11,%xmm11 - vpxor %xmm4,%xmm10,%xmm10 - vpaddd %xmm5,%xmm12,%xmm14 - vpshufb %xmm0,%xmm12,%xmm12 - vpxor %xmm4,%xmm11,%xmm11 - vpaddd %xmm5,%xmm13,%xmm1 - vpshufb %xmm0,%xmm13,%xmm13 - vpxor %xmm4,%xmm12,%xmm12 - vpshufb %xmm0,%xmm14,%xmm14 - vpxor %xmm4,%xmm13,%xmm13 - vpshufb %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm14,%xmm14 - jmp .Loop_ctr32 -.cfi_endproc -.size _aesni_ctr32_6x,.-_aesni_ctr32_6x - -.globl aesni_gcm_encrypt -.hidden aesni_gcm_encrypt -.type aesni_gcm_encrypt,@function -.align 32 -aesni_gcm_encrypt: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST -.extern BORINGSSL_function_hit -.hidden BORINGSSL_function_hit - movb $1,BORINGSSL_function_hit+2(%rip) -#endif - xorq %r10,%r10 - - - - - cmpq $288,%rdx - jb .Lgcm_enc_abort - - leaq (%rsp),%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - vzeroupper - - vmovdqu (%r8),%xmm1 - addq $-128,%rsp - movl 12(%r8),%ebx - leaq .Lbswap_mask(%rip),%r11 - leaq -128(%rcx),%r14 - movq $0xf80,%r15 - leaq 128(%rcx),%rcx - vmovdqu (%r11),%xmm0 - andq $-128,%rsp - movl 240-128(%rcx),%ebp - - andq %r15,%r14 - andq %rsp,%r15 - subq %r14,%r15 - jc .Lenc_no_key_aliasing - cmpq $768,%r15 - jnc .Lenc_no_key_aliasing - subq %r15,%rsp -.Lenc_no_key_aliasing: - - leaq (%rsi),%r14 - - - - - - - - - leaq 
-192(%rsi,%rdx,1),%r15 - - shrq $4,%rdx - - call _aesni_ctr32_6x - vpshufb %xmm0,%xmm9,%xmm8 - vpshufb %xmm0,%xmm10,%xmm2 - vmovdqu %xmm8,112(%rsp) - vpshufb %xmm0,%xmm11,%xmm4 - vmovdqu %xmm2,96(%rsp) - vpshufb %xmm0,%xmm12,%xmm5 - vmovdqu %xmm4,80(%rsp) - vpshufb %xmm0,%xmm13,%xmm6 - vmovdqu %xmm5,64(%rsp) - vpshufb %xmm0,%xmm14,%xmm7 - vmovdqu %xmm6,48(%rsp) - - call _aesni_ctr32_6x - - vmovdqu (%r9),%xmm8 - leaq 32+32(%r9),%r9 - subq $12,%rdx - movq $192,%r10 - vpshufb %xmm0,%xmm8,%xmm8 - - call _aesni_ctr32_ghash_6x - vmovdqu 32(%rsp),%xmm7 - vmovdqu (%r11),%xmm0 - vmovdqu 0-32(%r9),%xmm3 - vpunpckhqdq %xmm7,%xmm7,%xmm1 - vmovdqu 32-32(%r9),%xmm15 - vmovups %xmm9,-96(%rsi) - vpshufb %xmm0,%xmm9,%xmm9 - vpxor %xmm7,%xmm1,%xmm1 - vmovups %xmm10,-80(%rsi) - vpshufb %xmm0,%xmm10,%xmm10 - vmovups %xmm11,-64(%rsi) - vpshufb %xmm0,%xmm11,%xmm11 - vmovups %xmm12,-48(%rsi) - vpshufb %xmm0,%xmm12,%xmm12 - vmovups %xmm13,-32(%rsi) - vpshufb %xmm0,%xmm13,%xmm13 - vmovups %xmm14,-16(%rsi) - vpshufb %xmm0,%xmm14,%xmm14 - vmovdqu %xmm9,16(%rsp) - vmovdqu 48(%rsp),%xmm6 - vmovdqu 16-32(%r9),%xmm0 - vpunpckhqdq %xmm6,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 - vpxor %xmm6,%xmm2,%xmm2 - vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 - - vmovdqu 64(%rsp),%xmm9 - vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 - vmovdqu 48-32(%r9),%xmm3 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm9,%xmm9,%xmm5 - vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 - vpxor %xmm9,%xmm5,%xmm5 - vpxor %xmm7,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 - vmovdqu 80-32(%r9),%xmm15 - vpxor %xmm1,%xmm2,%xmm2 - - vmovdqu 80(%rsp),%xmm1 - vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 - vmovdqu 64-32(%r9),%xmm0 - vpxor %xmm4,%xmm7,%xmm7 - vpunpckhqdq %xmm1,%xmm1,%xmm4 - vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpxor %xmm6,%xmm9,%xmm9 - vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 96(%rsp),%xmm2 - vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 - vmovdqu 96-32(%r9),%xmm3 - 
vpxor %xmm7,%xmm6,%xmm6 - vpunpckhqdq %xmm2,%xmm2,%xmm7 - vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 - vpxor %xmm2,%xmm7,%xmm7 - vpxor %xmm9,%xmm1,%xmm1 - vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 - vmovdqu 128-32(%r9),%xmm15 - vpxor %xmm5,%xmm4,%xmm4 - - vpxor 112(%rsp),%xmm8,%xmm8 - vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 - vmovdqu 112-32(%r9),%xmm0 - vpunpckhqdq %xmm8,%xmm8,%xmm9 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 - vpxor %xmm8,%xmm9,%xmm9 - vpxor %xmm1,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 - vpxor %xmm4,%xmm7,%xmm4 - - vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 - vmovdqu 0-32(%r9),%xmm3 - vpunpckhqdq %xmm14,%xmm14,%xmm1 - vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 - vpxor %xmm14,%xmm1,%xmm1 - vpxor %xmm5,%xmm6,%xmm5 - vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 - vmovdqu 32-32(%r9),%xmm15 - vpxor %xmm2,%xmm8,%xmm7 - vpxor %xmm4,%xmm9,%xmm6 - - vmovdqu 16-32(%r9),%xmm0 - vpxor %xmm5,%xmm7,%xmm9 - vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 - vpxor %xmm9,%xmm6,%xmm6 - vpunpckhqdq %xmm13,%xmm13,%xmm2 - vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 - vpxor %xmm13,%xmm2,%xmm2 - vpslldq $8,%xmm6,%xmm9 - vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 - vpxor %xmm9,%xmm5,%xmm8 - vpsrldq $8,%xmm6,%xmm6 - vpxor %xmm6,%xmm7,%xmm7 - - vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 - vmovdqu 48-32(%r9),%xmm3 - vpxor %xmm4,%xmm5,%xmm5 - vpunpckhqdq %xmm12,%xmm12,%xmm9 - vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 - vpxor %xmm12,%xmm9,%xmm9 - vpxor %xmm14,%xmm13,%xmm13 - vpalignr $8,%xmm8,%xmm8,%xmm14 - vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 - vmovdqu 80-32(%r9),%xmm15 - vpxor %xmm1,%xmm2,%xmm2 - - vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 - vmovdqu 64-32(%r9),%xmm0 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm11,%xmm11,%xmm1 - vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 - vpxor %xmm11,%xmm1,%xmm1 - vpxor %xmm13,%xmm12,%xmm12 - vxorps 16(%rsp),%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 - vpxor %xmm2,%xmm9,%xmm9 - - vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 - vxorps %xmm14,%xmm8,%xmm8 - - vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 - vmovdqu 
96-32(%r9),%xmm3 - vpxor %xmm4,%xmm5,%xmm5 - vpunpckhqdq %xmm10,%xmm10,%xmm2 - vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 - vpxor %xmm10,%xmm2,%xmm2 - vpalignr $8,%xmm8,%xmm8,%xmm14 - vpxor %xmm12,%xmm11,%xmm11 - vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 - vmovdqu 128-32(%r9),%xmm15 - vpxor %xmm9,%xmm1,%xmm1 - - vxorps %xmm7,%xmm14,%xmm14 - vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 - vxorps %xmm14,%xmm8,%xmm8 - - vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 - vmovdqu 112-32(%r9),%xmm0 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm8,%xmm8,%xmm9 - vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 - vpxor %xmm8,%xmm9,%xmm9 - vpxor %xmm11,%xmm10,%xmm10 - vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 - vpxor %xmm1,%xmm2,%xmm2 - - vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 - vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 - vpxor %xmm4,%xmm5,%xmm5 - vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 - vpxor %xmm10,%xmm7,%xmm7 - vpxor %xmm2,%xmm6,%xmm6 - - vpxor %xmm5,%xmm7,%xmm4 - vpxor %xmm4,%xmm6,%xmm6 - vpslldq $8,%xmm6,%xmm1 - vmovdqu 16(%r11),%xmm3 - vpsrldq $8,%xmm6,%xmm6 - vpxor %xmm1,%xmm5,%xmm8 - vpxor %xmm6,%xmm7,%xmm7 - - vpalignr $8,%xmm8,%xmm8,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 - vpxor %xmm2,%xmm8,%xmm8 - - vpalignr $8,%xmm8,%xmm8,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 - vpxor %xmm7,%xmm2,%xmm2 - vpxor %xmm2,%xmm8,%xmm8 - vpshufb (%r11),%xmm8,%xmm8 - vmovdqu %xmm8,-64(%r9) - - vzeroupper - movq -48(%rax),%r15 -.cfi_restore %r15 - movq -40(%rax),%r14 -.cfi_restore %r14 - movq -32(%rax),%r13 -.cfi_restore %r13 - movq -24(%rax),%r12 -.cfi_restore %r12 - movq -16(%rax),%rbp -.cfi_restore %rbp - movq -8(%rax),%rbx -.cfi_restore %rbx - leaq (%rax),%rsp -.cfi_def_cfa_register %rsp -.Lgcm_enc_abort: - movq %r10,%rax - .byte 0xf3,0xc3 -.cfi_endproc -.size aesni_gcm_encrypt,.-aesni_gcm_encrypt -.align 64 -.Lbswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.Lpoly: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -.Lone_msb: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -.Ltwo_lsb: -.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.Lone_lsb: -.byte 
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S deleted file mode 100644 index b98107f369..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S +++ /dev/null @@ -1,2506 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.globl aes_hw_encrypt -.hidden aes_hw_encrypt -.type aes_hw_encrypt,@function -.align 16 -aes_hw_encrypt: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST -.extern BORINGSSL_function_hit -.hidden BORINGSSL_function_hit - movb $1,BORINGSSL_function_hit+1(%rip) -#endif - movups (%rdi),%xmm2 - movl 240(%rdx),%eax - movups (%rdx),%xmm0 - movups 16(%rdx),%xmm1 - leaq 32(%rdx),%rdx - xorps %xmm0,%xmm2 -.Loop_enc1_1: -.byte 102,15,56,220,209 - decl %eax - movups (%rdx),%xmm1 - leaq 16(%rdx),%rdx - jnz .Loop_enc1_1 -.byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_hw_encrypt,.-aes_hw_encrypt - -.globl aes_hw_decrypt -.hidden aes_hw_decrypt -.type aes_hw_decrypt,@function -.align 16 -aes_hw_decrypt: -.cfi_startproc - movups (%rdi),%xmm2 - movl 240(%rdx),%eax - movups (%rdx),%xmm0 - movups 16(%rdx),%xmm1 - leaq 32(%rdx),%rdx - xorps 
%xmm0,%xmm2 -.Loop_dec1_2: -.byte 102,15,56,222,209 - decl %eax - movups (%rdx),%xmm1 - leaq 16(%rdx),%rdx - jnz .Loop_dec1_2 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_hw_decrypt, .-aes_hw_decrypt -.type _aesni_encrypt2,@function -.align 16 -_aesni_encrypt2: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -.Lenc_loop2: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Lenc_loop2 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_encrypt2,.-_aesni_encrypt2 -.type _aesni_decrypt2,@function -.align 16 -_aesni_decrypt2: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -.Ldec_loop2: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Ldec_loop2 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_decrypt2,.-_aesni_decrypt2 -.type _aesni_encrypt3,@function -.align 16 -_aesni_encrypt3: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -.Lenc_loop3: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%rcx,%rax,1),%xmm1 - addq 
$32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Lenc_loop3 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_encrypt3,.-_aesni_encrypt3 -.type _aesni_decrypt3,@function -.align 16 -_aesni_decrypt3: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -.Ldec_loop3: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Ldec_loop3 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_decrypt3,.-_aesni_decrypt3 -.type _aesni_encrypt4,@function -.align 16 -_aesni_encrypt4: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - xorps %xmm0,%xmm5 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 0x0f,0x1f,0x00 - addq $16,%rax - -.Lenc_loop4: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Lenc_loop4 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 - .byte 0xf3,0xc3 -.cfi_endproc -.size 
_aesni_encrypt4,.-_aesni_encrypt4 -.type _aesni_decrypt4,@function -.align 16 -_aesni_decrypt4: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - xorps %xmm0,%xmm5 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 0x0f,0x1f,0x00 - addq $16,%rax - -.Ldec_loop4: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Ldec_loop4 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_decrypt4,.-_aesni_decrypt4 -.type _aesni_encrypt6,@function -.align 16 -_aesni_encrypt6: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,220,209 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,220,217 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,220,225 - pxor %xmm0,%xmm7 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp .Lenc_loop6_enter -.align 16 -.Lenc_loop6: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.Lenc_loop6_enter: -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Lenc_loop6 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,15,56,221,208 
-.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_encrypt6,.-_aesni_encrypt6 -.type _aesni_decrypt6,@function -.align 16 -_aesni_decrypt6: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,222,209 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,222,217 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,222,225 - pxor %xmm0,%xmm7 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp .Ldec_loop6_enter -.align 16 -.Ldec_loop6: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.Ldec_loop6_enter: -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Ldec_loop6 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_decrypt6,.-_aesni_decrypt6 -.type _aesni_encrypt8,@function -.align 16 -_aesni_encrypt8: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,220,209 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 -.byte 102,15,56,220,217 - pxor %xmm0,%xmm9 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp .Lenc_loop8_inner -.align 16 -.Lenc_loop8: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.Lenc_loop8_inner: 
-.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 -.Lenc_loop8_enter: - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Lenc_loop8 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 -.byte 102,68,15,56,221,192 -.byte 102,68,15,56,221,200 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_encrypt8,.-_aesni_encrypt8 -.type _aesni_decrypt8,@function -.align 16 -_aesni_decrypt8: -.cfi_startproc - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,222,209 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm9 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp .Ldec_loop8_inner -.align 16 -.Ldec_loop8: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.Ldec_loop8_inner: -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 -.Ldec_loop8_enter: - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups -16(%rcx,%rax,1),%xmm0 - jnz .Ldec_loop8 - -.byte 
102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 -.byte 102,68,15,56,223,192 -.byte 102,68,15,56,223,200 - .byte 0xf3,0xc3 -.cfi_endproc -.size _aesni_decrypt8,.-_aesni_decrypt8 -.globl aes_hw_ecb_encrypt -.hidden aes_hw_ecb_encrypt -.type aes_hw_ecb_encrypt,@function -.align 16 -aes_hw_ecb_encrypt: -.cfi_startproc - andq $-16,%rdx - jz .Lecb_ret - - movl 240(%rcx),%eax - movups (%rcx),%xmm0 - movq %rcx,%r11 - movl %eax,%r10d - testl %r8d,%r8d - jz .Lecb_decrypt - - cmpq $0x80,%rdx - jb .Lecb_enc_tail - - movdqu (%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqu 32(%rdi),%xmm4 - movdqu 48(%rdi),%xmm5 - movdqu 64(%rdi),%xmm6 - movdqu 80(%rdi),%xmm7 - movdqu 96(%rdi),%xmm8 - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi - subq $0x80,%rdx - jmp .Lecb_enc_loop8_enter -.align 16 -.Lecb_enc_loop8: - movups %xmm2,(%rsi) - movq %r11,%rcx - movdqu (%rdi),%xmm2 - movl %r10d,%eax - movups %xmm3,16(%rsi) - movdqu 16(%rdi),%xmm3 - movups %xmm4,32(%rsi) - movdqu 32(%rdi),%xmm4 - movups %xmm5,48(%rsi) - movdqu 48(%rdi),%xmm5 - movups %xmm6,64(%rsi) - movdqu 64(%rdi),%xmm6 - movups %xmm7,80(%rsi) - movdqu 80(%rdi),%xmm7 - movups %xmm8,96(%rsi) - movdqu 96(%rdi),%xmm8 - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi -.Lecb_enc_loop8_enter: - - call _aesni_encrypt8 - - subq $0x80,%rdx - jnc .Lecb_enc_loop8 - - movups %xmm2,(%rsi) - movq %r11,%rcx - movups %xmm3,16(%rsi) - movl %r10d,%eax - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - movups %xmm8,96(%rsi) - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - addq $0x80,%rdx - jz .Lecb_ret - -.Lecb_enc_tail: - movups (%rdi),%xmm2 - cmpq $0x20,%rdx - jb 
.Lecb_enc_one - movups 16(%rdi),%xmm3 - je .Lecb_enc_two - movups 32(%rdi),%xmm4 - cmpq $0x40,%rdx - jb .Lecb_enc_three - movups 48(%rdi),%xmm5 - je .Lecb_enc_four - movups 64(%rdi),%xmm6 - cmpq $0x60,%rdx - jb .Lecb_enc_five - movups 80(%rdi),%xmm7 - je .Lecb_enc_six - movdqu 96(%rdi),%xmm8 - xorps %xmm9,%xmm9 - call _aesni_encrypt8 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - movups %xmm8,96(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_one: - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -.Loop_enc1_3: -.byte 102,15,56,220,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_enc1_3 -.byte 102,15,56,221,209 - movups %xmm2,(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_two: - call _aesni_encrypt2 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_three: - call _aesni_encrypt3 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_four: - call _aesni_encrypt4 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_five: - xorps %xmm7,%xmm7 - call _aesni_encrypt6 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - jmp .Lecb_ret -.align 16 -.Lecb_enc_six: - call _aesni_encrypt6 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - jmp .Lecb_ret - -.align 16 -.Lecb_decrypt: - cmpq $0x80,%rdx - jb .Lecb_dec_tail - - movdqu (%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqu 32(%rdi),%xmm4 - movdqu 48(%rdi),%xmm5 - movdqu 64(%rdi),%xmm6 - movdqu 80(%rdi),%xmm7 - movdqu 96(%rdi),%xmm8 - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi - subq $0x80,%rdx - jmp .Lecb_dec_loop8_enter -.align 16 -.Lecb_dec_loop8: - movups 
%xmm2,(%rsi) - movq %r11,%rcx - movdqu (%rdi),%xmm2 - movl %r10d,%eax - movups %xmm3,16(%rsi) - movdqu 16(%rdi),%xmm3 - movups %xmm4,32(%rsi) - movdqu 32(%rdi),%xmm4 - movups %xmm5,48(%rsi) - movdqu 48(%rdi),%xmm5 - movups %xmm6,64(%rsi) - movdqu 64(%rdi),%xmm6 - movups %xmm7,80(%rsi) - movdqu 80(%rdi),%xmm7 - movups %xmm8,96(%rsi) - movdqu 96(%rdi),%xmm8 - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi -.Lecb_dec_loop8_enter: - - call _aesni_decrypt8 - - movups (%r11),%xmm0 - subq $0x80,%rdx - jnc .Lecb_dec_loop8 - - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movq %r11,%rcx - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movl %r10d,%eax - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - movups %xmm8,96(%rsi) - pxor %xmm8,%xmm8 - movups %xmm9,112(%rsi) - pxor %xmm9,%xmm9 - leaq 128(%rsi),%rsi - addq $0x80,%rdx - jz .Lecb_ret - -.Lecb_dec_tail: - movups (%rdi),%xmm2 - cmpq $0x20,%rdx - jb .Lecb_dec_one - movups 16(%rdi),%xmm3 - je .Lecb_dec_two - movups 32(%rdi),%xmm4 - cmpq $0x40,%rdx - jb .Lecb_dec_three - movups 48(%rdi),%xmm5 - je .Lecb_dec_four - movups 64(%rdi),%xmm6 - cmpq $0x60,%rdx - jb .Lecb_dec_five - movups 80(%rdi),%xmm7 - je .Lecb_dec_six - movups 96(%rdi),%xmm8 - movups (%rcx),%xmm0 - xorps %xmm9,%xmm9 - call _aesni_decrypt8 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - movups %xmm8,96(%rsi) - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 - jmp .Lecb_ret -.align 16 -.Lecb_dec_one: - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -.Loop_dec1_4: -.byte 102,15,56,222,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_dec1_4 -.byte 
102,15,56,223,209 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp .Lecb_ret -.align 16 -.Lecb_dec_two: - call _aesni_decrypt2 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - jmp .Lecb_ret -.align 16 -.Lecb_dec_three: - call _aesni_decrypt3 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - jmp .Lecb_ret -.align 16 -.Lecb_dec_four: - call _aesni_decrypt4 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - jmp .Lecb_ret -.align 16 -.Lecb_dec_five: - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - jmp .Lecb_ret -.align 16 -.Lecb_dec_six: - call _aesni_decrypt6 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - -.Lecb_ret: - xorps %xmm0,%xmm0 - pxor %xmm1,%xmm1 - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_hw_ecb_encrypt,.-aes_hw_ecb_encrypt -.globl aes_hw_ctr32_encrypt_blocks -.hidden aes_hw_ctr32_encrypt_blocks -.type aes_hw_ctr32_encrypt_blocks,@function -.align 16 -aes_hw_ctr32_encrypt_blocks: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST - movb $1,BORINGSSL_function_hit(%rip) -#endif - cmpq $1,%rdx - jne .Lctr32_bulk - - - - movups (%r8),%xmm2 - movups (%rdi),%xmm3 - movl 240(%rcx),%edx - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -.Loop_enc1_5: -.byte 102,15,56,220,209 - decl %edx - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_enc1_5 -.byte 102,15,56,221,209 
- pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - xorps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movups %xmm2,(%rsi) - xorps %xmm2,%xmm2 - jmp .Lctr32_epilogue - -.align 16 -.Lctr32_bulk: - leaq (%rsp),%r11 -.cfi_def_cfa_register %r11 - pushq %rbp -.cfi_offset %rbp,-16 - subq $128,%rsp - andq $-16,%rsp - - - - - movdqu (%r8),%xmm2 - movdqu (%rcx),%xmm0 - movl 12(%r8),%r8d - pxor %xmm0,%xmm2 - movl 12(%rcx),%ebp - movdqa %xmm2,0(%rsp) - bswapl %r8d - movdqa %xmm2,%xmm3 - movdqa %xmm2,%xmm4 - movdqa %xmm2,%xmm5 - movdqa %xmm2,64(%rsp) - movdqa %xmm2,80(%rsp) - movdqa %xmm2,96(%rsp) - movq %rdx,%r10 - movdqa %xmm2,112(%rsp) - - leaq 1(%r8),%rax - leaq 2(%r8),%rdx - bswapl %eax - bswapl %edx - xorl %ebp,%eax - xorl %ebp,%edx -.byte 102,15,58,34,216,3 - leaq 3(%r8),%rax - movdqa %xmm3,16(%rsp) -.byte 102,15,58,34,226,3 - bswapl %eax - movq %r10,%rdx - leaq 4(%r8),%r10 - movdqa %xmm4,32(%rsp) - xorl %ebp,%eax - bswapl %r10d -.byte 102,15,58,34,232,3 - xorl %ebp,%r10d - movdqa %xmm5,48(%rsp) - leaq 5(%r8),%r9 - movl %r10d,64+12(%rsp) - bswapl %r9d - leaq 6(%r8),%r10 - movl 240(%rcx),%eax - xorl %ebp,%r9d - bswapl %r10d - movl %r9d,80+12(%rsp) - xorl %ebp,%r10d - leaq 7(%r8),%r9 - movl %r10d,96+12(%rsp) - bswapl %r9d - leaq OPENSSL_ia32cap_P(%rip),%r10 - movl 4(%r10),%r10d - xorl %ebp,%r9d - andl $71303168,%r10d - movl %r9d,112+12(%rsp) - - movups 16(%rcx),%xmm1 - - movdqa 64(%rsp),%xmm6 - movdqa 80(%rsp),%xmm7 - - cmpq $8,%rdx - jb .Lctr32_tail - - subq $6,%rdx - cmpl $4194304,%r10d - je .Lctr32_6x - - leaq 128(%rcx),%rcx - subq $2,%rdx - jmp .Lctr32_loop8 - -.align 16 -.Lctr32_6x: - shll $4,%eax - movl $48,%r10d - bswapl %ebp - leaq 32(%rcx,%rax,1),%rcx - subq %rax,%r10 - jmp .Lctr32_loop6 - -.align 16 -.Lctr32_loop6: - addl $6,%r8d - movups -48(%rcx,%r10,1),%xmm0 -.byte 102,15,56,220,209 - movl %r8d,%eax - xorl %ebp,%eax -.byte 102,15,56,220,217 -.byte 0x0f,0x38,0xf1,0x44,0x24,12 - leal 1(%r8),%eax -.byte 102,15,56,220,225 - xorl %ebp,%eax -.byte 0x0f,0x38,0xf1,0x44,0x24,28 -.byte 
102,15,56,220,233 - leal 2(%r8),%eax - xorl %ebp,%eax -.byte 102,15,56,220,241 -.byte 0x0f,0x38,0xf1,0x44,0x24,44 - leal 3(%r8),%eax -.byte 102,15,56,220,249 - movups -32(%rcx,%r10,1),%xmm1 - xorl %ebp,%eax - -.byte 102,15,56,220,208 -.byte 0x0f,0x38,0xf1,0x44,0x24,60 - leal 4(%r8),%eax -.byte 102,15,56,220,216 - xorl %ebp,%eax -.byte 0x0f,0x38,0xf1,0x44,0x24,76 -.byte 102,15,56,220,224 - leal 5(%r8),%eax - xorl %ebp,%eax -.byte 102,15,56,220,232 -.byte 0x0f,0x38,0xf1,0x44,0x24,92 - movq %r10,%rax -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%rcx,%r10,1),%xmm0 - - call .Lenc_loop6 - - movdqu (%rdi),%xmm8 - movdqu 16(%rdi),%xmm9 - movdqu 32(%rdi),%xmm10 - movdqu 48(%rdi),%xmm11 - movdqu 64(%rdi),%xmm12 - movdqu 80(%rdi),%xmm13 - leaq 96(%rdi),%rdi - movups -64(%rcx,%r10,1),%xmm1 - pxor %xmm2,%xmm8 - movaps 0(%rsp),%xmm2 - pxor %xmm3,%xmm9 - movaps 16(%rsp),%xmm3 - pxor %xmm4,%xmm10 - movaps 32(%rsp),%xmm4 - pxor %xmm5,%xmm11 - movaps 48(%rsp),%xmm5 - pxor %xmm6,%xmm12 - movaps 64(%rsp),%xmm6 - pxor %xmm7,%xmm13 - movaps 80(%rsp),%xmm7 - movdqu %xmm8,(%rsi) - movdqu %xmm9,16(%rsi) - movdqu %xmm10,32(%rsi) - movdqu %xmm11,48(%rsi) - movdqu %xmm12,64(%rsi) - movdqu %xmm13,80(%rsi) - leaq 96(%rsi),%rsi - - subq $6,%rdx - jnc .Lctr32_loop6 - - addq $6,%rdx - jz .Lctr32_done - - leal -48(%r10),%eax - leaq -80(%rcx,%r10,1),%rcx - negl %eax - shrl $4,%eax - jmp .Lctr32_tail - -.align 32 -.Lctr32_loop8: - addl $8,%r8d - movdqa 96(%rsp),%xmm8 -.byte 102,15,56,220,209 - movl %r8d,%r9d - movdqa 112(%rsp),%xmm9 -.byte 102,15,56,220,217 - bswapl %r9d - movups 32-128(%rcx),%xmm0 -.byte 102,15,56,220,225 - xorl %ebp,%r9d - nop -.byte 102,15,56,220,233 - movl %r9d,0+12(%rsp) - leaq 1(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 48-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 
102,15,56,220,232 - movl %r9d,16+12(%rsp) - leaq 2(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 64-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,32+12(%rsp) - leaq 3(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 80-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movl %r9d,48+12(%rsp) - leaq 4(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 96-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,64+12(%rsp) - leaq 5(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 112-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movl %r9d,80+12(%rsp) - leaq 6(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 128-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,96+12(%rsp) - leaq 7(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 144-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - xorl %ebp,%r9d - movdqu 0(%rdi),%xmm10 -.byte 102,15,56,220,232 - movl %r9d,112+12(%rsp) - 
cmpl $11,%eax -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 160-128(%rcx),%xmm0 - - jb .Lctr32_enc_done - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 176-128(%rcx),%xmm1 - -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 192-128(%rcx),%xmm0 - je .Lctr32_enc_done - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 208-128(%rcx),%xmm1 - -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 224-128(%rcx),%xmm0 - jmp .Lctr32_enc_done - -.align 16 -.Lctr32_enc_done: - movdqu 16(%rdi),%xmm11 - pxor %xmm0,%xmm10 - movdqu 32(%rdi),%xmm12 - pxor %xmm0,%xmm11 - movdqu 48(%rdi),%xmm13 - pxor %xmm0,%xmm12 - movdqu 64(%rdi),%xmm14 - pxor %xmm0,%xmm13 - movdqu 80(%rdi),%xmm15 - pxor %xmm0,%xmm14 - pxor %xmm0,%xmm15 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movdqu 96(%rdi),%xmm1 - leaq 128(%rdi),%rdi - -.byte 102,65,15,56,221,210 - pxor %xmm0,%xmm1 - movdqu 112-128(%rdi),%xmm10 -.byte 102,65,15,56,221,219 - pxor %xmm0,%xmm10 - movdqa 0(%rsp),%xmm11 -.byte 102,65,15,56,221,228 -.byte 102,65,15,56,221,237 - movdqa 16(%rsp),%xmm12 - movdqa 32(%rsp),%xmm13 -.byte 102,65,15,56,221,246 -.byte 
102,65,15,56,221,255 - movdqa 48(%rsp),%xmm14 - movdqa 64(%rsp),%xmm15 -.byte 102,68,15,56,221,193 - movdqa 80(%rsp),%xmm0 - movups 16-128(%rcx),%xmm1 -.byte 102,69,15,56,221,202 - - movups %xmm2,(%rsi) - movdqa %xmm11,%xmm2 - movups %xmm3,16(%rsi) - movdqa %xmm12,%xmm3 - movups %xmm4,32(%rsi) - movdqa %xmm13,%xmm4 - movups %xmm5,48(%rsi) - movdqa %xmm14,%xmm5 - movups %xmm6,64(%rsi) - movdqa %xmm15,%xmm6 - movups %xmm7,80(%rsi) - movdqa %xmm0,%xmm7 - movups %xmm8,96(%rsi) - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - - subq $8,%rdx - jnc .Lctr32_loop8 - - addq $8,%rdx - jz .Lctr32_done - leaq -128(%rcx),%rcx - -.Lctr32_tail: - - - leaq 16(%rcx),%rcx - cmpq $4,%rdx - jb .Lctr32_loop3 - je .Lctr32_loop4 - - - shll $4,%eax - movdqa 96(%rsp),%xmm8 - pxor %xmm9,%xmm9 - - movups 16(%rcx),%xmm0 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - leaq 32-16(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,220,225 - addq $16,%rax - movups (%rdi),%xmm10 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 - movups 16(%rdi),%xmm11 - movups 32(%rdi),%xmm12 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 - - call .Lenc_loop8_enter - - movdqu 48(%rdi),%xmm13 - pxor %xmm10,%xmm2 - movdqu 64(%rdi),%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm10,%xmm6 - movdqu %xmm5,48(%rsi) - movdqu %xmm6,64(%rsi) - cmpq $6,%rdx - jb .Lctr32_done - - movups 80(%rdi),%xmm11 - xorps %xmm11,%xmm7 - movups %xmm7,80(%rsi) - je .Lctr32_done - - movups 96(%rdi),%xmm12 - xorps %xmm12,%xmm8 - movups %xmm8,96(%rsi) - jmp .Lctr32_done - -.align 32 -.Lctr32_loop4: -.byte 102,15,56,220,209 - leaq 16(%rcx),%rcx - decl %eax -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%rcx),%xmm1 - jnz .Lctr32_loop4 -.byte 102,15,56,221,209 -.byte 102,15,56,221,217 - movups (%rdi),%xmm10 - movups 16(%rdi),%xmm11 -.byte 102,15,56,221,225 -.byte 102,15,56,221,233 - movups 
32(%rdi),%xmm12 - movups 48(%rdi),%xmm13 - - xorps %xmm10,%xmm2 - movups %xmm2,(%rsi) - xorps %xmm11,%xmm3 - movups %xmm3,16(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm4,32(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm5,48(%rsi) - jmp .Lctr32_done - -.align 32 -.Lctr32_loop3: -.byte 102,15,56,220,209 - leaq 16(%rcx),%rcx - decl %eax -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%rcx),%xmm1 - jnz .Lctr32_loop3 -.byte 102,15,56,221,209 -.byte 102,15,56,221,217 -.byte 102,15,56,221,225 - - movups (%rdi),%xmm10 - xorps %xmm10,%xmm2 - movups %xmm2,(%rsi) - cmpq $2,%rdx - jb .Lctr32_done - - movups 16(%rdi),%xmm11 - xorps %xmm11,%xmm3 - movups %xmm3,16(%rsi) - je .Lctr32_done - - movups 32(%rdi),%xmm12 - xorps %xmm12,%xmm4 - movups %xmm4,32(%rsi) - -.Lctr32_done: - xorps %xmm0,%xmm0 - xorl %ebp,%ebp - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - movaps %xmm0,0(%rsp) - pxor %xmm8,%xmm8 - movaps %xmm0,16(%rsp) - pxor %xmm9,%xmm9 - movaps %xmm0,32(%rsp) - pxor %xmm10,%xmm10 - movaps %xmm0,48(%rsp) - pxor %xmm11,%xmm11 - movaps %xmm0,64(%rsp) - pxor %xmm12,%xmm12 - movaps %xmm0,80(%rsp) - pxor %xmm13,%xmm13 - movaps %xmm0,96(%rsp) - pxor %xmm14,%xmm14 - movaps %xmm0,112(%rsp) - pxor %xmm15,%xmm15 - movq -8(%r11),%rbp -.cfi_restore %rbp - leaq (%r11),%rsp -.cfi_def_cfa_register %rsp -.Lctr32_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks -.globl aes_hw_cbc_encrypt -.hidden aes_hw_cbc_encrypt -.type aes_hw_cbc_encrypt,@function -.align 16 -aes_hw_cbc_encrypt: -.cfi_startproc - testq %rdx,%rdx - jz .Lcbc_ret - - movl 240(%rcx),%r10d - movq %rcx,%r11 - testl %r9d,%r9d - jz .Lcbc_decrypt - - movups (%r8),%xmm2 - movl %r10d,%eax - cmpq $16,%rdx - jb .Lcbc_enc_tail - subq $16,%rdx - jmp .Lcbc_enc_loop -.align 16 -.Lcbc_enc_loop: - movups (%rdi),%xmm3 - leaq 16(%rdi),%rdi - - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - xorps 
%xmm0,%xmm3 - leaq 32(%rcx),%rcx - xorps %xmm3,%xmm2 -.Loop_enc1_6: -.byte 102,15,56,220,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_enc1_6 -.byte 102,15,56,221,209 - movl %r10d,%eax - movq %r11,%rcx - movups %xmm2,0(%rsi) - leaq 16(%rsi),%rsi - subq $16,%rdx - jnc .Lcbc_enc_loop - addq $16,%rdx - jnz .Lcbc_enc_tail - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%r8) - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - jmp .Lcbc_ret - -.Lcbc_enc_tail: - movq %rdx,%rcx - xchgq %rdi,%rsi -.long 0x9066A4F3 - movl $16,%ecx - subq %rdx,%rcx - xorl %eax,%eax -.long 0x9066AAF3 - leaq -16(%rdi),%rdi - movl %r10d,%eax - movq %rdi,%rsi - movq %r11,%rcx - xorq %rdx,%rdx - jmp .Lcbc_enc_loop - -.align 16 -.Lcbc_decrypt: - cmpq $16,%rdx - jne .Lcbc_decrypt_bulk - - - - movdqu (%rdi),%xmm2 - movdqu (%r8),%xmm3 - movdqa %xmm2,%xmm4 - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -.Loop_dec1_7: -.byte 102,15,56,222,209 - decl %r10d - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_dec1_7 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movdqu %xmm4,(%r8) - xorps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp .Lcbc_ret -.align 16 -.Lcbc_decrypt_bulk: - leaq (%rsp),%r11 -.cfi_def_cfa_register %r11 - pushq %rbp -.cfi_offset %rbp,-16 - subq $16,%rsp - andq $-16,%rsp - movq %rcx,%rbp - movups (%r8),%xmm10 - movl %r10d,%eax - cmpq $0x50,%rdx - jbe .Lcbc_dec_tail - - movups (%rcx),%xmm0 - movdqu 0(%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqa %xmm2,%xmm11 - movdqu 32(%rdi),%xmm4 - movdqa %xmm3,%xmm12 - movdqu 48(%rdi),%xmm5 - movdqa %xmm4,%xmm13 - movdqu 64(%rdi),%xmm6 - movdqa %xmm5,%xmm14 - movdqu 80(%rdi),%xmm7 - movdqa %xmm6,%xmm15 - leaq OPENSSL_ia32cap_P(%rip),%r9 - movl 4(%r9),%r9d - cmpq $0x70,%rdx - jbe .Lcbc_dec_six_or_seven - - andl $71303168,%r9d - subq $0x50,%rdx - cmpl $4194304,%r9d - je .Lcbc_dec_loop6_enter - subq $0x20,%rdx - leaq 112(%rcx),%rcx - jmp 
.Lcbc_dec_loop8_enter -.align 16 -.Lcbc_dec_loop8: - movups %xmm9,(%rsi) - leaq 16(%rsi),%rsi -.Lcbc_dec_loop8_enter: - movdqu 96(%rdi),%xmm8 - pxor %xmm0,%xmm2 - movdqu 112(%rdi),%xmm9 - pxor %xmm0,%xmm3 - movups 16-112(%rcx),%xmm1 - pxor %xmm0,%xmm4 - movq $-1,%rbp - cmpq $0x70,%rdx - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 - -.byte 102,15,56,222,209 - pxor %xmm0,%xmm9 - movups 32-112(%rcx),%xmm0 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 - adcq $0,%rbp - andq $128,%rbp -.byte 102,68,15,56,222,201 - addq %rdi,%rbp - movups 48-112(%rcx),%xmm1 -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 64-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 80-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 96-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 112-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 128-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 
-.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 144-112(%rcx),%xmm1 - cmpl $11,%eax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 160-112(%rcx),%xmm0 - jb .Lcbc_dec_done -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 176-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 192-112(%rcx),%xmm0 - je .Lcbc_dec_done -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 208-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 224-112(%rcx),%xmm0 - jmp .Lcbc_dec_done -.align 16 -.Lcbc_dec_done: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm10 - pxor %xmm0,%xmm11 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - pxor %xmm0,%xmm12 - pxor %xmm0,%xmm13 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - pxor %xmm0,%xmm14 - pxor %xmm0,%xmm15 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movdqu 80(%rdi),%xmm1 - -.byte 102,65,15,56,223,210 - movdqu 96(%rdi),%xmm10 - pxor %xmm0,%xmm1 -.byte 102,65,15,56,223,219 - pxor %xmm0,%xmm10 - movdqu 112(%rdi),%xmm0 -.byte 102,65,15,56,223,228 - leaq 128(%rdi),%rdi - movdqu 0(%rbp),%xmm11 
-.byte 102,65,15,56,223,237 -.byte 102,65,15,56,223,246 - movdqu 16(%rbp),%xmm12 - movdqu 32(%rbp),%xmm13 -.byte 102,65,15,56,223,255 -.byte 102,68,15,56,223,193 - movdqu 48(%rbp),%xmm14 - movdqu 64(%rbp),%xmm15 -.byte 102,69,15,56,223,202 - movdqa %xmm0,%xmm10 - movdqu 80(%rbp),%xmm1 - movups -112(%rcx),%xmm0 - - movups %xmm2,(%rsi) - movdqa %xmm11,%xmm2 - movups %xmm3,16(%rsi) - movdqa %xmm12,%xmm3 - movups %xmm4,32(%rsi) - movdqa %xmm13,%xmm4 - movups %xmm5,48(%rsi) - movdqa %xmm14,%xmm5 - movups %xmm6,64(%rsi) - movdqa %xmm15,%xmm6 - movups %xmm7,80(%rsi) - movdqa %xmm1,%xmm7 - movups %xmm8,96(%rsi) - leaq 112(%rsi),%rsi - - subq $0x80,%rdx - ja .Lcbc_dec_loop8 - - movaps %xmm9,%xmm2 - leaq -112(%rcx),%rcx - addq $0x70,%rdx - jle .Lcbc_dec_clear_tail_collected - movups %xmm9,(%rsi) - leaq 16(%rsi),%rsi - cmpq $0x50,%rdx - jbe .Lcbc_dec_tail - - movaps %xmm11,%xmm2 -.Lcbc_dec_six_or_seven: - cmpq $0x60,%rdx - ja .Lcbc_dec_seven - - movaps %xmm7,%xmm8 - call _aesni_decrypt6 - pxor %xmm10,%xmm2 - movaps %xmm8,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - pxor %xmm15,%xmm7 - movdqu %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - leaq 80(%rsi),%rsi - movdqa %xmm7,%xmm2 - pxor %xmm7,%xmm7 - jmp .Lcbc_dec_tail_collected - -.align 16 -.Lcbc_dec_seven: - movups 96(%rdi),%xmm8 - xorps %xmm9,%xmm9 - call _aesni_decrypt8 - movups 80(%rdi),%xmm9 - pxor %xmm10,%xmm2 - movups 96(%rdi),%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - pxor %xmm15,%xmm7 - movdqu %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - pxor %xmm9,%xmm8 - movdqu %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - leaq 96(%rsi),%rsi - movdqa %xmm8,%xmm2 - pxor 
%xmm8,%xmm8 - pxor %xmm9,%xmm9 - jmp .Lcbc_dec_tail_collected - -.align 16 -.Lcbc_dec_loop6: - movups %xmm7,(%rsi) - leaq 16(%rsi),%rsi - movdqu 0(%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqa %xmm2,%xmm11 - movdqu 32(%rdi),%xmm4 - movdqa %xmm3,%xmm12 - movdqu 48(%rdi),%xmm5 - movdqa %xmm4,%xmm13 - movdqu 64(%rdi),%xmm6 - movdqa %xmm5,%xmm14 - movdqu 80(%rdi),%xmm7 - movdqa %xmm6,%xmm15 -.Lcbc_dec_loop6_enter: - leaq 96(%rdi),%rdi - movdqa %xmm7,%xmm8 - - call _aesni_decrypt6 - - pxor %xmm10,%xmm2 - movdqa %xmm8,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm14,%xmm6 - movq %rbp,%rcx - movdqu %xmm5,48(%rsi) - pxor %xmm15,%xmm7 - movl %r10d,%eax - movdqu %xmm6,64(%rsi) - leaq 80(%rsi),%rsi - subq $0x60,%rdx - ja .Lcbc_dec_loop6 - - movdqa %xmm7,%xmm2 - addq $0x50,%rdx - jle .Lcbc_dec_clear_tail_collected - movups %xmm7,(%rsi) - leaq 16(%rsi),%rsi - -.Lcbc_dec_tail: - movups (%rdi),%xmm2 - subq $0x10,%rdx - jbe .Lcbc_dec_one - - movups 16(%rdi),%xmm3 - movaps %xmm2,%xmm11 - subq $0x10,%rdx - jbe .Lcbc_dec_two - - movups 32(%rdi),%xmm4 - movaps %xmm3,%xmm12 - subq $0x10,%rdx - jbe .Lcbc_dec_three - - movups 48(%rdi),%xmm5 - movaps %xmm4,%xmm13 - subq $0x10,%rdx - jbe .Lcbc_dec_four - - movups 64(%rdi),%xmm6 - movaps %xmm5,%xmm14 - movaps %xmm6,%xmm15 - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - pxor %xmm10,%xmm2 - movaps %xmm15,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - leaq 64(%rsi),%rsi - movdqa %xmm6,%xmm2 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - subq $0x10,%rdx - jmp .Lcbc_dec_tail_collected - -.align 16 -.Lcbc_dec_one: - movaps %xmm2,%xmm11 - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -.Loop_dec1_8: -.byte 102,15,56,222,209 - decl 
%eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz .Loop_dec1_8 -.byte 102,15,56,223,209 - xorps %xmm10,%xmm2 - movaps %xmm11,%xmm10 - jmp .Lcbc_dec_tail_collected -.align 16 -.Lcbc_dec_two: - movaps %xmm3,%xmm12 - call _aesni_decrypt2 - pxor %xmm10,%xmm2 - movaps %xmm12,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - movdqa %xmm3,%xmm2 - pxor %xmm3,%xmm3 - leaq 16(%rsi),%rsi - jmp .Lcbc_dec_tail_collected -.align 16 -.Lcbc_dec_three: - movaps %xmm4,%xmm13 - call _aesni_decrypt3 - pxor %xmm10,%xmm2 - movaps %xmm13,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movdqa %xmm4,%xmm2 - pxor %xmm4,%xmm4 - leaq 32(%rsi),%rsi - jmp .Lcbc_dec_tail_collected -.align 16 -.Lcbc_dec_four: - movaps %xmm5,%xmm14 - call _aesni_decrypt4 - pxor %xmm10,%xmm2 - movaps %xmm14,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movdqa %xmm5,%xmm2 - pxor %xmm5,%xmm5 - leaq 48(%rsi),%rsi - jmp .Lcbc_dec_tail_collected - -.align 16 -.Lcbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 -.Lcbc_dec_tail_collected: - movups %xmm10,(%r8) - andq $15,%rdx - jnz .Lcbc_dec_tail_partial - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp .Lcbc_dec_ret -.align 16 -.Lcbc_dec_tail_partial: - movaps %xmm2,(%rsp) - pxor %xmm2,%xmm2 - movq $16,%rcx - movq %rsi,%rdi - subq %rdx,%rcx - leaq (%rsp),%rsi -.long 0x9066A4F3 - movdqa %xmm2,(%rsp) - -.Lcbc_dec_ret: - xorps %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movq -8(%r11),%rbp -.cfi_restore %rbp - leaq (%r11),%rsp -.cfi_def_cfa_register %rsp -.Lcbc_ret: - .byte 0xf3,0xc3 -.cfi_endproc -.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt -.globl aes_hw_set_decrypt_key -.hidden aes_hw_set_decrypt_key -.type aes_hw_set_decrypt_key,@function -.align 16 -aes_hw_set_decrypt_key: 
-.cfi_startproc -.byte 0x48,0x83,0xEC,0x08 -.cfi_adjust_cfa_offset 8 - call __aesni_set_encrypt_key - shll $4,%esi - testl %eax,%eax - jnz .Ldec_key_ret - leaq 16(%rdx,%rsi,1),%rdi - - movups (%rdx),%xmm0 - movups (%rdi),%xmm1 - movups %xmm0,(%rdi) - movups %xmm1,(%rdx) - leaq 16(%rdx),%rdx - leaq -16(%rdi),%rdi - -.Ldec_key_inverse: - movups (%rdx),%xmm0 - movups (%rdi),%xmm1 -.byte 102,15,56,219,192 -.byte 102,15,56,219,201 - leaq 16(%rdx),%rdx - leaq -16(%rdi),%rdi - movups %xmm0,16(%rdi) - movups %xmm1,-16(%rdx) - cmpq %rdx,%rdi - ja .Ldec_key_inverse - - movups (%rdx),%xmm0 -.byte 102,15,56,219,192 - pxor %xmm1,%xmm1 - movups %xmm0,(%rdi) - pxor %xmm0,%xmm0 -.Ldec_key_ret: - addq $8,%rsp -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_set_decrypt_key: -.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key -.globl aes_hw_set_encrypt_key -.hidden aes_hw_set_encrypt_key -.type aes_hw_set_encrypt_key,@function -.align 16 -aes_hw_set_encrypt_key: -__aesni_set_encrypt_key: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST - movb $1,BORINGSSL_function_hit+3(%rip) -#endif -.byte 0x48,0x83,0xEC,0x08 -.cfi_adjust_cfa_offset 8 - movq $-1,%rax - testq %rdi,%rdi - jz .Lenc_key_ret - testq %rdx,%rdx - jz .Lenc_key_ret - - movups (%rdi),%xmm0 - xorps %xmm4,%xmm4 - leaq OPENSSL_ia32cap_P(%rip),%r10 - movl 4(%r10),%r10d - andl $268437504,%r10d - leaq 16(%rdx),%rax - cmpl $256,%esi - je .L14rounds - cmpl $192,%esi - je .L12rounds - cmpl $128,%esi - jne .Lbad_keybits - -.L10rounds: - movl $9,%esi - cmpl $268435456,%r10d - je .L10rounds_alt - - movups %xmm0,(%rdx) -.byte 102,15,58,223,200,1 - call .Lkey_expansion_128_cold -.byte 102,15,58,223,200,2 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,4 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,8 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,16 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,32 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,64 - call .Lkey_expansion_128 -.byte 
102,15,58,223,200,128 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,27 - call .Lkey_expansion_128 -.byte 102,15,58,223,200,54 - call .Lkey_expansion_128 - movups %xmm0,(%rax) - movl %esi,80(%rax) - xorl %eax,%eax - jmp .Lenc_key_ret - -.align 16 -.L10rounds_alt: - movdqa .Lkey_rotate(%rip),%xmm5 - movl $8,%r10d - movdqa .Lkey_rcon1(%rip),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,(%rdx) - jmp .Loop_key128 - -.align 16 -.Loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leaq 16(%rax),%rax - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%rax) - movdqa %xmm0,%xmm2 - - decl %r10d - jnz .Loop_key128 - - movdqa .Lkey_rcon1b(%rip),%xmm4 - -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,(%rax) - - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%rax) - - movl %esi,96(%rax) - xorl %eax,%eax - jmp .Lenc_key_ret - -.align 16 -.L12rounds: - movq 16(%rdi),%xmm2 - movl $11,%esi - cmpl $268435456,%r10d - je .L12rounds_alt - - movups %xmm0,(%rdx) -.byte 102,15,58,223,202,1 - call .Lkey_expansion_192a_cold -.byte 102,15,58,223,202,2 - call .Lkey_expansion_192b -.byte 102,15,58,223,202,4 - call .Lkey_expansion_192a -.byte 102,15,58,223,202,8 - call .Lkey_expansion_192b -.byte 102,15,58,223,202,16 - call .Lkey_expansion_192a -.byte 102,15,58,223,202,32 - call .Lkey_expansion_192b -.byte 102,15,58,223,202,64 - call .Lkey_expansion_192a -.byte 102,15,58,223,202,128 - call .Lkey_expansion_192b - movups %xmm0,(%rax) - movl %esi,48(%rax) - xorq %rax,%rax - 
jmp .Lenc_key_ret - -.align 16 -.L12rounds_alt: - movdqa .Lkey_rotate192(%rip),%xmm5 - movdqa .Lkey_rcon1(%rip),%xmm4 - movl $8,%r10d - movdqu %xmm0,(%rdx) - jmp .Loop_key192 - -.align 16 -.Loop_key192: - movq %xmm2,0(%rax) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leaq 24(%rax),%rax - - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - - pshufd $0xff,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%rax) - - decl %r10d - jnz .Loop_key192 - - movl %esi,32(%rax) - xorl %eax,%eax - jmp .Lenc_key_ret - -.align 16 -.L14rounds: - movups 16(%rdi),%xmm2 - movl $13,%esi - leaq 16(%rax),%rax - cmpl $268435456,%r10d - je .L14rounds_alt - - movups %xmm0,(%rdx) - movups %xmm2,16(%rdx) -.byte 102,15,58,223,202,1 - call .Lkey_expansion_256a_cold -.byte 102,15,58,223,200,1 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,2 - call .Lkey_expansion_256a -.byte 102,15,58,223,200,2 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,4 - call .Lkey_expansion_256a -.byte 102,15,58,223,200,4 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,8 - call .Lkey_expansion_256a -.byte 102,15,58,223,200,8 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,16 - call .Lkey_expansion_256a -.byte 102,15,58,223,200,16 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,32 - call .Lkey_expansion_256a -.byte 102,15,58,223,200,32 - call .Lkey_expansion_256b -.byte 102,15,58,223,202,64 - call .Lkey_expansion_256a - movups %xmm0,(%rax) - movl %esi,16(%rax) - xorq %rax,%rax - jmp .Lenc_key_ret - -.align 16 -.L14rounds_alt: - movdqa .Lkey_rotate(%rip),%xmm5 - movdqa .Lkey_rcon1(%rip),%xmm4 - movl $7,%r10d - movdqu %xmm0,0(%rdx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,16(%rdx) - jmp .Loop_key256 - -.align 16 -.Loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - - movdqa %xmm0,%xmm3 - 
pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - - pxor %xmm2,%xmm0 - movdqu %xmm0,(%rax) - - decl %r10d - jz .Ldone_key256 - - pshufd $0xff,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%rax) - leaq 32(%rax),%rax - movdqa %xmm2,%xmm1 - - jmp .Loop_key256 - -.Ldone_key256: - movl %esi,16(%rax) - xorl %eax,%eax - jmp .Lenc_key_ret - -.align 16 -.Lbad_keybits: - movq $-2,%rax -.Lenc_key_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - addq $8,%rsp -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_set_encrypt_key: - -.align 16 -.Lkey_expansion_128: - movups %xmm0,(%rax) - leaq 16(%rax),%rax -.Lkey_expansion_128_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - .byte 0xf3,0xc3 - -.align 16 -.Lkey_expansion_192a: - movups %xmm0,(%rax) - leaq 16(%rax),%rax -.Lkey_expansion_192a_cold: - movaps %xmm2,%xmm5 -.Lkey_expansion_192b_warm: - shufps $16,%xmm0,%xmm4 - movdqa %xmm2,%xmm3 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - pslldq $4,%xmm3 - xorps %xmm4,%xmm0 - pshufd $85,%xmm1,%xmm1 - pxor %xmm3,%xmm2 - pxor %xmm1,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm3,%xmm2 - .byte 0xf3,0xc3 - -.align 16 -.Lkey_expansion_192b: - movaps %xmm0,%xmm3 - shufps $68,%xmm0,%xmm5 - movups %xmm5,(%rax) - shufps $78,%xmm2,%xmm3 - movups %xmm3,16(%rax) - leaq 32(%rax),%rax - jmp .Lkey_expansion_192b_warm - -.align 16 -.Lkey_expansion_256a: - movups %xmm2,(%rax) - leaq 16(%rax),%rax -.Lkey_expansion_256a_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - 
.byte 0xf3,0xc3 - -.align 16 -.Lkey_expansion_256b: - movups %xmm0,(%rax) - leaq 16(%rax),%rax - - shufps $16,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $140,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $170,%xmm1,%xmm1 - xorps %xmm1,%xmm2 - .byte 0xf3,0xc3 -.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key -.size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key -.align 64 -.Lbswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.Lincrement32: -.long 6,6,6,0 -.Lincrement64: -.long 1,0,0,0 -.Lxts_magic: -.long 0x87,0,1,0 -.Lincrement1: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -.Lkey_rotate: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -.Lkey_rotate192: -.long 0x04070605,0x04070605,0x04070605,0x04070605 -.Lkey_rcon1: -.long 1,1,1,1 -.Lkey_rcon1b: -.long 0x1b,0x1b,0x1b,0x1b - -.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S deleted file mode 100644 index a44790b169..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S +++ /dev/null @@ -1,427 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - -.type gcm_gmult_ssse3, @function -.globl gcm_gmult_ssse3 -.hidden gcm_gmult_ssse3 -.align 16 -gcm_gmult_ssse3: -.cfi_startproc -.Lgmult_seh_begin: - movdqu (%rdi),%xmm0 - movdqa .Lreverse_bytes(%rip),%xmm10 - movdqa .Llow4_mask(%rip),%xmm2 - - -.byte 102,65,15,56,0,194 - - - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - - - - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -.Loop_row_1: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_1 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -.Loop_row_2: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_2 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $6,%rax -.Loop_row_3: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa 
%xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_3 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - -.byte 102,65,15,56,0,210 - movdqu %xmm2,(%rdi) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 -.Lgmult_seh_end: -.cfi_endproc -.size gcm_gmult_ssse3,.-gcm_gmult_ssse3 - - - - - -.type gcm_ghash_ssse3, @function -.globl gcm_ghash_ssse3 -.hidden gcm_ghash_ssse3 -.align 16 -gcm_ghash_ssse3: -.Lghash_seh_begin: -.cfi_startproc - movdqu (%rdi),%xmm0 - movdqa .Lreverse_bytes(%rip),%xmm10 - movdqa .Llow4_mask(%rip),%xmm11 - - - andq $-16,%rcx - - - -.byte 102,65,15,56,0,194 - - - pxor %xmm3,%xmm3 -.Loop_ghash: - - movdqu (%rdx),%xmm1 -.byte 102,65,15,56,0,202 - pxor %xmm1,%xmm0 - - - movdqa %xmm11,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm11,%xmm0 - - - - - pxor %xmm2,%xmm2 - - movq $5,%rax -.Loop_row_4: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_4 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -.Loop_row_5: 
- movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_5 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $6,%rax -.Loop_row_6: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz .Loop_row_6 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movdqa %xmm2,%xmm0 - - - leaq -256(%rsi),%rsi - - - leaq 16(%rdx),%rdx - subq $16,%rcx - jnz .Loop_ghash - - -.byte 102,65,15,56,0,194 - movdqu %xmm0,(%rdi) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 -.Lghash_seh_end: -.cfi_endproc -.size gcm_ghash_ssse3,.-gcm_ghash_ssse3 - -.align 16 - - -.Lreverse_bytes: -.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - -.Llow4_mask: -.quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S deleted file 
mode 100644 index 674e2dabed..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S +++ /dev/null @@ -1,1872 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -.globl gcm_gmult_4bit -.hidden gcm_gmult_4bit -.type gcm_gmult_4bit,@function -.align 16 -gcm_gmult_4bit: -.cfi_startproc - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $280,%rsp -.cfi_adjust_cfa_offset 280 -.Lgmult_prologue: - - movzbq 15(%rdi),%r8 - leaq .Lrem_4bit(%rip),%r11 - xorq %rax,%rax - xorq %rbx,%rbx - movb %r8b,%al - movb %r8b,%bl - shlb $4,%al - movq $14,%rcx - movq 8(%rsi,%rax,1),%r8 - movq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - movq %r8,%rdx - jmp .Loop1 - -.align 16 -.Loop1: - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - movb (%rdi,%rcx,1),%al - shrq $4,%r9 - xorq 8(%rsi,%rbx,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rbx,1),%r9 - movb %al,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - shlb $4,%al - xorq %r10,%r8 - decq %rcx - js .Lbreak1 - - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rax,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - xorq %r10,%r8 - jmp .Loop1 - -.align 16 -.Lbreak1: - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rax,1),%r8 - shlq 
$60,%r10 - xorq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - xorq %r10,%r8 - - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rbx,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rbx,1),%r9 - xorq %r10,%r8 - xorq (%r11,%rdx,8),%r9 - - bswapq %r8 - bswapq %r9 - movq %r8,8(%rdi) - movq %r9,(%rdi) - - leaq 280+48(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lgmult_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_gmult_4bit,.-gcm_gmult_4bit -.globl gcm_ghash_4bit -.hidden gcm_ghash_4bit -.type gcm_ghash_4bit,@function -.align 16 -gcm_ghash_4bit: -.cfi_startproc - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $280,%rsp -.cfi_adjust_cfa_offset 280 -.Lghash_prologue: - movq %rdx,%r14 - movq %rcx,%r15 - subq $-128,%rsi - leaq 16+128(%rsp),%rbp - xorl %edx,%edx - movq 0+0-128(%rsi),%r8 - movq 0+8-128(%rsi),%rax - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq 16+0-128(%rsi),%r9 - shlb $4,%dl - movq 16+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,0(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,0(%rbp) - movq 32+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,0-128(%rbp) - movq 32+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,1(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,8(%rbp) - movq 48+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,8-128(%rbp) - movq 48+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,2(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,16(%rbp) - movq 64+0-128(%rsi),%r8 - shlb 
$4,%dl - movq %rax,16-128(%rbp) - movq 64+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,3(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,24(%rbp) - movq 80+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,24-128(%rbp) - movq 80+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,4(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,32(%rbp) - movq 96+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,32-128(%rbp) - movq 96+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,5(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,40(%rbp) - movq 112+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,40-128(%rbp) - movq 112+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,6(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,48(%rbp) - movq 128+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,48-128(%rbp) - movq 128+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,7(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,56(%rbp) - movq 144+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,56-128(%rbp) - movq 144+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,8(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,64(%rbp) - movq 160+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,64-128(%rbp) - movq 160+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,9(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,72(%rbp) - movq 176+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,72-128(%rbp) - movq 176+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,10(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,80(%rbp) - movq 192+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,80-128(%rbp) - movq 192+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,11(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,88(%rbp) - movq 
208+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,88-128(%rbp) - movq 208+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,12(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,96(%rbp) - movq 224+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,96-128(%rbp) - movq 224+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,13(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,104(%rbp) - movq 240+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,104-128(%rbp) - movq 240+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,14(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,112(%rbp) - shlb $4,%dl - movq %rax,112-128(%rbp) - shlq $60,%r10 - movb %dl,15(%rsp) - orq %r10,%rbx - movq %r9,120(%rbp) - movq %rbx,120-128(%rbp) - addq $-128,%rsi - movq 8(%rdi),%r8 - movq 0(%rdi),%r9 - addq %r14,%r15 - leaq .Lrem_8bit(%rip),%r11 - jmp .Louter_loop -.align 16 -.Louter_loop: - xorq (%r14),%r9 - movq 8(%r14),%rdx - leaq 16(%r14),%r14 - xorq %r8,%rdx - movq %r9,(%rdi) - movq %rdx,8(%rdi) - shrq $32,%rdx - xorq %rax,%rax - roll $8,%edx - movb %dl,%al - movzbl %dl,%ebx - shlb $4,%al - shrl $4,%ebx - roll $8,%edx - movq 8(%rsi,%rax,1),%r8 - movq (%rsi,%rax,1),%r9 - movb %dl,%al - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - xorq %r8,%r12 - movq %r9,%r10 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - 
movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 8(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 4(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq 
(%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 0(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - 
movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - andl $240,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl -4(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - movzwq (%r11,%r12,2),%r12 - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - shlq $48,%r12 - xorq %r10,%r8 - xorq %r12,%r9 - movzbq %r8b,%r13 - shrq $4,%r8 - movq %r9,%r10 - shlb $4,%r13b - shrq $4,%r9 - xorq 8(%rsi,%rcx,1),%r8 - movzwq (%r11,%r13,2),%r13 - shlq $60,%r10 - xorq (%rsi,%rcx,1),%r9 - xorq %r10,%r8 - shlq $48,%r13 - bswapq %r8 - xorq %r13,%r9 - bswapq %r9 - cmpq %r15,%r14 - jb .Louter_loop - movq %r8,8(%rdi) - movq %r9,(%rdi) - - leaq 280+48(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq 0(%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lghash_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_ghash_4bit,.-gcm_ghash_4bit -.globl gcm_init_clmul -.hidden gcm_init_clmul -.type gcm_init_clmul,@function -.align 16 -gcm_init_clmul: -.cfi_startproc -.L_init_clmul: - movdqu (%rsi),%xmm2 - pshufd $78,%xmm2,%xmm2 
- - - pshufd $255,%xmm2,%xmm4 - movdqa %xmm2,%xmm3 - psllq $1,%xmm2 - pxor %xmm5,%xmm5 - psrlq $63,%xmm3 - pcmpgtd %xmm4,%xmm5 - pslldq $8,%xmm3 - por %xmm3,%xmm2 - - - pand .L0x1c2_polynomial(%rip),%xmm5 - pxor %xmm5,%xmm2 - - - pshufd $78,%xmm2,%xmm6 - movdqa %xmm2,%xmm0 - pxor %xmm2,%xmm6 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm2,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm2,%xmm3 - movdqu %xmm2,0(%rdi) - pxor %xmm0,%xmm4 - movdqu %xmm0,16(%rdi) -.byte 102,15,58,15,227,8 - movdqu %xmm4,32(%rdi) - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - movdqa %xmm0,%xmm5 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor %xmm0,%xmm3 - pxor 
%xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm5,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm5,%xmm3 - movdqu %xmm5,48(%rdi) - pxor %xmm0,%xmm4 - movdqu %xmm0,64(%rdi) -.byte 102,15,58,15,227,8 - movdqu %xmm4,80(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_init_clmul,.-gcm_init_clmul -.globl gcm_gmult_clmul -.hidden gcm_gmult_clmul -.type gcm_gmult_clmul,@function -.align 16 -gcm_gmult_clmul: -.cfi_startproc -.L_gmult_clmul: - movdqu (%rdi),%xmm0 - movdqa .Lbswap_mask(%rip),%xmm5 - movdqu (%rsi),%xmm2 - movdqu 32(%rsi),%xmm4 -.byte 102,15,56,0,197 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,197 - movdqu %xmm0,(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_gmult_clmul,.-gcm_gmult_clmul -.globl gcm_ghash_clmul -.hidden gcm_ghash_clmul -.type gcm_ghash_clmul,@function -.align 32 -gcm_ghash_clmul: -.cfi_startproc -.L_ghash_clmul: - movdqa .Lbswap_mask(%rip),%xmm10 - - movdqu (%rdi),%xmm0 - 
movdqu (%rsi),%xmm2 - movdqu 32(%rsi),%xmm7 -.byte 102,65,15,56,0,194 - - subq $0x10,%rcx - jz .Lodd_tail - - movdqu 16(%rsi),%xmm6 - leaq OPENSSL_ia32cap_P(%rip),%rax - movl 4(%rax),%eax - cmpq $0x30,%rcx - jb .Lskip4x - - andl $71303168,%eax - cmpl $4194304,%eax - je .Lskip4x - - subq $0x30,%rcx - movq $0xA040608020C0E000,%rax - movdqu 48(%rsi),%xmm14 - movdqu 64(%rsi),%xmm15 - - - - - movdqu 48(%rdx),%xmm3 - movdqu 32(%rdx),%xmm11 -.byte 102,65,15,56,0,218 -.byte 102,69,15,56,0,218 - movdqa %xmm3,%xmm5 - pshufd $78,%xmm3,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,68,218,0 -.byte 102,15,58,68,234,17 -.byte 102,15,58,68,231,0 - - movdqa %xmm11,%xmm13 - pshufd $78,%xmm11,%xmm12 - pxor %xmm11,%xmm12 -.byte 102,68,15,58,68,222,0 -.byte 102,68,15,58,68,238,17 -.byte 102,68,15,58,68,231,16 - xorps %xmm11,%xmm3 - xorps %xmm13,%xmm5 - movups 80(%rsi),%xmm7 - xorps %xmm12,%xmm4 - - movdqu 16(%rdx),%xmm11 - movdqu 0(%rdx),%xmm8 -.byte 102,69,15,56,0,218 -.byte 102,69,15,56,0,194 - movdqa %xmm11,%xmm13 - pshufd $78,%xmm11,%xmm12 - pxor %xmm8,%xmm0 - pxor %xmm11,%xmm12 -.byte 102,69,15,58,68,222,0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm8 - pxor %xmm0,%xmm8 -.byte 102,69,15,58,68,238,17 -.byte 102,68,15,58,68,231,0 - xorps %xmm11,%xmm3 - xorps %xmm13,%xmm5 - - leaq 64(%rdx),%rdx - subq $0x40,%rcx - jc .Ltail4x - - jmp .Lmod4_loop -.align 32 -.Lmod4_loop: -.byte 102,65,15,58,68,199,0 - xorps %xmm12,%xmm4 - movdqu 48(%rdx),%xmm11 -.byte 102,69,15,56,0,218 -.byte 102,65,15,58,68,207,17 - xorps %xmm3,%xmm0 - movdqu 32(%rdx),%xmm3 - movdqa %xmm11,%xmm13 -.byte 102,68,15,58,68,199,16 - pshufd $78,%xmm11,%xmm12 - xorps %xmm5,%xmm1 - pxor %xmm11,%xmm12 -.byte 102,65,15,56,0,218 - movups 32(%rsi),%xmm7 - xorps %xmm4,%xmm8 -.byte 102,68,15,58,68,218,0 - pshufd $78,%xmm3,%xmm4 - - pxor %xmm0,%xmm8 - movdqa %xmm3,%xmm5 - pxor %xmm1,%xmm8 - pxor %xmm3,%xmm4 - movdqa %xmm8,%xmm9 -.byte 102,68,15,58,68,234,17 - pslldq $8,%xmm8 - psrldq $8,%xmm9 - pxor %xmm8,%xmm0 - movdqa 
.L7_mask(%rip),%xmm8 - pxor %xmm9,%xmm1 -.byte 102,76,15,110,200 - - pand %xmm0,%xmm8 -.byte 102,69,15,56,0,200 - pxor %xmm0,%xmm9 -.byte 102,68,15,58,68,231,0 - psllq $57,%xmm9 - movdqa %xmm9,%xmm8 - pslldq $8,%xmm9 -.byte 102,15,58,68,222,0 - psrldq $8,%xmm8 - pxor %xmm9,%xmm0 - pxor %xmm8,%xmm1 - movdqu 0(%rdx),%xmm8 - - movdqa %xmm0,%xmm9 - psrlq $1,%xmm0 -.byte 102,15,58,68,238,17 - xorps %xmm11,%xmm3 - movdqu 16(%rdx),%xmm11 -.byte 102,69,15,56,0,218 -.byte 102,15,58,68,231,16 - xorps %xmm13,%xmm5 - movups 80(%rsi),%xmm7 -.byte 102,69,15,56,0,194 - pxor %xmm9,%xmm1 - pxor %xmm0,%xmm9 - psrlq $5,%xmm0 - - movdqa %xmm11,%xmm13 - pxor %xmm12,%xmm4 - pshufd $78,%xmm11,%xmm12 - pxor %xmm9,%xmm0 - pxor %xmm8,%xmm1 - pxor %xmm11,%xmm12 -.byte 102,69,15,58,68,222,0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - movdqa %xmm0,%xmm1 -.byte 102,69,15,58,68,238,17 - xorps %xmm11,%xmm3 - pshufd $78,%xmm0,%xmm8 - pxor %xmm0,%xmm8 - -.byte 102,68,15,58,68,231,0 - xorps %xmm13,%xmm5 - - leaq 64(%rdx),%rdx - subq $0x40,%rcx - jnc .Lmod4_loop - -.Ltail4x: -.byte 102,65,15,58,68,199,0 -.byte 102,65,15,58,68,207,17 -.byte 102,68,15,58,68,199,16 - xorps %xmm12,%xmm4 - xorps %xmm3,%xmm0 - xorps %xmm5,%xmm1 - pxor %xmm0,%xmm1 - pxor %xmm4,%xmm8 - - pxor %xmm1,%xmm8 - pxor %xmm0,%xmm1 - - movdqa %xmm8,%xmm9 - psrldq $8,%xmm8 - pslldq $8,%xmm9 - pxor %xmm8,%xmm1 - pxor %xmm9,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - addq $0x40,%rcx - jz .Ldone - movdqu 32(%rsi),%xmm7 - subq $0x10,%rcx - jz .Lodd_tail -.Lskip4x: - - - - - - movdqu (%rdx),%xmm8 - movdqu 16(%rdx),%xmm3 -.byte 102,69,15,56,0,194 -.byte 102,65,15,56,0,218 - pxor %xmm8,%xmm0 - - movdqa 
%xmm3,%xmm5 - pshufd $78,%xmm3,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,68,218,0 -.byte 102,15,58,68,234,17 -.byte 102,15,58,68,231,0 - - leaq 32(%rdx),%rdx - nop - subq $0x20,%rcx - jbe .Leven_tail - nop - jmp .Lmod_loop - -.align 32 -.Lmod_loop: - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm8 - pshufd $78,%xmm0,%xmm4 - pxor %xmm0,%xmm4 - -.byte 102,15,58,68,198,0 -.byte 102,15,58,68,206,17 -.byte 102,15,58,68,231,16 - - pxor %xmm3,%xmm0 - pxor %xmm5,%xmm1 - movdqu (%rdx),%xmm9 - pxor %xmm0,%xmm8 -.byte 102,69,15,56,0,202 - movdqu 16(%rdx),%xmm3 - - pxor %xmm1,%xmm8 - pxor %xmm9,%xmm1 - pxor %xmm8,%xmm4 -.byte 102,65,15,56,0,218 - movdqa %xmm4,%xmm8 - psrldq $8,%xmm8 - pslldq $8,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm3,%xmm5 - - movdqa %xmm0,%xmm9 - movdqa %xmm0,%xmm8 - psllq $5,%xmm0 - pxor %xmm0,%xmm8 -.byte 102,15,58,68,218,0 - psllq $1,%xmm0 - pxor %xmm8,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm8 - pslldq $8,%xmm0 - psrldq $8,%xmm8 - pxor %xmm9,%xmm0 - pshufd $78,%xmm5,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm5,%xmm4 - - movdqa %xmm0,%xmm9 - psrlq $1,%xmm0 -.byte 102,15,58,68,234,17 - pxor %xmm9,%xmm1 - pxor %xmm0,%xmm9 - psrlq $5,%xmm0 - pxor %xmm9,%xmm0 - leaq 32(%rdx),%rdx - psrlq $1,%xmm0 -.byte 102,15,58,68,231,0 - pxor %xmm1,%xmm0 - - subq $0x20,%rcx - ja .Lmod_loop - -.Leven_tail: - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm8 - pshufd $78,%xmm0,%xmm4 - pxor %xmm0,%xmm4 - -.byte 102,15,58,68,198,0 -.byte 102,15,58,68,206,17 -.byte 102,15,58,68,231,16 - - pxor %xmm3,%xmm0 - pxor %xmm5,%xmm1 - pxor %xmm0,%xmm8 - pxor %xmm1,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm8 - psrldq $8,%xmm8 - pslldq $8,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - 
psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - testq %rcx,%rcx - jnz .Ldone - -.Lodd_tail: - movdqu (%rdx),%xmm8 -.byte 102,69,15,56,0,194 - pxor %xmm8,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,223,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.Ldone: -.byte 102,65,15,56,0,194 - movdqu %xmm0,(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_ghash_clmul,.-gcm_ghash_clmul -.globl gcm_init_avx -.hidden gcm_init_avx -.type gcm_init_avx,@function -.align 32 -gcm_init_avx: -.cfi_startproc - vzeroupper - - vmovdqu (%rsi),%xmm2 - vpshufd $78,%xmm2,%xmm2 - - - vpshufd $255,%xmm2,%xmm4 - vpsrlq $63,%xmm2,%xmm3 - vpsllq $1,%xmm2,%xmm2 - vpxor %xmm5,%xmm5,%xmm5 - vpcmpgtd %xmm4,%xmm5,%xmm5 - vpslldq $8,%xmm3,%xmm3 - vpor %xmm3,%xmm2,%xmm2 - - - vpand .L0x1c2_polynomial(%rip),%xmm5,%xmm5 - vpxor %xmm5,%xmm2,%xmm2 - - vpunpckhqdq %xmm2,%xmm2,%xmm6 - vmovdqa %xmm2,%xmm0 - vpxor %xmm2,%xmm6,%xmm6 - movq $4,%r10 - jmp .Linit_start_avx -.align 32 -.Linit_loop_avx: - vpalignr $8,%xmm3,%xmm4,%xmm5 - vmovdqu %xmm5,-16(%rdi) - vpunpckhqdq %xmm0,%xmm0,%xmm3 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 - vpxor %xmm0,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm0,%xmm0 - vpxor %xmm3,%xmm1,%xmm1 - vpsllq $57,%xmm0,%xmm3 - vpsllq $62,%xmm0,%xmm4 - 
vpxor %xmm3,%xmm4,%xmm4 - vpsllq $63,%xmm0,%xmm3 - vpxor %xmm3,%xmm4,%xmm4 - vpslldq $8,%xmm4,%xmm3 - vpsrldq $8,%xmm4,%xmm4 - vpxor %xmm3,%xmm0,%xmm0 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrlq $1,%xmm0,%xmm4 - vpxor %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $5,%xmm4,%xmm4 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $1,%xmm0,%xmm0 - vpxor %xmm1,%xmm0,%xmm0 -.Linit_start_avx: - vmovdqa %xmm0,%xmm5 - vpunpckhqdq %xmm0,%xmm0,%xmm3 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 - vpxor %xmm0,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm0,%xmm0 - vpxor %xmm3,%xmm1,%xmm1 - vpsllq $57,%xmm0,%xmm3 - vpsllq $62,%xmm0,%xmm4 - vpxor %xmm3,%xmm4,%xmm4 - vpsllq $63,%xmm0,%xmm3 - vpxor %xmm3,%xmm4,%xmm4 - vpslldq $8,%xmm4,%xmm3 - vpsrldq $8,%xmm4,%xmm4 - vpxor %xmm3,%xmm0,%xmm0 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrlq $1,%xmm0,%xmm4 - vpxor %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $5,%xmm4,%xmm4 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $1,%xmm0,%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - vpshufd $78,%xmm5,%xmm3 - vpshufd $78,%xmm0,%xmm4 - vpxor %xmm5,%xmm3,%xmm3 - vmovdqu %xmm5,0(%rdi) - vpxor %xmm0,%xmm4,%xmm4 - vmovdqu %xmm0,16(%rdi) - leaq 48(%rdi),%rdi - subq $1,%r10 - jnz .Linit_loop_avx - - vpalignr $8,%xmm4,%xmm3,%xmm5 - vmovdqu %xmm5,-16(%rdi) - - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_init_avx,.-gcm_init_avx -.globl gcm_gmult_avx -.hidden gcm_gmult_avx -.type gcm_gmult_avx,@function -.align 32 -gcm_gmult_avx: -.cfi_startproc - jmp .L_gmult_clmul -.cfi_endproc -.size gcm_gmult_avx,.-gcm_gmult_avx -.globl gcm_ghash_avx -.hidden gcm_ghash_avx -.type gcm_ghash_avx,@function -.align 32 -gcm_ghash_avx: -.cfi_startproc - vzeroupper - - vmovdqu (%rdi),%xmm10 - leaq .L0x1c2_polynomial(%rip),%r10 - leaq 64(%rsi),%rsi - vmovdqu .Lbswap_mask(%rip),%xmm13 - vpshufb %xmm13,%xmm10,%xmm10 - cmpq $0x80,%rcx - jb .Lshort_avx - subq 
$0x80,%rcx - - vmovdqu 112(%rdx),%xmm14 - vmovdqu 0-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm14 - vmovdqu 32-64(%rsi),%xmm7 - - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vmovdqu 96(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm14,%xmm9,%xmm9 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 16-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vmovdqu 80(%rdx),%xmm14 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 48-64(%rsi),%xmm6 - vpxor %xmm14,%xmm9,%xmm9 - vmovdqu 64(%rdx),%xmm15 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 80-64(%rsi),%xmm7 - - vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vmovdqu 48(%rdx),%xmm14 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 96-64(%rsi),%xmm6 - vpxor %xmm5,%xmm2,%xmm2 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 128-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu 32(%rdx),%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 112-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vmovdqu 16(%rdx),%xmm14 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 144-64(%rsi),%xmm6 
- vpxor %xmm5,%xmm2,%xmm2 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 176-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu (%rdx),%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 160-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 - - leaq 128(%rdx),%rdx - cmpq $0x80,%rcx - jb .Ltail_avx - - vpxor %xmm10,%xmm15,%xmm15 - subq $0x80,%rcx - jmp .Loop8x_avx - -.align 32 -.Loop8x_avx: - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vmovdqu 112(%rdx),%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpxor %xmm15,%xmm8,%xmm8 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 - vmovdqu 0-64(%rsi),%xmm6 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 - vmovdqu 32-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu 96(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm3,%xmm10,%xmm10 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vxorps %xmm4,%xmm11,%xmm11 - vmovdqu 16-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm5,%xmm12,%xmm12 - vxorps %xmm15,%xmm8,%xmm8 - - vmovdqu 80(%rdx),%xmm14 - vpxor %xmm10,%xmm12,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm11,%xmm12,%xmm12 - vpslldq $8,%xmm12,%xmm9 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vpsrldq $8,%xmm12,%xmm12 - vpxor %xmm9,%xmm10,%xmm10 - vmovdqu 48-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm14 - vxorps %xmm12,%xmm11,%xmm11 - vpxor %xmm1,%xmm4,%xmm4 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 80-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 64(%rdx),%xmm15 - vpalignr $8,%xmm10,%xmm10,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - 
vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vxorps %xmm15,%xmm8,%xmm8 - vpxor %xmm5,%xmm2,%xmm2 - - vmovdqu 48(%rdx),%xmm14 - vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 96-64(%rsi),%xmm6 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 128-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 32(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 112-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - vpxor %xmm5,%xmm2,%xmm2 - vxorps %xmm12,%xmm10,%xmm10 - - vmovdqu 16(%rdx),%xmm14 - vpalignr $8,%xmm10,%xmm10,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 144-64(%rsi),%xmm6 - vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 - vxorps %xmm11,%xmm12,%xmm12 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 176-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu (%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 160-64(%rsi),%xmm6 - vpxor %xmm12,%xmm15,%xmm15 - vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 - vpxor %xmm10,%xmm15,%xmm15 - - leaq 128(%rdx),%rdx - subq $0x80,%rcx - jnc .Loop8x_avx - - addq $0x80,%rcx - jmp .Ltail_no_xor_avx - -.align 32 -.Lshort_avx: - vmovdqu -16(%rdx,%rcx,1),%xmm14 - leaq (%rdx,%rcx,1),%rdx - vmovdqu 0-64(%rsi),%xmm6 
- vmovdqu 32-64(%rsi),%xmm7 - vpshufb %xmm13,%xmm14,%xmm15 - - vmovdqa %xmm0,%xmm3 - vmovdqa %xmm1,%xmm4 - vmovdqa %xmm2,%xmm5 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -32(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 16-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -48(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 48-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vmovdqu 80-64(%rsi),%xmm7 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -64(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -80(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 96-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vmovdqu 128-64(%rsi),%xmm7 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -96(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 
112-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz .Ltail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -112(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 144-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vmovq 184-64(%rsi),%xmm7 - subq $0x10,%rcx - jmp .Ltail_avx - -.align 32 -.Ltail_avx: - vpxor %xmm10,%xmm15,%xmm15 -.Ltail_no_xor_avx: - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - - vmovdqu (%r10),%xmm12 - - vpxor %xmm0,%xmm3,%xmm10 - vpxor %xmm1,%xmm4,%xmm11 - vpxor %xmm2,%xmm5,%xmm5 - - vpxor %xmm10,%xmm5,%xmm5 - vpxor %xmm11,%xmm5,%xmm5 - vpslldq $8,%xmm5,%xmm9 - vpsrldq $8,%xmm5,%xmm5 - vpxor %xmm9,%xmm10,%xmm10 - vpxor %xmm5,%xmm11,%xmm11 - - vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 - vpalignr $8,%xmm10,%xmm10,%xmm10 - vpxor %xmm9,%xmm10,%xmm10 - - vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 - vpalignr $8,%xmm10,%xmm10,%xmm10 - vpxor %xmm11,%xmm10,%xmm10 - vpxor %xmm9,%xmm10,%xmm10 - - cmpq $0,%rcx - jne .Lshort_avx - - vpshufb %xmm13,%xmm10,%xmm10 - vmovdqu %xmm10,(%rdi) - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.size gcm_ghash_avx,.-gcm_ghash_avx -.align 64 -.Lbswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.L0x1c2_polynomial: -.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -.L7_mask: -.long 7,0,7,0 -.L7_mask_poly: -.long 7,0,450,0 -.align 64 -.type .Lrem_4bit,@object -.Lrem_4bit: -.long 0,0,0,471859200,0,943718400,0,610271232 -.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208 -.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008 -.long 
0,2441084928,0,2376073216,0,2847932416,0,3051356160 -.type .Lrem_8bit,@object -.Lrem_8bit: -.value 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E -.value 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E -.value 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E -.value 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E -.value 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E -.value 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E -.value 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E -.value 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E -.value 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE -.value 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE -.value 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE -.value 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE -.value 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E -.value 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E -.value 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE -.value 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE -.value 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E -.value 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E -.value 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E -.value 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E -.value 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E -.value 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E -.value 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E -.value 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E -.value 0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE -.value 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE -.value 0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE -.value 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE -.value 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E -.value 
0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E -.value 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE -.value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE - -.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/md5-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/md5-x86_64.S deleted file mode 100644 index 04aaf057e6..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/md5-x86_64.S +++ /dev/null @@ -1,702 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.align 16 - -.globl md5_block_asm_data_order -.hidden md5_block_asm_data_order -.type md5_block_asm_data_order,@function -md5_block_asm_data_order: -.cfi_startproc - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12,-32 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14,-40 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset r15,-48 -.Lprologue: - - - - - movq %rdi,%rbp - shlq $6,%rdx - leaq (%rsi,%rdx,1),%rdi - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - - - - - - - - cmpq %rdi,%rsi - je .Lend - - -.Lloop: - movl %eax,%r8d - movl %ebx,%r9d - movl %ecx,%r14d - movl %edx,%r15d - movl 0(%rsi),%r10d - movl %edx,%r11d - xorl %ecx,%r11d - leal -680876936(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - 
movl 4(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -389564586(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 8(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal 606105819(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 12(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -1044525330(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 16(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal -176418897(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 20(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal 1200080426(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 24(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -1473231341(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 28(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -45705983(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 32(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal 1770035416(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 36(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -1958414417(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 40(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -42063(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 44(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -1990404162(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 
48(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal 1804603682(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 52(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -40341101(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 56(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -1502002290(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 60(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal 1236535329(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 0(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - movl 4(%rsi),%r10d - movl %edx,%r11d - movl %edx,%r12d - notl %r11d - leal -165796510(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 24(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -1069501632(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 44(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal 643717713(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 0(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -373897302(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 20(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal -701558691(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 40(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal 38016083(%rdx,%r10,1),%edx - andl %eax,%r12d - andl 
%ebx,%r11d - movl 60(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal -660478335(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 16(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -405537848(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 36(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal 568446438(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 56(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -1019803690(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 12(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal -187363961(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 32(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal 1163531501(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 52(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal -1444681467(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 8(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -51403784(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 28(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal 1735328473(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 48(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl 
%eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -1926607734(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 0(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - movl 20(%rsi),%r10d - movl %ecx,%r11d - leal -378558(%rax,%r10,1),%eax - movl 32(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -2022574463(%rdx,%r10,1),%edx - movl 44(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal 1839030562(%rcx,%r10,1),%ecx - movl 56(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -35309556(%rbx,%r10,1),%ebx - movl 4(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal -1530992060(%rax,%r10,1),%eax - movl 16(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal 1272893353(%rdx,%r10,1),%edx - movl 28(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal -155497632(%rcx,%r10,1),%ecx - movl 40(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -1094730640(%rbx,%r10,1),%ebx - movl 52(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal 681279174(%rax,%r10,1),%eax - movl 0(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -358537222(%rdx,%r10,1),%edx - movl 12(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal -722521979(%rcx,%r10,1),%ecx - movl 24(%rsi),%r10d - xorl %ebx,%r11d - 
xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal 76029189(%rbx,%r10,1),%ebx - movl 36(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal -640364487(%rax,%r10,1),%eax - movl 48(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -421815835(%rdx,%r10,1),%edx - movl 60(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal 530742520(%rcx,%r10,1),%ecx - movl 8(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -995338651(%rbx,%r10,1),%ebx - movl 0(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - movl 0(%rsi),%r10d - movl $0xffffffff,%r11d - xorl %edx,%r11d - leal -198630844(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 28(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal 1126891415(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 56(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1416354905(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 20(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -57434055(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 48(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal 1700485571(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 12(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -1894986606(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 
40(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1051523(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 4(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -2054922799(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 32(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal 1873313359(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 60(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -30611744(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 24(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1560198380(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 52(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal 1309151649(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 16(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal -145523070(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 44(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -1120210379(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 8(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal 718787259(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 36(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -343485551(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 0(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - - addl %r8d,%eax - addl %r9d,%ebx - addl 
%r14d,%ecx - addl %r15d,%edx - - - addq $64,%rsi - cmpq %rdi,%rsi - jb .Lloop - - -.Lend: - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - movq (%rsp),%r15 -.cfi_restore r15 - movq 8(%rsp),%r14 -.cfi_restore r14 - movq 16(%rsp),%r12 -.cfi_restore r12 - movq 24(%rsp),%rbx -.cfi_restore rbx - movq 32(%rsp),%rbp -.cfi_restore rbp - addq $40,%rsp -.cfi_adjust_cfa_offset -40 -.Lepilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size md5_block_asm_data_order,.-md5_block_asm_data_order -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S deleted file mode 100644 index 85f4899012..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S +++ /dev/null @@ -1,4543 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - - -.align 64 -.Lpoly: -.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 - -.LOne: -.long 1,1,1,1,1,1,1,1 -.LTwo: -.long 2,2,2,2,2,2,2,2 -.LThree: -.long 3,3,3,3,3,3,3,3 -.LONE_mont: -.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe - - -.Lord: -.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 -.LordK: -.quad 0xccd1c8aaee00bc4f - - - -.globl ecp_nistz256_neg -.hidden ecp_nistz256_neg -.type ecp_nistz256_neg,@function -.align 32 -ecp_nistz256_neg: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-24 -.Lneg_body: - - xorq %r8,%r8 - xorq %r9,%r9 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r13,%r13 - - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r8,%rax - sbbq 24(%rsi),%r11 - leaq .Lpoly(%rip),%rsi - movq %r9,%rdx - sbbq $0,%r13 - - addq 0(%rsi),%r8 - movq %r10,%rcx - adcq 8(%rsi),%r9 - adcq 16(%rsi),%r10 - movq %r11,%r12 - adcq 24(%rsi),%r11 - testq %r13,%r13 - - cmovzq %rax,%r8 - cmovzq %rdx,%r9 - movq %r8,0(%rdi) - cmovzq %rcx,%r10 - movq %r9,8(%rdi) - cmovzq %r12,%r11 - movq %r10,16(%rdi) - movq %r11,24(%rdi) - - movq 0(%rsp),%r13 -.cfi_restore %r13 - movq 8(%rsp),%r12 -.cfi_restore %r12 - leaq 16(%rsp),%rsp -.cfi_adjust_cfa_offset -16 -.Lneg_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_neg,.-ecp_nistz256_neg - - - - - - -.globl ecp_nistz256_ord_mul_mont -.hidden ecp_nistz256_ord_mul_mont -.type ecp_nistz256_ord_mul_mont,@function -.align 32 -ecp_nistz256_ord_mul_mont: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - 
andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lecp_nistz256_ord_mul_montx - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lord_mul_body: - - movq 0(%rdx),%rax - movq %rdx,%rbx - leaq .Lord(%rip),%r14 - movq .LordK(%rip),%r15 - - - movq %rax,%rcx - mulq 0(%rsi) - movq %rax,%r8 - movq %rcx,%rax - movq %rdx,%r9 - - mulq 8(%rsi) - addq %rax,%r9 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq 16(%rsi) - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - - movq %r8,%r13 - imulq %r15,%r8 - - movq %rdx,%r11 - mulq 24(%rsi) - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%r12 - - - mulq 0(%r14) - movq %r8,%rbp - addq %rax,%r13 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%rcx - - subq %r8,%r10 - sbbq $0,%r8 - - mulq 8(%r14) - addq %rcx,%r9 - adcq $0,%rdx - addq %rax,%r9 - movq %rbp,%rax - adcq %rdx,%r10 - movq %rbp,%rdx - adcq $0,%r8 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r11 - movq 8(%rbx),%rax - sbbq %rdx,%rbp - - addq %r8,%r11 - adcq %rbp,%r12 - adcq $0,%r13 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r9 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - - movq %r9,%rcx - imulq %r15,%r9 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - xorq %r8,%r8 - addq %rax,%r12 - movq %r9,%rax - adcq %rdx,%r13 - adcq $0,%r8 - - - mulq 0(%r14) - movq %r9,%rbp - addq %rax,%rcx - movq %r9,%rax - adcq %rdx,%rcx - - subq %r9,%r11 - sbbq $0,%r9 - - mulq 8(%r14) - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq 
%rbp,%rax - adcq %rdx,%r11 - movq %rbp,%rdx - adcq $0,%r9 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r12 - movq 16(%rbx),%rax - sbbq %rdx,%rbp - - addq %r9,%r12 - adcq %rbp,%r13 - adcq $0,%r8 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rcx,%rax - adcq $0,%rdx - - movq %r10,%rcx - imulq %r15,%r10 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r13 - adcq $0,%rdx - xorq %r9,%r9 - addq %rax,%r13 - movq %r10,%rax - adcq %rdx,%r8 - adcq $0,%r9 - - - mulq 0(%r14) - movq %r10,%rbp - addq %rax,%rcx - movq %r10,%rax - adcq %rdx,%rcx - - subq %r10,%r12 - sbbq $0,%r10 - - mulq 8(%r14) - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq %rdx,%r12 - movq %rbp,%rdx - adcq $0,%r10 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r13 - movq 24(%rbx),%rax - sbbq %rdx,%rbp - - addq %r10,%r13 - adcq %rbp,%r8 - adcq $0,%r9 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %rcx,%rax - adcq $0,%rdx - - movq %r11,%rcx - imulq %r15,%r11 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r8 - adcq $0,%rdx - xorq %r10,%r10 - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - adcq $0,%r10 - - - mulq 0(%r14) - movq %r11,%rbp - addq %rax,%rcx - movq %r11,%rax - adcq %rdx,%rcx - - subq %r11,%r13 - sbbq $0,%r11 - - mulq 8(%r14) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq %rdx,%r13 - movq %rbp,%rdx - adcq $0,%r11 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r8 - sbbq %rdx,%rbp - - addq %r11,%r8 - adcq %rbp,%r9 - adcq $0,%r10 - - - movq %r12,%rsi - subq 0(%r14),%r12 - 
movq %r13,%r11 - sbbq 8(%r14),%r13 - movq %r8,%rcx - sbbq 16(%r14),%r8 - movq %r9,%rbp - sbbq 24(%r14),%r9 - sbbq $0,%r10 - - cmovcq %rsi,%r12 - cmovcq %r11,%r13 - cmovcq %rcx,%r8 - cmovcq %rbp,%r9 - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lord_mul_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont - - - - - - - -.globl ecp_nistz256_ord_sqr_mont -.hidden ecp_nistz256_ord_sqr_mont -.type ecp_nistz256_ord_sqr_mont,@function -.align 32 -ecp_nistz256_ord_sqr_mont: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lecp_nistz256_ord_sqr_montx - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lord_sqr_body: - - movq 0(%rsi),%r8 - movq 8(%rsi),%rax - movq 16(%rsi),%r14 - movq 24(%rsi),%r15 - leaq .Lord(%rip),%rsi - movq %rdx,%rbx - jmp .Loop_ord_sqr - -.align 32 -.Loop_ord_sqr: - - movq %rax,%rbp - mulq %r8 - movq %rax,%r9 -.byte 102,72,15,110,205 - movq %r14,%rax - movq %rdx,%r10 - - mulq %r8 - addq %rax,%r10 - movq %r15,%rax -.byte 102,73,15,110,214 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %r8 - addq %rax,%r11 - movq %r15,%rax -.byte 102,73,15,110,223 - adcq $0,%rdx - movq %rdx,%r12 - - - mulq %r14 - movq %rax,%r13 - movq %r14,%rax - movq %rdx,%r14 - - - mulq %rbp - addq %rax,%r11 - movq %r15,%rax - adcq $0,%rdx - 
movq %rdx,%r15 - - mulq %rbp - addq %rax,%r12 - adcq $0,%rdx - - addq %r15,%r12 - adcq %rdx,%r13 - adcq $0,%r14 - - - xorq %r15,%r15 - movq %r8,%rax - addq %r9,%r9 - adcq %r10,%r10 - adcq %r11,%r11 - adcq %r12,%r12 - adcq %r13,%r13 - adcq %r14,%r14 - adcq $0,%r15 - - - mulq %rax - movq %rax,%r8 -.byte 102,72,15,126,200 - movq %rdx,%rbp - - mulq %rax - addq %rbp,%r9 - adcq %rax,%r10 -.byte 102,72,15,126,208 - adcq $0,%rdx - movq %rdx,%rbp - - mulq %rax - addq %rbp,%r11 - adcq %rax,%r12 -.byte 102,72,15,126,216 - adcq $0,%rdx - movq %rdx,%rbp - - movq %r8,%rcx - imulq 32(%rsi),%r8 - - mulq %rax - addq %rbp,%r13 - adcq %rax,%r14 - movq 0(%rsi),%rax - adcq %rdx,%r15 - - - mulq %r8 - movq %r8,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r8,%r10 - sbbq $0,%rbp - - mulq %r8 - addq %rcx,%r9 - adcq $0,%rdx - addq %rax,%r9 - movq %r8,%rax - adcq %rdx,%r10 - movq %r8,%rdx - adcq $0,%rbp - - movq %r9,%rcx - imulq 32(%rsi),%r9 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r11 - movq 0(%rsi),%rax - sbbq %rdx,%r8 - - addq %rbp,%r11 - adcq $0,%r8 - - - mulq %r9 - movq %r9,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r9,%r11 - sbbq $0,%rbp - - mulq %r9 - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %r9,%rax - adcq %rdx,%r11 - movq %r9,%rdx - adcq $0,%rbp - - movq %r10,%rcx - imulq 32(%rsi),%r10 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r8 - movq 0(%rsi),%rax - sbbq %rdx,%r9 - - addq %rbp,%r8 - adcq $0,%r9 - - - mulq %r10 - movq %r10,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r10,%r8 - sbbq $0,%rbp - - mulq %r10 - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %r10,%rax - adcq %rdx,%r8 - movq %r10,%rdx - adcq $0,%rbp - - movq %r11,%rcx - imulq 32(%rsi),%r11 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r9 - movq 0(%rsi),%rax - sbbq %rdx,%r10 - - addq %rbp,%r9 - adcq $0,%r10 - - - mulq %r11 - movq %r11,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r11,%r9 - sbbq 
$0,%rbp - - mulq %r11 - addq %rcx,%r8 - adcq $0,%rdx - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - movq %r11,%rdx - adcq $0,%rbp - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r10 - sbbq %rdx,%r11 - - addq %rbp,%r10 - adcq $0,%r11 - - - xorq %rdx,%rdx - addq %r12,%r8 - adcq %r13,%r9 - movq %r8,%r12 - adcq %r14,%r10 - adcq %r15,%r11 - movq %r9,%rax - adcq $0,%rdx - - - subq 0(%rsi),%r8 - movq %r10,%r14 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r11,%r15 - sbbq 24(%rsi),%r11 - sbbq $0,%rdx - - cmovcq %r12,%r8 - cmovncq %r9,%rax - cmovncq %r10,%r14 - cmovncq %r11,%r15 - - decq %rbx - jnz .Loop_ord_sqr - - movq %r8,0(%rdi) - movq %rax,8(%rdi) - pxor %xmm1,%xmm1 - movq %r14,16(%rdi) - pxor %xmm2,%xmm2 - movq %r15,24(%rdi) - pxor %xmm3,%xmm3 - - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lord_sqr_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont - -.type ecp_nistz256_ord_mul_montx,@function -.align 32 -ecp_nistz256_ord_mul_montx: -.cfi_startproc -.Lecp_nistz256_ord_mul_montx: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lord_mulx_body: - - movq %rdx,%rbx - movq 0(%rdx),%rdx - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - leaq -128(%rsi),%rsi - leaq .Lord-128(%rip),%r14 - movq .LordK(%rip),%r15 - - - mulxq %r9,%r8,%r9 - mulxq %r10,%rcx,%r10 - mulxq %r11,%rbp,%r11 - addq %rcx,%r9 - mulxq %r12,%rcx,%r12 - movq %r8,%rdx - 
mulxq %r15,%rdx,%rax - adcq %rbp,%r10 - adcq %rcx,%r11 - adcq $0,%r12 - - - xorq %r13,%r13 - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 24+128(%r14),%rcx,%rbp - movq 8(%rbx),%rdx - adcxq %rcx,%r11 - adoxq %rbp,%r12 - adcxq %r8,%r12 - adoxq %r8,%r13 - adcq $0,%r13 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r9,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - adcxq %r8,%r13 - adoxq %r8,%r8 - adcq $0,%r8 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%r14),%rcx,%rbp - movq 16(%rbx),%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcxq %r9,%r13 - adoxq %r9,%r8 - adcq $0,%r8 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r10,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - adcxq %r9,%r8 - adoxq %r9,%r9 - adcq $0,%r9 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%r14),%rcx,%rbp - movq 24(%rbx),%rdx - adcxq %rcx,%r13 - adoxq %rbp,%r8 - adcxq %r10,%r8 - adoxq %r10,%r9 - adcq $0,%r9 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 
16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r11,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r8 - adoxq %rbp,%r9 - - adcxq %r10,%r9 - adoxq %r10,%r10 - adcq $0,%r10 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%r14),%rcx,%rbp - leaq 128(%r14),%r14 - movq %r12,%rbx - adcxq %rcx,%r8 - adoxq %rbp,%r9 - movq %r13,%rdx - adcxq %r11,%r9 - adoxq %r11,%r10 - adcq $0,%r10 - - - - movq %r8,%rcx - subq 0(%r14),%r12 - sbbq 8(%r14),%r13 - sbbq 16(%r14),%r8 - movq %r9,%rbp - sbbq 24(%r14),%r9 - sbbq $0,%r10 - - cmovcq %rbx,%r12 - cmovcq %rdx,%r13 - cmovcq %rcx,%r8 - cmovcq %rbp,%r9 - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lord_mulx_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_ord_mul_montx,.-ecp_nistz256_ord_mul_montx - -.type ecp_nistz256_ord_sqr_montx,@function -.align 32 -ecp_nistz256_ord_sqr_montx: -.cfi_startproc -.Lecp_nistz256_ord_sqr_montx: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lord_sqrx_body: - - movq %rdx,%rbx - movq 0(%rsi),%rdx - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - leaq .Lord(%rip),%rsi - jmp .Loop_ord_sqrx - -.align 32 -.Loop_ord_sqrx: - mulxq 
%r14,%r9,%r10 - mulxq %r15,%rcx,%r11 - movq %rdx,%rax -.byte 102,73,15,110,206 - mulxq %r8,%rbp,%r12 - movq %r14,%rdx - addq %rcx,%r10 -.byte 102,73,15,110,215 - adcq %rbp,%r11 - adcq $0,%r12 - xorq %r13,%r13 - - mulxq %r15,%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq %r8,%rcx,%rbp - movq %r15,%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcq $0,%r13 - - mulxq %r8,%rcx,%r14 - movq %rax,%rdx -.byte 102,73,15,110,216 - xorq %r15,%r15 - adcxq %r9,%r9 - adoxq %rcx,%r13 - adcxq %r10,%r10 - adoxq %r15,%r14 - - - mulxq %rdx,%r8,%rbp -.byte 102,72,15,126,202 - adcxq %r11,%r11 - adoxq %rbp,%r9 - adcxq %r12,%r12 - mulxq %rdx,%rcx,%rax -.byte 102,72,15,126,210 - adcxq %r13,%r13 - adoxq %rcx,%r10 - adcxq %r14,%r14 - mulxq %rdx,%rcx,%rbp -.byte 0x67 -.byte 102,72,15,126,218 - adoxq %rax,%r11 - adcxq %r15,%r15 - adoxq %rcx,%r12 - adoxq %rbp,%r13 - mulxq %rdx,%rcx,%rax - adoxq %rcx,%r14 - adoxq %rax,%r15 - - - movq %r8,%rdx - mulxq 32(%rsi),%rdx,%rcx - - xorq %rax,%rax - mulxq 0(%rsi),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - mulxq 8(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - mulxq 16(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - mulxq 24(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r8 - adcxq %rax,%r8 - - - movq %r9,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adoxq %rcx,%r9 - adcxq %rbp,%r10 - mulxq 8(%rsi),%rcx,%rbp - adoxq %rcx,%r10 - adcxq %rbp,%r11 - mulxq 16(%rsi),%rcx,%rbp - adoxq %rcx,%r11 - adcxq %rbp,%r8 - mulxq 24(%rsi),%rcx,%rbp - adoxq %rcx,%r8 - adcxq %rbp,%r9 - adoxq %rax,%r9 - - - movq %r10,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - mulxq 8(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r8 - mulxq 16(%rsi),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - mulxq 24(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - adcxq %rax,%r10 - - - movq %r11,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adoxq %rcx,%r11 - adcxq %rbp,%r8 - mulxq 
8(%rsi),%rcx,%rbp - adoxq %rcx,%r8 - adcxq %rbp,%r9 - mulxq 16(%rsi),%rcx,%rbp - adoxq %rcx,%r9 - adcxq %rbp,%r10 - mulxq 24(%rsi),%rcx,%rbp - adoxq %rcx,%r10 - adcxq %rbp,%r11 - adoxq %rax,%r11 - - - addq %r8,%r12 - adcq %r13,%r9 - movq %r12,%rdx - adcq %r14,%r10 - adcq %r15,%r11 - movq %r9,%r14 - adcq $0,%rax - - - subq 0(%rsi),%r12 - movq %r10,%r15 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r11,%r8 - sbbq 24(%rsi),%r11 - sbbq $0,%rax - - cmovncq %r12,%rdx - cmovncq %r9,%r14 - cmovncq %r10,%r15 - cmovncq %r11,%r8 - - decq %rbx - jnz .Loop_ord_sqrx - - movq %rdx,0(%rdi) - movq %r14,8(%rdi) - pxor %xmm1,%xmm1 - movq %r15,16(%rdi) - pxor %xmm2,%xmm2 - movq %r8,24(%rdi) - pxor %xmm3,%xmm3 - - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lord_sqrx_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_ord_sqr_montx,.-ecp_nistz256_ord_sqr_montx - - - - - - -.globl ecp_nistz256_mul_mont -.hidden ecp_nistz256_mul_mont -.type ecp_nistz256_mul_mont,@function -.align 32 -ecp_nistz256_mul_mont: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx -.Lmul_mont: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lmul_body: - cmpl $0x80100,%ecx - je .Lmul_montx - movq %rdx,%rbx - movq 0(%rdx),%rax - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - - call __ecp_nistz256_mul_montq - jmp .Lmul_mont_done - -.align 32 -.Lmul_montx: - movq %rdx,%rbx - 
movq 0(%rdx),%rdx - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - leaq -128(%rsi),%rsi - - call __ecp_nistz256_mul_montx -.Lmul_mont_done: - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lmul_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont - -.type __ecp_nistz256_mul_montq,@function -.align 32 -__ecp_nistz256_mul_montq: -.cfi_startproc - - - movq %rax,%rbp - mulq %r9 - movq .Lpoly+8(%rip),%r14 - movq %rax,%r8 - movq %rbp,%rax - movq %rdx,%r9 - - mulq %r10 - movq .Lpoly+24(%rip),%r15 - addq %rax,%r9 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %r11 - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %r12 - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - xorq %r13,%r13 - movq %rdx,%r12 - - - - - - - - - - - movq %r8,%rbp - shlq $32,%r8 - mulq %r15 - shrq $32,%rbp - addq %r8,%r9 - adcq %rbp,%r10 - adcq %rax,%r11 - movq 8(%rbx),%rax - adcq %rdx,%r12 - adcq $0,%r13 - xorq %r8,%r8 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r9 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %r9,%rax - adcq %rdx,%r13 - adcq $0,%r8 - - - - movq %r9,%rbp - shlq $32,%r9 - mulq %r15 - shrq $32,%rbp - addq %r9,%r10 - adcq %rbp,%r11 - adcq %rax,%r12 - movq 16(%rbx),%rax - adcq %rdx,%r13 - adcq $0,%r8 - xorq %r9,%r9 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - 
addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %r10,%rax - adcq %rdx,%r8 - adcq $0,%r9 - - - - movq %r10,%rbp - shlq $32,%r10 - mulq %r15 - shrq $32,%rbp - addq %r10,%r11 - adcq %rbp,%r12 - adcq %rax,%r13 - movq 24(%rbx),%rax - adcq %rdx,%r8 - adcq $0,%r9 - xorq %r10,%r10 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r8 - adcq $0,%rdx - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - adcq $0,%r10 - - - - movq %r11,%rbp - shlq $32,%r11 - mulq %r15 - shrq $32,%rbp - addq %r11,%r12 - adcq %rbp,%r13 - movq %r12,%rcx - adcq %rax,%r8 - adcq %rdx,%r9 - movq %r13,%rbp - adcq $0,%r10 - - - - subq $-1,%r12 - movq %r8,%rbx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%rdx - sbbq %r15,%r9 - sbbq $0,%r10 - - cmovcq %rcx,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rbx,%r8 - movq %r13,8(%rdi) - cmovcq %rdx,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq - - - - - - - - -.globl ecp_nistz256_sqr_mont -.hidden ecp_nistz256_sqr_mont -.type ecp_nistz256_sqr_mont,@function -.align 32 -ecp_nistz256_sqr_mont: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - 
pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 -.Lsqr_body: - cmpl $0x80100,%ecx - je .Lsqr_montx - movq 0(%rsi),%rax - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - - call __ecp_nistz256_sqr_montq - jmp .Lsqr_mont_done - -.align 32 -.Lsqr_montx: - movq 0(%rsi),%rdx - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - leaq -128(%rsi),%rsi - - call __ecp_nistz256_sqr_montx -.Lsqr_mont_done: - movq 0(%rsp),%r15 -.cfi_restore %r15 - movq 8(%rsp),%r14 -.cfi_restore %r14 - movq 16(%rsp),%r13 -.cfi_restore %r13 - movq 24(%rsp),%r12 -.cfi_restore %r12 - movq 32(%rsp),%rbx -.cfi_restore %rbx - movq 40(%rsp),%rbp -.cfi_restore %rbp - leaq 48(%rsp),%rsp -.cfi_adjust_cfa_offset -48 -.Lsqr_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont - -.type __ecp_nistz256_sqr_montq,@function -.align 32 -__ecp_nistz256_sqr_montq: -.cfi_startproc - movq %rax,%r13 - mulq %r14 - movq %rax,%r9 - movq %r15,%rax - movq %rdx,%r10 - - mulq %r13 - addq %rax,%r10 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %r13 - addq %rax,%r11 - movq %r15,%rax - adcq $0,%rdx - movq %rdx,%r12 - - - mulq %r14 - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq %r14 - addq %rax,%r12 - movq %r8,%rax - adcq $0,%rdx - addq %rbp,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - - mulq %r15 - xorq %r15,%r15 - addq %rax,%r13 - movq 0(%rsi),%rax - movq %rdx,%r14 - adcq $0,%r14 - - addq %r9,%r9 - adcq %r10,%r10 - adcq %r11,%r11 - adcq %r12,%r12 - adcq %r13,%r13 - adcq %r14,%r14 - adcq $0,%r15 - - mulq %rax - movq %rax,%r8 - movq 8(%rsi),%rax - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r9 - adcq %rax,%r10 - movq 16(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r11 - adcq %rax,%r12 - movq 24(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r13 - adcq %rax,%r14 - movq %r8,%rax - adcq %rdx,%r15 - - movq 
.Lpoly+8(%rip),%rsi - movq .Lpoly+24(%rip),%rbp - - - - - movq %r8,%rcx - shlq $32,%r8 - mulq %rbp - shrq $32,%rcx - addq %r8,%r9 - adcq %rcx,%r10 - adcq %rax,%r11 - movq %r9,%rax - adcq $0,%rdx - - - - movq %r9,%rcx - shlq $32,%r9 - movq %rdx,%r8 - mulq %rbp - shrq $32,%rcx - addq %r9,%r10 - adcq %rcx,%r11 - adcq %rax,%r8 - movq %r10,%rax - adcq $0,%rdx - - - - movq %r10,%rcx - shlq $32,%r10 - movq %rdx,%r9 - mulq %rbp - shrq $32,%rcx - addq %r10,%r11 - adcq %rcx,%r8 - adcq %rax,%r9 - movq %r11,%rax - adcq $0,%rdx - - - - movq %r11,%rcx - shlq $32,%r11 - movq %rdx,%r10 - mulq %rbp - shrq $32,%rcx - addq %r11,%r8 - adcq %rcx,%r9 - adcq %rax,%r10 - adcq $0,%rdx - xorq %r11,%r11 - - - - addq %r8,%r12 - adcq %r9,%r13 - movq %r12,%r8 - adcq %r10,%r14 - adcq %rdx,%r15 - movq %r13,%r9 - adcq $0,%r11 - - subq $-1,%r12 - movq %r14,%r10 - sbbq %rsi,%r13 - sbbq $0,%r14 - movq %r15,%rcx - sbbq %rbp,%r15 - sbbq $0,%r11 - - cmovcq %r8,%r12 - cmovcq %r9,%r13 - movq %r12,0(%rdi) - cmovcq %r10,%r14 - movq %r13,8(%rdi) - cmovcq %rcx,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq -.type __ecp_nistz256_mul_montx,@function -.align 32 -__ecp_nistz256_mul_montx: -.cfi_startproc - - - mulxq %r9,%r8,%r9 - mulxq %r10,%rcx,%r10 - movq $32,%r14 - xorq %r13,%r13 - mulxq %r11,%rbp,%r11 - movq .Lpoly+24(%rip),%r15 - adcq %rcx,%r9 - mulxq %r12,%rcx,%r12 - movq %r8,%rdx - adcq %rbp,%r10 - shlxq %r14,%r8,%rbp - adcq %rcx,%r11 - shrxq %r14,%r8,%rcx - adcq $0,%r12 - - - - addq %rbp,%r9 - adcq %rcx,%r10 - - mulxq %r15,%rcx,%rbp - movq 8(%rbx),%rdx - adcq %rcx,%r11 - adcq %rbp,%r12 - adcq $0,%r13 - xorq %r8,%r8 - - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r9,%rdx - adcxq %rcx,%r12 - shlxq %r14,%r9,%rcx - adoxq 
%rbp,%r13 - shrxq %r14,%r9,%rbp - - adcxq %r8,%r13 - adoxq %r8,%r8 - adcq $0,%r8 - - - - addq %rcx,%r10 - adcq %rbp,%r11 - - mulxq %r15,%rcx,%rbp - movq 16(%rbx),%rdx - adcq %rcx,%r12 - adcq %rbp,%r13 - adcq $0,%r8 - xorq %r9,%r9 - - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r10,%rdx - adcxq %rcx,%r13 - shlxq %r14,%r10,%rcx - adoxq %rbp,%r8 - shrxq %r14,%r10,%rbp - - adcxq %r9,%r8 - adoxq %r9,%r9 - adcq $0,%r9 - - - - addq %rcx,%r11 - adcq %rbp,%r12 - - mulxq %r15,%rcx,%rbp - movq 24(%rbx),%rdx - adcq %rcx,%r13 - adcq %rbp,%r8 - adcq $0,%r9 - xorq %r10,%r10 - - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r11,%rdx - adcxq %rcx,%r8 - shlxq %r14,%r11,%rcx - adoxq %rbp,%r9 - shrxq %r14,%r11,%rbp - - adcxq %r10,%r9 - adoxq %r10,%r10 - adcq $0,%r10 - - - - addq %rcx,%r12 - adcq %rbp,%r13 - - mulxq %r15,%rcx,%rbp - movq %r12,%rbx - movq .Lpoly+8(%rip),%r14 - adcq %rcx,%r8 - movq %r13,%rdx - adcq %rbp,%r9 - adcq $0,%r10 - - - - xorl %eax,%eax - movq %r8,%rcx - sbbq $-1,%r12 - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%rbp - sbbq %r15,%r9 - sbbq $0,%r10 - - cmovcq %rbx,%r12 - cmovcq %rdx,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %rbp,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx - -.type __ecp_nistz256_sqr_montx,@function -.align 32 -__ecp_nistz256_sqr_montx: -.cfi_startproc - mulxq %r14,%r9,%r10 - mulxq %r15,%rcx,%r11 - xorl %eax,%eax - adcq %rcx,%r10 - mulxq %r8,%rbp,%r12 - movq %r14,%rdx - adcq %rbp,%r11 - adcq $0,%r12 - xorq %r13,%r13 - - - mulxq 
%r15,%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq %r8,%rcx,%rbp - movq %r15,%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcq $0,%r13 - - - mulxq %r8,%rcx,%r14 - movq 0+128(%rsi),%rdx - xorq %r15,%r15 - adcxq %r9,%r9 - adoxq %rcx,%r13 - adcxq %r10,%r10 - adoxq %r15,%r14 - - mulxq %rdx,%r8,%rbp - movq 8+128(%rsi),%rdx - adcxq %r11,%r11 - adoxq %rbp,%r9 - adcxq %r12,%r12 - mulxq %rdx,%rcx,%rax - movq 16+128(%rsi),%rdx - adcxq %r13,%r13 - adoxq %rcx,%r10 - adcxq %r14,%r14 -.byte 0x67 - mulxq %rdx,%rcx,%rbp - movq 24+128(%rsi),%rdx - adoxq %rax,%r11 - adcxq %r15,%r15 - adoxq %rcx,%r12 - movq $32,%rsi - adoxq %rbp,%r13 -.byte 0x67,0x67 - mulxq %rdx,%rcx,%rax - movq .Lpoly+24(%rip),%rdx - adoxq %rcx,%r14 - shlxq %rsi,%r8,%rcx - adoxq %rax,%r15 - shrxq %rsi,%r8,%rax - movq %rdx,%rbp - - - addq %rcx,%r9 - adcq %rax,%r10 - - mulxq %r8,%rcx,%r8 - adcq %rcx,%r11 - shlxq %rsi,%r9,%rcx - adcq $0,%r8 - shrxq %rsi,%r9,%rax - - - addq %rcx,%r10 - adcq %rax,%r11 - - mulxq %r9,%rcx,%r9 - adcq %rcx,%r8 - shlxq %rsi,%r10,%rcx - adcq $0,%r9 - shrxq %rsi,%r10,%rax - - - addq %rcx,%r11 - adcq %rax,%r8 - - mulxq %r10,%rcx,%r10 - adcq %rcx,%r9 - shlxq %rsi,%r11,%rcx - adcq $0,%r10 - shrxq %rsi,%r11,%rax - - - addq %rcx,%r8 - adcq %rax,%r9 - - mulxq %r11,%rcx,%r11 - adcq %rcx,%r10 - adcq $0,%r11 - - xorq %rdx,%rdx - addq %r8,%r12 - movq .Lpoly+8(%rip),%rsi - adcq %r9,%r13 - movq %r12,%r8 - adcq %r10,%r14 - adcq %r11,%r15 - movq %r13,%r9 - adcq $0,%rdx - - subq $-1,%r12 - movq %r14,%r10 - sbbq %rsi,%r13 - sbbq $0,%r14 - movq %r15,%r11 - sbbq %rbp,%r15 - sbbq $0,%rdx - - cmovcq %r8,%r12 - cmovcq %r9,%r13 - movq %r12,0(%rdi) - cmovcq %r10,%r14 - movq %r13,8(%rdi) - cmovcq %r11,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx - - -.globl ecp_nistz256_select_w5 -.hidden ecp_nistz256_select_w5 -.type ecp_nistz256_select_w5,@function -.align 32 -ecp_nistz256_select_w5: -.cfi_startproc - leaq 
OPENSSL_ia32cap_P(%rip),%rax - movq 8(%rax),%rax - testl $32,%eax - jnz .Lavx2_select_w5 - movdqa .LOne(%rip),%xmm0 - movd %edx,%xmm1 - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - - movdqa %xmm0,%xmm8 - pshufd $0,%xmm1,%xmm1 - - movq $16,%rax -.Lselect_loop_sse_w5: - - movdqa %xmm8,%xmm15 - paddd %xmm0,%xmm8 - pcmpeqd %xmm1,%xmm15 - - movdqa 0(%rsi),%xmm9 - movdqa 16(%rsi),%xmm10 - movdqa 32(%rsi),%xmm11 - movdqa 48(%rsi),%xmm12 - movdqa 64(%rsi),%xmm13 - movdqa 80(%rsi),%xmm14 - leaq 96(%rsi),%rsi - - pand %xmm15,%xmm9 - pand %xmm15,%xmm10 - por %xmm9,%xmm2 - pand %xmm15,%xmm11 - por %xmm10,%xmm3 - pand %xmm15,%xmm12 - por %xmm11,%xmm4 - pand %xmm15,%xmm13 - por %xmm12,%xmm5 - pand %xmm15,%xmm14 - por %xmm13,%xmm6 - por %xmm14,%xmm7 - - decq %rax - jnz .Lselect_loop_sse_w5 - - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - movdqu %xmm4,32(%rdi) - movdqu %xmm5,48(%rdi) - movdqu %xmm6,64(%rdi) - movdqu %xmm7,80(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_ecp_nistz256_select_w5: -.size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5 - - - -.globl ecp_nistz256_select_w7 -.hidden ecp_nistz256_select_w7 -.type ecp_nistz256_select_w7,@function -.align 32 -ecp_nistz256_select_w7: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rax - movq 8(%rax),%rax - testl $32,%eax - jnz .Lavx2_select_w7 - movdqa .LOne(%rip),%xmm8 - movd %edx,%xmm1 - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - - movdqa %xmm8,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq $64,%rax - -.Lselect_loop_sse_w7: - movdqa %xmm8,%xmm15 - paddd %xmm0,%xmm8 - movdqa 0(%rsi),%xmm9 - movdqa 16(%rsi),%xmm10 - pcmpeqd %xmm1,%xmm15 - movdqa 32(%rsi),%xmm11 - movdqa 48(%rsi),%xmm12 - leaq 64(%rsi),%rsi - - pand %xmm15,%xmm9 - pand %xmm15,%xmm10 - por %xmm9,%xmm2 - pand %xmm15,%xmm11 - por %xmm10,%xmm3 - pand %xmm15,%xmm12 - por %xmm11,%xmm4 - prefetcht0 255(%rsi) - por %xmm12,%xmm5 - - decq %rax - jnz .Lselect_loop_sse_w7 
- - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - movdqu %xmm4,32(%rdi) - movdqu %xmm5,48(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_ecp_nistz256_select_w7: -.size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7 - - -.type ecp_nistz256_avx2_select_w5,@function -.align 32 -ecp_nistz256_avx2_select_w5: -.cfi_startproc -.Lavx2_select_w5: - vzeroupper - vmovdqa .LTwo(%rip),%ymm0 - - vpxor %ymm2,%ymm2,%ymm2 - vpxor %ymm3,%ymm3,%ymm3 - vpxor %ymm4,%ymm4,%ymm4 - - vmovdqa .LOne(%rip),%ymm5 - vmovdqa .LTwo(%rip),%ymm10 - - vmovd %edx,%xmm1 - vpermd %ymm1,%ymm2,%ymm1 - - movq $8,%rax -.Lselect_loop_avx2_w5: - - vmovdqa 0(%rsi),%ymm6 - vmovdqa 32(%rsi),%ymm7 - vmovdqa 64(%rsi),%ymm8 - - vmovdqa 96(%rsi),%ymm11 - vmovdqa 128(%rsi),%ymm12 - vmovdqa 160(%rsi),%ymm13 - - vpcmpeqd %ymm1,%ymm5,%ymm9 - vpcmpeqd %ymm1,%ymm10,%ymm14 - - vpaddd %ymm0,%ymm5,%ymm5 - vpaddd %ymm0,%ymm10,%ymm10 - leaq 192(%rsi),%rsi - - vpand %ymm9,%ymm6,%ymm6 - vpand %ymm9,%ymm7,%ymm7 - vpand %ymm9,%ymm8,%ymm8 - vpand %ymm14,%ymm11,%ymm11 - vpand %ymm14,%ymm12,%ymm12 - vpand %ymm14,%ymm13,%ymm13 - - vpxor %ymm6,%ymm2,%ymm2 - vpxor %ymm7,%ymm3,%ymm3 - vpxor %ymm8,%ymm4,%ymm4 - vpxor %ymm11,%ymm2,%ymm2 - vpxor %ymm12,%ymm3,%ymm3 - vpxor %ymm13,%ymm4,%ymm4 - - decq %rax - jnz .Lselect_loop_avx2_w5 - - vmovdqu %ymm2,0(%rdi) - vmovdqu %ymm3,32(%rdi) - vmovdqu %ymm4,64(%rdi) - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_ecp_nistz256_avx2_select_w5: -.size ecp_nistz256_avx2_select_w5,.-ecp_nistz256_avx2_select_w5 - - - -.globl ecp_nistz256_avx2_select_w7 -.hidden ecp_nistz256_avx2_select_w7 -.type ecp_nistz256_avx2_select_w7,@function -.align 32 -ecp_nistz256_avx2_select_w7: -.cfi_startproc -.Lavx2_select_w7: - vzeroupper - vmovdqa .LThree(%rip),%ymm0 - - vpxor %ymm2,%ymm2,%ymm2 - vpxor %ymm3,%ymm3,%ymm3 - - vmovdqa .LOne(%rip),%ymm4 - vmovdqa .LTwo(%rip),%ymm8 - vmovdqa .LThree(%rip),%ymm12 - - vmovd %edx,%xmm1 - vpermd %ymm1,%ymm2,%ymm1 - - - movq $21,%rax -.Lselect_loop_avx2_w7: - - vmovdqa 
0(%rsi),%ymm5 - vmovdqa 32(%rsi),%ymm6 - - vmovdqa 64(%rsi),%ymm9 - vmovdqa 96(%rsi),%ymm10 - - vmovdqa 128(%rsi),%ymm13 - vmovdqa 160(%rsi),%ymm14 - - vpcmpeqd %ymm1,%ymm4,%ymm7 - vpcmpeqd %ymm1,%ymm8,%ymm11 - vpcmpeqd %ymm1,%ymm12,%ymm15 - - vpaddd %ymm0,%ymm4,%ymm4 - vpaddd %ymm0,%ymm8,%ymm8 - vpaddd %ymm0,%ymm12,%ymm12 - leaq 192(%rsi),%rsi - - vpand %ymm7,%ymm5,%ymm5 - vpand %ymm7,%ymm6,%ymm6 - vpand %ymm11,%ymm9,%ymm9 - vpand %ymm11,%ymm10,%ymm10 - vpand %ymm15,%ymm13,%ymm13 - vpand %ymm15,%ymm14,%ymm14 - - vpxor %ymm5,%ymm2,%ymm2 - vpxor %ymm6,%ymm3,%ymm3 - vpxor %ymm9,%ymm2,%ymm2 - vpxor %ymm10,%ymm3,%ymm3 - vpxor %ymm13,%ymm2,%ymm2 - vpxor %ymm14,%ymm3,%ymm3 - - decq %rax - jnz .Lselect_loop_avx2_w7 - - - vmovdqa 0(%rsi),%ymm5 - vmovdqa 32(%rsi),%ymm6 - - vpcmpeqd %ymm1,%ymm4,%ymm7 - - vpand %ymm7,%ymm5,%ymm5 - vpand %ymm7,%ymm6,%ymm6 - - vpxor %ymm5,%ymm2,%ymm2 - vpxor %ymm6,%ymm3,%ymm3 - - vmovdqu %ymm2,0(%rdi) - vmovdqu %ymm3,32(%rdi) - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_ecp_nistz256_avx2_select_w7: -.size ecp_nistz256_avx2_select_w7,.-ecp_nistz256_avx2_select_w7 -.type __ecp_nistz256_add_toq,@function -.align 32 -__ecp_nistz256_add_toq: -.cfi_startproc - xorq %r11,%r11 - addq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - movq %r12,%rax - adcq 16(%rbx),%r8 - adcq 24(%rbx),%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq - -.type __ecp_nistz256_sub_fromq,@function -.align 32 -__ecp_nistz256_sub_fromq: -.cfi_startproc - subq 0(%rbx),%r12 - sbbq 8(%rbx),%r13 - movq %r12,%rax - sbbq 16(%rbx),%r8 - sbbq 24(%rbx),%r9 - movq %r13,%rbp - sbbq %r11,%r11 - - addq $-1,%r12 - movq %r8,%rcx - adcq %r14,%r13 - adcq $0,%r8 
- movq %r9,%r10 - adcq %r15,%r9 - testq %r11,%r11 - - cmovzq %rax,%r12 - cmovzq %rbp,%r13 - movq %r12,0(%rdi) - cmovzq %rcx,%r8 - movq %r13,8(%rdi) - cmovzq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq - -.type __ecp_nistz256_subq,@function -.align 32 -__ecp_nistz256_subq: -.cfi_startproc - subq %r12,%rax - sbbq %r13,%rbp - movq %rax,%r12 - sbbq %r8,%rcx - sbbq %r9,%r10 - movq %rbp,%r13 - sbbq %r11,%r11 - - addq $-1,%rax - movq %rcx,%r8 - adcq %r14,%rbp - adcq $0,%rcx - movq %r10,%r9 - adcq %r15,%r10 - testq %r11,%r11 - - cmovnzq %rax,%r12 - cmovnzq %rbp,%r13 - cmovnzq %rcx,%r8 - cmovnzq %r10,%r9 - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_subq,.-__ecp_nistz256_subq - -.type __ecp_nistz256_mul_by_2q,@function -.align 32 -__ecp_nistz256_mul_by_2q: -.cfi_startproc - xorq %r11,%r11 - addq %r12,%r12 - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q -.globl ecp_nistz256_point_double -.hidden ecp_nistz256_point_double -.type ecp_nistz256_point_double,@function -.align 32 -ecp_nistz256_point_double: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lpoint_doublex - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 
-.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $160+8,%rsp -.cfi_adjust_cfa_offset 32*5+8 -.Lpoint_doubleq_body: - -.Lpoint_double_shortcutq: - movdqu 0(%rsi),%xmm0 - movq %rsi,%rbx - movdqu 16(%rsi),%xmm1 - movq 32+0(%rsi),%r12 - movq 32+8(%rsi),%r13 - movq 32+16(%rsi),%r8 - movq 32+24(%rsi),%r9 - movq .Lpoly+8(%rip),%r14 - movq .Lpoly+24(%rip),%r15 - movdqa %xmm0,96(%rsp) - movdqa %xmm1,96+16(%rsp) - leaq 32(%rdi),%r10 - leaq 64(%rdi),%r11 -.byte 102,72,15,110,199 -.byte 102,73,15,110,202 -.byte 102,73,15,110,211 - - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - leaq 64-0(%rsi),%rsi - leaq 64(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 0(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 32(%rbx),%rax - movq 64+0(%rbx),%r9 - movq 64+8(%rbx),%r10 - movq 64+16(%rbx),%r11 - movq 64+24(%rbx),%r12 - leaq 64-0(%rbx),%rsi - leaq 32(%rbx),%rbx -.byte 102,72,15,126,215 - call __ecp_nistz256_mul_montq - call __ecp_nistz256_mul_by_2q - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_toq - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 -.byte 102,72,15,126,207 - call __ecp_nistz256_sqr_montq - xorq %r9,%r9 - movq %r12,%rax - addq $-1,%r12 - movq %r13,%r10 - adcq %rsi,%r13 - movq %r14,%rcx - adcq $0,%r14 - movq %r15,%r8 - adcq %rbp,%r15 - adcq $0,%r9 - xorq %rsi,%rsi - testq $1,%rax - - cmovzq %rax,%r12 - cmovzq %r10,%r13 - cmovzq %rcx,%r14 - cmovzq %r8,%r15 - cmovzq %rsi,%r9 - - movq %r13,%rax - shrq $1,%r12 - 
shlq $63,%rax - movq %r14,%r10 - shrq $1,%r13 - orq %rax,%r12 - shlq $63,%r10 - movq %r15,%rcx - shrq $1,%r14 - orq %r10,%r13 - shlq $63,%rcx - movq %r12,0(%rdi) - shrq $1,%r15 - movq %r13,8(%rdi) - shlq $63,%r9 - orq %rcx,%r14 - orq %r9,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - movq 64(%rsp),%rax - leaq 64(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - leaq 32(%rsp),%rbx - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_toq - - movq 96(%rsp),%rax - leaq 96(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - movq 0+32(%rsp),%rax - movq 8+32(%rsp),%r14 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r15 - movq 24+32(%rsp),%r8 -.byte 102,72,15,126,199 - call __ecp_nistz256_sqr_montq - - leaq 128(%rsp),%rbx - movq %r14,%r8 - movq %r15,%r9 - movq %rsi,%r14 - movq %rbp,%r15 - call __ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 0(%rsp),%rdi - call __ecp_nistz256_subq - - movq 32(%rsp),%rax - leaq 32(%rsp),%rbx - movq %r12,%r14 - xorl %ecx,%ecx - movq %r12,0+0(%rsp) - movq %r13,%r10 - movq %r13,0+8(%rsp) - cmovzq %r8,%r11 - movq %r8,0+16(%rsp) - leaq 0-0(%rsp),%rsi - cmovzq %r9,%r12 - movq %r9,0+24(%rsp) - movq %r14,%r9 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - -.byte 102,72,15,126,203 -.byte 102,72,15,126,207 - call __ecp_nistz256_sub_fromq - - leaq 160+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp 
-.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpoint_doubleq_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_point_double,.-ecp_nistz256_point_double -.globl ecp_nistz256_point_add -.hidden ecp_nistz256_point_add -.type ecp_nistz256_point_add,@function -.align 32 -ecp_nistz256_point_add: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lpoint_addx - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $576+8,%rsp -.cfi_adjust_cfa_offset 32*18+8 -.Lpoint_addq_body: - - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq %rsi,%rbx - movq %rdx,%rsi - movdqa %xmm0,384(%rsp) - movdqa %xmm1,384+16(%rsp) - movdqa %xmm2,416(%rsp) - movdqa %xmm3,416+16(%rsp) - movdqa %xmm4,448(%rsp) - movdqa %xmm5,448+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rsi),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rsi),%xmm3 - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,480(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,480+16(%rsp) - movdqu 64(%rsi),%xmm0 - movdqu 80(%rsi),%xmm1 - movdqa %xmm2,512(%rsp) - movdqa %xmm3,512+16(%rsp) - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm0,%xmm1 -.byte 102,72,15,110,199 - - leaq 64-0(%rsi),%rsi - movq %rax,544+0(%rsp) - movq %r14,544+8(%rsp) - movq %r15,544+16(%rsp) - movq %r8,544+24(%rsp) - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm1,%xmm4 - por %xmm1,%xmm4 - pshufd 
$0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - movq 64+0(%rbx),%rax - movq 64+8(%rbx),%r14 - movq 64+16(%rbx),%r15 - movq 64+24(%rbx),%r8 -.byte 102,72,15,110,203 - - leaq 64-0(%rbx),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 544(%rsp),%rax - leaq 544(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq 0+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 416(%rsp),%rax - leaq 416(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq 0+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 512(%rsp),%rax - leaq 512(%rsp),%rbx - movq 0+256(%rsp),%r9 - movq 8+256(%rsp),%r10 - leaq 0+256(%rsp),%rsi - movq 16+256(%rsp),%r11 - movq 24+256(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 224(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - orq %r13,%r12 - movdqa %xmm4,%xmm2 - orq %r8,%r12 - orq %r9,%r12 - por %xmm5,%xmm2 -.byte 102,73,15,110,220 - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq 0+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 480(%rsp),%rax - leaq 480(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 160(%rsp),%rbx - leaq 0(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - orq %r13,%r12 - orq %r8,%r12 - orq %r9,%r12 - -.byte 102,73,15,126,208 -.byte 102,73,15,126,217 - orq 
%r8,%r12 -.byte 0x3e - jnz .Ladd_proceedq - - - - testq %r9,%r9 - jz .Ladd_doubleq - - - - - - -.byte 102,72,15,126,199 - pxor %xmm0,%xmm0 - movdqu %xmm0,0(%rdi) - movdqu %xmm0,16(%rdi) - movdqu %xmm0,32(%rdi) - movdqu %xmm0,48(%rdi) - movdqu %xmm0,64(%rdi) - movdqu %xmm0,80(%rdi) - jmp .Ladd_doneq - -.align 32 -.Ladd_doubleq: -.byte 102,72,15,126,206 -.byte 102,72,15,126,199 - addq $416,%rsp -.cfi_adjust_cfa_offset -416 - jmp .Lpoint_double_shortcutq -.cfi_adjust_cfa_offset 416 - -.align 32 -.Ladd_proceedq: - movq 0+64(%rsp),%rax - movq 8+64(%rsp),%r14 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 544(%rsp),%rax - leaq 544(%rsp),%rbx - movq 0+352(%rsp),%r9 - movq 8+352(%rsp),%r10 - leaq 0+352(%rsp),%rsi - movq 16+352(%rsp),%r11 - movq 24+352(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 0(%rsp),%rax - leaq 0(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 160(%rsp),%rax - leaq 160(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montq - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 96(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq 
%rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subq - - leaq 128(%rsp),%rbx - leaq 288(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 192+0(%rsp),%rax - movq 192+8(%rsp),%rbp - movq 192+16(%rsp),%rcx - movq 192+24(%rsp),%r10 - leaq 320(%rsp),%rdi - - call __ecp_nistz256_subq - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 128(%rsp),%rax - leaq 128(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq 0+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 320(%rsp),%rax - leaq 320(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 320(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 256(%rsp),%rbx - leaq 320(%rsp),%rdi - call __ecp_nistz256_sub_fromq - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 352(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 352+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 544(%rsp),%xmm2 - pand 544+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 480(%rsp),%xmm2 - pand 480+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa 
%xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 320(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 320+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 512(%rsp),%xmm2 - pand 512+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - -.Ladd_doneq: - leaq 576+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp -.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpoint_addq_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_point_add,.-ecp_nistz256_point_add -.globl ecp_nistz256_point_add_affine -.hidden ecp_nistz256_point_add_affine -.type ecp_nistz256_point_add_affine,@function -.align 32 -ecp_nistz256_point_add_affine: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lpoint_add_affinex - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $480+8,%rsp -.cfi_adjust_cfa_offset 32*15+8 -.Ladd_affineq_body: - - movdqu 0(%rsi),%xmm0 - movq %rdx,%rbx - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,320(%rsp) - movdqa %xmm1,320+16(%rsp) - movdqa 
%xmm2,352(%rsp) - movdqa %xmm3,352+16(%rsp) - movdqa %xmm4,384(%rsp) - movdqa %xmm5,384+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rbx),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rbx),%xmm1 - movdqu 32(%rbx),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rbx),%xmm3 - movdqa %xmm0,416(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,416+16(%rsp) - por %xmm0,%xmm1 -.byte 102,72,15,110,199 - movdqa %xmm2,448(%rsp) - movdqa %xmm3,448+16(%rsp) - por %xmm2,%xmm3 - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm1,%xmm3 - - leaq 64-0(%rsi),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm3,%xmm4 - movq 0(%rbx),%rax - - movq %r12,%r9 - por %xmm3,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - movq %r13,%r10 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - movq %r14,%r11 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - - leaq 32-0(%rsp),%rsi - movq %r15,%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 320(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 288(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 352(%rsp),%rbx - leaq 96(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+64(%rsp),%rax - movq 8+64(%rsp),%r14 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 128(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 0+96(%rsp),%rax - movq 8+96(%rsp),%r14 - leaq 0+96(%rsp),%rsi - 
movq 16+96(%rsp),%r15 - movq 24+96(%rsp),%r8 - leaq 192(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 128(%rsp),%rax - leaq 128(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 320(%rsp),%rax - leaq 320(%rsp),%rbx - movq 0+128(%rsp),%r9 - movq 8+128(%rsp),%r10 - leaq 0+128(%rsp),%rsi - movq 16+128(%rsp),%r11 - movq 24+128(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 192(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subq - - leaq 160(%rsp),%rbx - leaq 224(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 64(%rsp),%rdi - - call __ecp_nistz256_subq - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 352(%rsp),%rax - leaq 352(%rsp),%rbx - movq 0+160(%rsp),%r9 - movq 8+160(%rsp),%r10 - leaq 0+160(%rsp),%rsi - movq 16+160(%rsp),%r11 - movq 24+160(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 96(%rsp),%rax - leaq 96(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 64(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 32(%rsp),%rbx - leaq 256(%rsp),%rdi - call __ecp_nistz256_sub_fromq - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand .LONE_mont(%rip),%xmm2 - 
pand .LONE_mont+16(%rip),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 224(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 224+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 320(%rsp),%xmm2 - pand 320+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 256(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 256+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 352(%rsp),%xmm2 - pand 352+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - - leaq 480+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp -.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Ladd_affineq_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine -.type __ecp_nistz256_add_tox,@function -.align 32 -__ecp_nistz256_add_tox: -.cfi_startproc - xorq %r11,%r11 - adcq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - movq %r12,%rax - adcq 16(%rbx),%r8 - adcq 24(%rbx),%r9 - movq %r13,%rbp - adcq $0,%r11 - - xorq 
%r10,%r10 - sbbq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox - -.type __ecp_nistz256_sub_fromx,@function -.align 32 -__ecp_nistz256_sub_fromx: -.cfi_startproc - xorq %r11,%r11 - sbbq 0(%rbx),%r12 - sbbq 8(%rbx),%r13 - movq %r12,%rax - sbbq 16(%rbx),%r8 - sbbq 24(%rbx),%r9 - movq %r13,%rbp - sbbq $0,%r11 - - xorq %r10,%r10 - adcq $-1,%r12 - movq %r8,%rcx - adcq %r14,%r13 - adcq $0,%r8 - movq %r9,%r10 - adcq %r15,%r9 - - btq $0,%r11 - cmovncq %rax,%r12 - cmovncq %rbp,%r13 - movq %r12,0(%rdi) - cmovncq %rcx,%r8 - movq %r13,8(%rdi) - cmovncq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx - -.type __ecp_nistz256_subx,@function -.align 32 -__ecp_nistz256_subx: -.cfi_startproc - xorq %r11,%r11 - sbbq %r12,%rax - sbbq %r13,%rbp - movq %rax,%r12 - sbbq %r8,%rcx - sbbq %r9,%r10 - movq %rbp,%r13 - sbbq $0,%r11 - - xorq %r9,%r9 - adcq $-1,%rax - movq %rcx,%r8 - adcq %r14,%rbp - adcq $0,%rcx - movq %r10,%r9 - adcq %r15,%r10 - - btq $0,%r11 - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - cmovcq %rcx,%r8 - cmovcq %r10,%r9 - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_subx,.-__ecp_nistz256_subx - -.type __ecp_nistz256_mul_by_2x,@function -.align 32 -__ecp_nistz256_mul_by_2x: -.cfi_startproc - xorq %r11,%r11 - adcq %r12,%r12 - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - xorq %r10,%r10 - sbbq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq 
%r9,24(%rdi) - - .byte 0xf3,0xc3 -.cfi_endproc -.size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x -.type ecp_nistz256_point_doublex,@function -.align 32 -ecp_nistz256_point_doublex: -.cfi_startproc -.Lpoint_doublex: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $160+8,%rsp -.cfi_adjust_cfa_offset 32*5+8 -.Lpoint_doublex_body: - -.Lpoint_double_shortcutx: - movdqu 0(%rsi),%xmm0 - movq %rsi,%rbx - movdqu 16(%rsi),%xmm1 - movq 32+0(%rsi),%r12 - movq 32+8(%rsi),%r13 - movq 32+16(%rsi),%r8 - movq 32+24(%rsi),%r9 - movq .Lpoly+8(%rip),%r14 - movq .Lpoly+24(%rip),%r15 - movdqa %xmm0,96(%rsp) - movdqa %xmm1,96+16(%rsp) - leaq 32(%rdi),%r10 - leaq 64(%rdi),%r11 -.byte 102,72,15,110,199 -.byte 102,73,15,110,202 -.byte 102,73,15,110,211 - - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - leaq 64-128(%rsi),%rsi - leaq 64(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 0(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 32(%rbx),%rdx - movq 64+0(%rbx),%r9 - movq 64+8(%rbx),%r10 - movq 64+16(%rbx),%r11 - movq 64+24(%rbx),%r12 - leaq 64-128(%rbx),%rsi - leaq 32(%rbx),%rbx -.byte 102,72,15,126,215 - call __ecp_nistz256_mul_montx - call __ecp_nistz256_mul_by_2x - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_tox - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - 
leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 -.byte 102,72,15,126,207 - call __ecp_nistz256_sqr_montx - xorq %r9,%r9 - movq %r12,%rax - addq $-1,%r12 - movq %r13,%r10 - adcq %rsi,%r13 - movq %r14,%rcx - adcq $0,%r14 - movq %r15,%r8 - adcq %rbp,%r15 - adcq $0,%r9 - xorq %rsi,%rsi - testq $1,%rax - - cmovzq %rax,%r12 - cmovzq %r10,%r13 - cmovzq %rcx,%r14 - cmovzq %r8,%r15 - cmovzq %rsi,%r9 - - movq %r13,%rax - shrq $1,%r12 - shlq $63,%rax - movq %r14,%r10 - shrq $1,%r13 - orq %rax,%r12 - shlq $63,%r10 - movq %r15,%rcx - shrq $1,%r14 - orq %r10,%r13 - shlq $63,%rcx - movq %r12,0(%rdi) - shrq $1,%r15 - movq %r13,8(%rdi) - shlq $63,%r9 - orq %rcx,%r14 - orq %r9,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - movq 64(%rsp),%rdx - leaq 64(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - leaq 32(%rsp),%rbx - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_tox - - movq 96(%rsp),%rdx - leaq 96(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - movq 0+32(%rsp),%rdx - movq 8+32(%rsp),%r14 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r15 - movq 24+32(%rsp),%r8 -.byte 102,72,15,126,199 - call __ecp_nistz256_sqr_montx - - leaq 128(%rsp),%rbx - movq %r14,%r8 - movq %r15,%r9 - movq %rsi,%r14 - movq %rbp,%r15 - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 0(%rsp),%rdi - call __ecp_nistz256_subx - - movq 32(%rsp),%rdx - leaq 32(%rsp),%rbx - movq %r12,%r14 - xorl %ecx,%ecx - movq %r12,0+0(%rsp) - movq %r13,%r10 
- movq %r13,0+8(%rsp) - cmovzq %r8,%r11 - movq %r8,0+16(%rsp) - leaq 0-128(%rsp),%rsi - cmovzq %r9,%r12 - movq %r9,0+24(%rsp) - movq %r14,%r9 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - -.byte 102,72,15,126,203 -.byte 102,72,15,126,207 - call __ecp_nistz256_sub_fromx - - leaq 160+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp -.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpoint_doublex_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_point_doublex,.-ecp_nistz256_point_doublex -.type ecp_nistz256_point_addx,@function -.align 32 -ecp_nistz256_point_addx: -.cfi_startproc -.Lpoint_addx: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $576+8,%rsp -.cfi_adjust_cfa_offset 32*18+8 -.Lpoint_addx_body: - - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq %rsi,%rbx - movq %rdx,%rsi - movdqa %xmm0,384(%rsp) - movdqa %xmm1,384+16(%rsp) - movdqa %xmm2,416(%rsp) - movdqa %xmm3,416+16(%rsp) - movdqa %xmm4,448(%rsp) - movdqa %xmm5,448+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rsi),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rsi),%xmm3 - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,480(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,480+16(%rsp) - movdqu 64(%rsi),%xmm0 - movdqu 
80(%rsi),%xmm1 - movdqa %xmm2,512(%rsp) - movdqa %xmm3,512+16(%rsp) - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm0,%xmm1 -.byte 102,72,15,110,199 - - leaq 64-128(%rsi),%rsi - movq %rdx,544+0(%rsp) - movq %r14,544+8(%rsp) - movq %r15,544+16(%rsp) - movq %r8,544+24(%rsp) - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm1,%xmm4 - por %xmm1,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - movq 64+0(%rbx),%rdx - movq 64+8(%rbx),%r14 - movq 64+16(%rbx),%r15 - movq 64+24(%rbx),%r8 -.byte 102,72,15,110,203 - - leaq 64-128(%rbx),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 544(%rsp),%rdx - leaq 544(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 416(%rsp),%rdx - leaq 416(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq -128+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 512(%rsp),%rdx - leaq 512(%rsp),%rbx - movq 0+256(%rsp),%r9 - movq 8+256(%rsp),%r10 - leaq -128+256(%rsp),%rsi - movq 16+256(%rsp),%r11 - movq 24+256(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 224(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - orq %r13,%r12 - movdqa %xmm4,%xmm2 - orq %r8,%r12 - orq %r9,%r12 - por %xmm5,%xmm2 -.byte 102,73,15,110,220 - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 160(%rsp),%rdi - call 
__ecp_nistz256_mul_montx - - movq 480(%rsp),%rdx - leaq 480(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 160(%rsp),%rbx - leaq 0(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - orq %r13,%r12 - orq %r8,%r12 - orq %r9,%r12 - -.byte 102,73,15,126,208 -.byte 102,73,15,126,217 - orq %r8,%r12 -.byte 0x3e - jnz .Ladd_proceedx - - - - testq %r9,%r9 - jz .Ladd_doublex - - - - - - -.byte 102,72,15,126,199 - pxor %xmm0,%xmm0 - movdqu %xmm0,0(%rdi) - movdqu %xmm0,16(%rdi) - movdqu %xmm0,32(%rdi) - movdqu %xmm0,48(%rdi) - movdqu %xmm0,64(%rdi) - movdqu %xmm0,80(%rdi) - jmp .Ladd_donex - -.align 32 -.Ladd_doublex: -.byte 102,72,15,126,206 -.byte 102,72,15,126,199 - addq $416,%rsp -.cfi_adjust_cfa_offset -416 - jmp .Lpoint_double_shortcutx -.cfi_adjust_cfa_offset 416 - -.align 32 -.Ladd_proceedx: - movq 0+64(%rsp),%rdx - movq 8+64(%rsp),%r14 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 544(%rsp),%rdx - leaq 544(%rsp),%rbx - movq 0+352(%rsp),%r9 - movq 8+352(%rsp),%r10 - leaq -128+352(%rsp),%rsi - movq 16+352(%rsp),%r11 - movq 24+352(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 0(%rsp),%rdx - leaq 0(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 160(%rsp),%rdx - leaq 160(%rsp),%rbx - movq 
0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montx - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 96(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subx - - leaq 128(%rsp),%rbx - leaq 288(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 192+0(%rsp),%rax - movq 192+8(%rsp),%rbp - movq 192+16(%rsp),%rcx - movq 192+24(%rsp),%r10 - leaq 320(%rsp),%rdi - - call __ecp_nistz256_subx - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 128(%rsp),%rdx - leaq 128(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq -128+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 320(%rsp),%rdx - leaq 320(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 320(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 256(%rsp),%rbx - leaq 320(%rsp),%rdi - call __ecp_nistz256_sub_fromx - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 352(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 352+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 544(%rsp),%xmm2 - pand 544+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa 
%xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 480(%rsp),%xmm2 - pand 480+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 320(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 320+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 512(%rsp),%xmm2 - pand 512+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - -.Ladd_donex: - leaq 576+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp -.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpoint_addx_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size ecp_nistz256_point_addx,.-ecp_nistz256_point_addx -.type ecp_nistz256_point_add_affinex,@function -.align 32 -ecp_nistz256_point_add_affinex: -.cfi_startproc -.Lpoint_add_affinex: - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbp,-16 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset %rbx,-24 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r15,-56 - subq $480+8,%rsp -.cfi_adjust_cfa_offset 32*15+8 -.Ladd_affinex_body: - - movdqu 0(%rsi),%xmm0 - movq 
%rdx,%rbx - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,320(%rsp) - movdqa %xmm1,320+16(%rsp) - movdqa %xmm2,352(%rsp) - movdqa %xmm3,352+16(%rsp) - movdqa %xmm4,384(%rsp) - movdqa %xmm5,384+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rbx),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rbx),%xmm1 - movdqu 32(%rbx),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rbx),%xmm3 - movdqa %xmm0,416(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,416+16(%rsp) - por %xmm0,%xmm1 -.byte 102,72,15,110,199 - movdqa %xmm2,448(%rsp) - movdqa %xmm3,448+16(%rsp) - por %xmm2,%xmm3 - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm1,%xmm3 - - leaq 64-128(%rsi),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm3,%xmm4 - movq 0(%rbx),%rdx - - movq %r12,%r9 - por %xmm3,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - movq %r13,%r10 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - movq %r14,%r11 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - - leaq 32-128(%rsp),%rsi - movq %r15,%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 320(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 288(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 352(%rsp),%rbx - 
leaq 96(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+64(%rsp),%rdx - movq 8+64(%rsp),%r14 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 128(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 0+96(%rsp),%rdx - movq 8+96(%rsp),%r14 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r15 - movq 24+96(%rsp),%r8 - leaq 192(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 128(%rsp),%rdx - leaq 128(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 320(%rsp),%rdx - leaq 320(%rsp),%rbx - movq 0+128(%rsp),%r9 - movq 8+128(%rsp),%r10 - leaq -128+128(%rsp),%rsi - movq 16+128(%rsp),%r11 - movq 24+128(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 192(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subx - - leaq 160(%rsp),%rbx - leaq 224(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 64(%rsp),%rdi - - call __ecp_nistz256_subx - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 352(%rsp),%rdx - leaq 352(%rsp),%rbx - movq 0+160(%rsp),%r9 - movq 8+160(%rsp),%r10 - leaq -128+160(%rsp),%rsi - movq 16+160(%rsp),%r11 - movq 24+160(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 96(%rsp),%rdx - leaq 96(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 
64(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 32(%rsp),%rbx - leaq 256(%rsp),%rdi - call __ecp_nistz256_sub_fromx - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand .LONE_mont(%rip),%xmm2 - pand .LONE_mont+16(%rip),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 224(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 224+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 320(%rsp),%xmm2 - pand 320+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 256(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 256+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 352(%rsp),%xmm2 - pand 352+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - - leaq 480+56(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbx -.cfi_restore %rbx - movq -8(%rsi),%rbp -.cfi_restore %rbp - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Ladd_affinex_epilogue: - .byte 0xf3,0xc3 
-.cfi_endproc -.size ecp_nistz256_point_add_affinex,.-ecp_nistz256_point_add_affinex -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S deleted file mode 100644 index d072a83479..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S +++ /dev/null @@ -1,343 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.type beeu_mod_inverse_vartime,@function -.hidden beeu_mod_inverse_vartime -.globl beeu_mod_inverse_vartime -.hidden beeu_mod_inverse_vartime -.align 32 -beeu_mod_inverse_vartime: -.cfi_startproc - pushq %rbp -.cfi_adjust_cfa_offset 8 -.cfi_offset rbp,-16 - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12,-24 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13,-32 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14,-40 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset r15,-48 - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset rbx,-56 - pushq %rsi -.cfi_adjust_cfa_offset 8 -.cfi_offset rsi,-64 - - subq $80,%rsp -.cfi_adjust_cfa_offset 80 - movq %rdi,0(%rsp) - - - movq $1,%r8 - xorq %r9,%r9 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %rdi,%rdi - - xorq %r12,%r12 - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - xorq %rbp,%rbp - - - vmovdqu 0(%rsi),%xmm0 - vmovdqu 16(%rsi),%xmm1 - vmovdqu %xmm0,48(%rsp) - vmovdqu %xmm1,64(%rsp) - - vmovdqu 0(%rdx),%xmm0 - vmovdqu 16(%rdx),%xmm1 - vmovdqu %xmm0,16(%rsp) - vmovdqu %xmm1,32(%rsp) - -.Lbeeu_loop: - xorq %rbx,%rbx - orq 48(%rsp),%rbx - orq 56(%rsp),%rbx - orq 
64(%rsp),%rbx - orq 72(%rsp),%rbx - jz .Lbeeu_loop_end - - - - - - - - - - - movq $1,%rcx - - -.Lbeeu_shift_loop_XB: - movq %rcx,%rbx - andq 48(%rsp),%rbx - jnz .Lbeeu_shift_loop_end_XB - - - movq $1,%rbx - andq %r8,%rbx - jz .Lshift1_0 - addq 0(%rdx),%r8 - adcq 8(%rdx),%r9 - adcq 16(%rdx),%r10 - adcq 24(%rdx),%r11 - adcq $0,%rdi - -.Lshift1_0: - shrdq $1,%r9,%r8 - shrdq $1,%r10,%r9 - shrdq $1,%r11,%r10 - shrdq $1,%rdi,%r11 - shrq $1,%rdi - - shlq $1,%rcx - - - - - - cmpq $0x8000000,%rcx - jne .Lbeeu_shift_loop_XB - -.Lbeeu_shift_loop_end_XB: - bsfq %rcx,%rcx - testq %rcx,%rcx - jz .Lbeeu_no_shift_XB - - - - movq 8+48(%rsp),%rax - movq 16+48(%rsp),%rbx - movq 24+48(%rsp),%rsi - - shrdq %cl,%rax,0+48(%rsp) - shrdq %cl,%rbx,8+48(%rsp) - shrdq %cl,%rsi,16+48(%rsp) - - shrq %cl,%rsi - movq %rsi,24+48(%rsp) - - -.Lbeeu_no_shift_XB: - - movq $1,%rcx - - -.Lbeeu_shift_loop_YA: - movq %rcx,%rbx - andq 16(%rsp),%rbx - jnz .Lbeeu_shift_loop_end_YA - - - movq $1,%rbx - andq %r12,%rbx - jz .Lshift1_1 - addq 0(%rdx),%r12 - adcq 8(%rdx),%r13 - adcq 16(%rdx),%r14 - adcq 24(%rdx),%r15 - adcq $0,%rbp - -.Lshift1_1: - shrdq $1,%r13,%r12 - shrdq $1,%r14,%r13 - shrdq $1,%r15,%r14 - shrdq $1,%rbp,%r15 - shrq $1,%rbp - - shlq $1,%rcx - - - - - - cmpq $0x8000000,%rcx - jne .Lbeeu_shift_loop_YA - -.Lbeeu_shift_loop_end_YA: - bsfq %rcx,%rcx - testq %rcx,%rcx - jz .Lbeeu_no_shift_YA - - - - movq 8+16(%rsp),%rax - movq 16+16(%rsp),%rbx - movq 24+16(%rsp),%rsi - - shrdq %cl,%rax,0+16(%rsp) - shrdq %cl,%rbx,8+16(%rsp) - shrdq %cl,%rsi,16+16(%rsp) - - shrq %cl,%rsi - movq %rsi,24+16(%rsp) - - -.Lbeeu_no_shift_YA: - - movq 48(%rsp),%rax - movq 56(%rsp),%rbx - movq 64(%rsp),%rsi - movq 72(%rsp),%rcx - subq 16(%rsp),%rax - sbbq 24(%rsp),%rbx - sbbq 32(%rsp),%rsi - sbbq 40(%rsp),%rcx - jnc .Lbeeu_B_bigger_than_A - - - movq 16(%rsp),%rax - movq 24(%rsp),%rbx - movq 32(%rsp),%rsi - movq 40(%rsp),%rcx - subq 48(%rsp),%rax - sbbq 56(%rsp),%rbx - sbbq 64(%rsp),%rsi - sbbq 72(%rsp),%rcx - movq 
%rax,16(%rsp) - movq %rbx,24(%rsp) - movq %rsi,32(%rsp) - movq %rcx,40(%rsp) - - - addq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - adcq %r11,%r15 - adcq %rdi,%rbp - jmp .Lbeeu_loop - -.Lbeeu_B_bigger_than_A: - - movq %rax,48(%rsp) - movq %rbx,56(%rsp) - movq %rsi,64(%rsp) - movq %rcx,72(%rsp) - - - addq %r12,%r8 - adcq %r13,%r9 - adcq %r14,%r10 - adcq %r15,%r11 - adcq %rbp,%rdi - - jmp .Lbeeu_loop - -.Lbeeu_loop_end: - - - - - movq 16(%rsp),%rbx - subq $1,%rbx - orq 24(%rsp),%rbx - orq 32(%rsp),%rbx - orq 40(%rsp),%rbx - - jnz .Lbeeu_err - - - - - movq 0(%rdx),%r8 - movq 8(%rdx),%r9 - movq 16(%rdx),%r10 - movq 24(%rdx),%r11 - xorq %rdi,%rdi - -.Lbeeu_reduction_loop: - movq %r12,16(%rsp) - movq %r13,24(%rsp) - movq %r14,32(%rsp) - movq %r15,40(%rsp) - movq %rbp,48(%rsp) - - - subq %r8,%r12 - sbbq %r9,%r13 - sbbq %r10,%r14 - sbbq %r11,%r15 - sbbq $0,%rbp - - - cmovcq 16(%rsp),%r12 - cmovcq 24(%rsp),%r13 - cmovcq 32(%rsp),%r14 - cmovcq 40(%rsp),%r15 - jnc .Lbeeu_reduction_loop - - - subq %r12,%r8 - sbbq %r13,%r9 - sbbq %r14,%r10 - sbbq %r15,%r11 - -.Lbeeu_save: - - movq 0(%rsp),%rdi - - movq %r8,0(%rdi) - movq %r9,8(%rdi) - movq %r10,16(%rdi) - movq %r11,24(%rdi) - - - movq $1,%rax - jmp .Lbeeu_finish - -.Lbeeu_err: - - xorq %rax,%rax - -.Lbeeu_finish: - addq $80,%rsp -.cfi_adjust_cfa_offset -80 - popq %rsi -.cfi_adjust_cfa_offset -8 -.cfi_restore rsi - popq %rbx -.cfi_adjust_cfa_offset -8 -.cfi_restore rbx - popq %r15 -.cfi_adjust_cfa_offset -8 -.cfi_restore r15 - popq %r14 -.cfi_adjust_cfa_offset -8 -.cfi_restore r14 - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_restore r13 - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_restore r12 - popq %rbp -.cfi_adjust_cfa_offset -8 -.cfi_restore rbp - .byte 0xf3,0xc3 -.cfi_endproc - -.size beeu_mod_inverse_vartime, .-beeu_mod_inverse_vartime -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S 
b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S deleted file mode 100644 index 18d66f6f7f..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S +++ /dev/null @@ -1,63 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - -.globl CRYPTO_rdrand -.hidden CRYPTO_rdrand -.type CRYPTO_rdrand,@function -.align 16 -CRYPTO_rdrand: -.cfi_startproc - xorq %rax,%rax -.byte 72,15,199,242 - - adcq %rax,%rax - movq %rdx,0(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size CRYPTO_rdrand,.-CRYPTO_rdrand - - - - - -.globl CRYPTO_rdrand_multiple8_buf -.hidden CRYPTO_rdrand_multiple8_buf -.type CRYPTO_rdrand_multiple8_buf,@function -.align 16 -CRYPTO_rdrand_multiple8_buf: -.cfi_startproc - testq %rsi,%rsi - jz .Lout - movq $8,%rdx -.Lloop: -.byte 72,15,199,241 - jnc .Lerr - movq %rcx,0(%rdi) - addq %rdx,%rdi - subq %rdx,%rsi - jnz .Lloop -.Lout: - movq $1,%rax - .byte 0xf3,0xc3 -.Lerr: - xorq %rax,%rax - .byte 0xf3,0xc3 -.cfi_endproc -.size CRYPTO_rdrand_multiple8_buf,.-CRYPTO_rdrand_multiple8_buf -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S deleted file mode 100644 index faccd484b0..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S +++ /dev/null @@ -1,1749 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl rsaz_1024_sqr_avx2 -.hidden rsaz_1024_sqr_avx2 -.type rsaz_1024_sqr_avx2,@function -.align 64 -rsaz_1024_sqr_avx2: -.cfi_startproc - leaq (%rsp),%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - vzeroupper - movq %rax,%rbp -.cfi_def_cfa_register %rbp - movq %rdx,%r13 - subq $832,%rsp - movq %r13,%r15 - subq $-128,%rdi - subq $-128,%rsi - subq $-128,%r13 - - andq $4095,%r15 - addq $320,%r15 - shrq $12,%r15 - vpxor %ymm9,%ymm9,%ymm9 - jz .Lsqr_1024_no_n_copy - - - - - - subq $320,%rsp - vmovdqu 0-128(%r13),%ymm0 - andq $-2048,%rsp - vmovdqu 32-128(%r13),%ymm1 - vmovdqu 64-128(%r13),%ymm2 - vmovdqu 96-128(%r13),%ymm3 - vmovdqu 128-128(%r13),%ymm4 - vmovdqu 160-128(%r13),%ymm5 - vmovdqu 192-128(%r13),%ymm6 - vmovdqu 224-128(%r13),%ymm7 - vmovdqu 256-128(%r13),%ymm8 - leaq 832+128(%rsp),%r13 - vmovdqu %ymm0,0-128(%r13) - vmovdqu %ymm1,32-128(%r13) - vmovdqu %ymm2,64-128(%r13) - vmovdqu %ymm3,96-128(%r13) - vmovdqu %ymm4,128-128(%r13) - vmovdqu %ymm5,160-128(%r13) - vmovdqu %ymm6,192-128(%r13) - vmovdqu %ymm7,224-128(%r13) - vmovdqu %ymm8,256-128(%r13) - vmovdqu %ymm9,288-128(%r13) - -.Lsqr_1024_no_n_copy: - andq $-1024,%rsp - - vmovdqu 32-128(%rsi),%ymm1 - vmovdqu 64-128(%rsi),%ymm2 - vmovdqu 96-128(%rsi),%ymm3 - vmovdqu 128-128(%rsi),%ymm4 - vmovdqu 160-128(%rsi),%ymm5 - vmovdqu 192-128(%rsi),%ymm6 - vmovdqu 224-128(%rsi),%ymm7 - vmovdqu 256-128(%rsi),%ymm8 - - leaq 192(%rsp),%rbx - vmovdqu .Land_mask(%rip),%ymm15 - jmp .LOOP_GRANDE_SQR_1024 - -.align 32 -.LOOP_GRANDE_SQR_1024: - leaq 576+128(%rsp),%r9 - leaq 448(%rsp),%r12 - 
- - - - vpaddq %ymm1,%ymm1,%ymm1 - vpbroadcastq 0-128(%rsi),%ymm10 - vpaddq %ymm2,%ymm2,%ymm2 - vmovdqa %ymm1,0-128(%r9) - vpaddq %ymm3,%ymm3,%ymm3 - vmovdqa %ymm2,32-128(%r9) - vpaddq %ymm4,%ymm4,%ymm4 - vmovdqa %ymm3,64-128(%r9) - vpaddq %ymm5,%ymm5,%ymm5 - vmovdqa %ymm4,96-128(%r9) - vpaddq %ymm6,%ymm6,%ymm6 - vmovdqa %ymm5,128-128(%r9) - vpaddq %ymm7,%ymm7,%ymm7 - vmovdqa %ymm6,160-128(%r9) - vpaddq %ymm8,%ymm8,%ymm8 - vmovdqa %ymm7,192-128(%r9) - vpxor %ymm9,%ymm9,%ymm9 - vmovdqa %ymm8,224-128(%r9) - - vpmuludq 0-128(%rsi),%ymm10,%ymm0 - vpbroadcastq 32-128(%rsi),%ymm11 - vmovdqu %ymm9,288-192(%rbx) - vpmuludq %ymm10,%ymm1,%ymm1 - vmovdqu %ymm9,320-448(%r12) - vpmuludq %ymm10,%ymm2,%ymm2 - vmovdqu %ymm9,352-448(%r12) - vpmuludq %ymm10,%ymm3,%ymm3 - vmovdqu %ymm9,384-448(%r12) - vpmuludq %ymm10,%ymm4,%ymm4 - vmovdqu %ymm9,416-448(%r12) - vpmuludq %ymm10,%ymm5,%ymm5 - vmovdqu %ymm9,448-448(%r12) - vpmuludq %ymm10,%ymm6,%ymm6 - vmovdqu %ymm9,480-448(%r12) - vpmuludq %ymm10,%ymm7,%ymm7 - vmovdqu %ymm9,512-448(%r12) - vpmuludq %ymm10,%ymm8,%ymm8 - vpbroadcastq 64-128(%rsi),%ymm10 - vmovdqu %ymm9,544-448(%r12) - - movq %rsi,%r15 - movl $4,%r14d - jmp .Lsqr_entry_1024 -.align 32 -.LOOP_SQR_1024: - vpbroadcastq 32-128(%r15),%ymm11 - vpmuludq 0-128(%rsi),%ymm10,%ymm0 - vpaddq 0-192(%rbx),%ymm0,%ymm0 - vpmuludq 0-128(%r9),%ymm10,%ymm1 - vpaddq 32-192(%rbx),%ymm1,%ymm1 - vpmuludq 32-128(%r9),%ymm10,%ymm2 - vpaddq 64-192(%rbx),%ymm2,%ymm2 - vpmuludq 64-128(%r9),%ymm10,%ymm3 - vpaddq 96-192(%rbx),%ymm3,%ymm3 - vpmuludq 96-128(%r9),%ymm10,%ymm4 - vpaddq 128-192(%rbx),%ymm4,%ymm4 - vpmuludq 128-128(%r9),%ymm10,%ymm5 - vpaddq 160-192(%rbx),%ymm5,%ymm5 - vpmuludq 160-128(%r9),%ymm10,%ymm6 - vpaddq 192-192(%rbx),%ymm6,%ymm6 - vpmuludq 192-128(%r9),%ymm10,%ymm7 - vpaddq 224-192(%rbx),%ymm7,%ymm7 - vpmuludq 224-128(%r9),%ymm10,%ymm8 - vpbroadcastq 64-128(%r15),%ymm10 - vpaddq 256-192(%rbx),%ymm8,%ymm8 -.Lsqr_entry_1024: - vmovdqu %ymm0,0-192(%rbx) - vmovdqu %ymm1,32-192(%rbx) - - 
vpmuludq 32-128(%rsi),%ymm11,%ymm12 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 32-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq 64-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 96-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 128-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq 160-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 192-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 224-128(%r9),%ymm11,%ymm0 - vpbroadcastq 96-128(%r15),%ymm11 - vpaddq 288-192(%rbx),%ymm0,%ymm0 - - vmovdqu %ymm2,64-192(%rbx) - vmovdqu %ymm3,96-192(%rbx) - - vpmuludq 64-128(%rsi),%ymm10,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 64-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 96-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq 128-128(%r9),%ymm10,%ymm13 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 160-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 192-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm0,%ymm0 - vpmuludq 224-128(%r9),%ymm10,%ymm1 - vpbroadcastq 128-128(%r15),%ymm10 - vpaddq 320-448(%r12),%ymm1,%ymm1 - - vmovdqu %ymm4,128-192(%rbx) - vmovdqu %ymm5,160-192(%rbx) - - vpmuludq 96-128(%rsi),%ymm11,%ymm12 - vpaddq %ymm12,%ymm6,%ymm6 - vpmuludq 96-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm7,%ymm7 - vpmuludq 128-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm8,%ymm8 - vpmuludq 160-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm0,%ymm0 - vpmuludq 192-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm1,%ymm1 - vpmuludq 224-128(%r9),%ymm11,%ymm2 - vpbroadcastq 160-128(%r15),%ymm11 - vpaddq 352-448(%r12),%ymm2,%ymm2 - - vmovdqu %ymm6,192-192(%rbx) - vmovdqu %ymm7,224-192(%rbx) - - vpmuludq 128-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 128-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm0,%ymm0 - vpmuludq 160-128(%r9),%ymm10,%ymm13 - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 192-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 224-128(%r9),%ymm10,%ymm3 - 
vpbroadcastq 192-128(%r15),%ymm10 - vpaddq 384-448(%r12),%ymm3,%ymm3 - - vmovdqu %ymm8,256-192(%rbx) - vmovdqu %ymm0,288-192(%rbx) - leaq 8(%rbx),%rbx - - vpmuludq 160-128(%rsi),%ymm11,%ymm13 - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 160-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 192-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq 224-128(%r9),%ymm11,%ymm4 - vpbroadcastq 224-128(%r15),%ymm11 - vpaddq 416-448(%r12),%ymm4,%ymm4 - - vmovdqu %ymm1,320-448(%r12) - vmovdqu %ymm2,352-448(%r12) - - vpmuludq 192-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm3,%ymm3 - vpmuludq 192-128(%r9),%ymm10,%ymm14 - vpbroadcastq 256-128(%r15),%ymm0 - vpaddq %ymm14,%ymm4,%ymm4 - vpmuludq 224-128(%r9),%ymm10,%ymm5 - vpbroadcastq 0+8-128(%r15),%ymm10 - vpaddq 448-448(%r12),%ymm5,%ymm5 - - vmovdqu %ymm3,384-448(%r12) - vmovdqu %ymm4,416-448(%r12) - leaq 8(%r15),%r15 - - vpmuludq 224-128(%rsi),%ymm11,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 224-128(%r9),%ymm11,%ymm6 - vpaddq 480-448(%r12),%ymm6,%ymm6 - - vpmuludq 256-128(%rsi),%ymm0,%ymm7 - vmovdqu %ymm5,448-448(%r12) - vpaddq 512-448(%r12),%ymm7,%ymm7 - vmovdqu %ymm6,480-448(%r12) - vmovdqu %ymm7,512-448(%r12) - leaq 8(%r12),%r12 - - decl %r14d - jnz .LOOP_SQR_1024 - - vmovdqu 256(%rsp),%ymm8 - vmovdqu 288(%rsp),%ymm1 - vmovdqu 320(%rsp),%ymm2 - leaq 192(%rsp),%rbx - - vpsrlq $29,%ymm8,%ymm14 - vpand %ymm15,%ymm8,%ymm8 - vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - - vpermq $0x93,%ymm14,%ymm14 - vpxor %ymm9,%ymm9,%ymm9 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm8,%ymm8 - vpblendd $3,%ymm11,%ymm9,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vpaddq %ymm11,%ymm2,%ymm2 - vmovdqu %ymm1,288-192(%rbx) - vmovdqu %ymm2,320-192(%rbx) - - movq (%rsp),%rax - movq 8(%rsp),%r10 - movq 16(%rsp),%r11 - movq 24(%rsp),%r12 - vmovdqu 32(%rsp),%ymm1 - vmovdqu 64-192(%rbx),%ymm2 - vmovdqu 96-192(%rbx),%ymm3 - vmovdqu 128-192(%rbx),%ymm4 - vmovdqu 
160-192(%rbx),%ymm5 - vmovdqu 192-192(%rbx),%ymm6 - vmovdqu 224-192(%rbx),%ymm7 - - movq %rax,%r9 - imull %ecx,%eax - andl $0x1fffffff,%eax - vmovd %eax,%xmm12 - - movq %rax,%rdx - imulq -128(%r13),%rax - vpbroadcastq %xmm12,%ymm12 - addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%r13),%rax - shrq $29,%r9 - addq %rax,%r10 - movq %rdx,%rax - imulq 16-128(%r13),%rax - addq %r9,%r10 - addq %rax,%r11 - imulq 24-128(%r13),%rdx - addq %rdx,%r12 - - movq %r10,%rax - imull %ecx,%eax - andl $0x1fffffff,%eax - - movl $9,%r14d - jmp .LOOP_REDUCE_1024 - -.align 32 -.LOOP_REDUCE_1024: - vmovd %eax,%xmm13 - vpbroadcastq %xmm13,%ymm13 - - vpmuludq 32-128(%r13),%ymm12,%ymm10 - movq %rax,%rdx - imulq -128(%r13),%rax - vpaddq %ymm10,%ymm1,%ymm1 - addq %rax,%r10 - vpmuludq 64-128(%r13),%ymm12,%ymm14 - movq %rdx,%rax - imulq 8-128(%r13),%rax - vpaddq %ymm14,%ymm2,%ymm2 - vpmuludq 96-128(%r13),%ymm12,%ymm11 -.byte 0x67 - addq %rax,%r11 -.byte 0x67 - movq %rdx,%rax - imulq 16-128(%r13),%rax - shrq $29,%r10 - vpaddq %ymm11,%ymm3,%ymm3 - vpmuludq 128-128(%r13),%ymm12,%ymm10 - addq %rax,%r12 - addq %r10,%r11 - vpaddq %ymm10,%ymm4,%ymm4 - vpmuludq 160-128(%r13),%ymm12,%ymm14 - movq %r11,%rax - imull %ecx,%eax - vpaddq %ymm14,%ymm5,%ymm5 - vpmuludq 192-128(%r13),%ymm12,%ymm11 - andl $0x1fffffff,%eax - vpaddq %ymm11,%ymm6,%ymm6 - vpmuludq 224-128(%r13),%ymm12,%ymm10 - vpaddq %ymm10,%ymm7,%ymm7 - vpmuludq 256-128(%r13),%ymm12,%ymm14 - vmovd %eax,%xmm12 - - vpaddq %ymm14,%ymm8,%ymm8 - - vpbroadcastq %xmm12,%ymm12 - - vpmuludq 32-8-128(%r13),%ymm13,%ymm11 - vmovdqu 96-8-128(%r13),%ymm14 - movq %rax,%rdx - imulq -128(%r13),%rax - vpaddq %ymm11,%ymm1,%ymm1 - vpmuludq 64-8-128(%r13),%ymm13,%ymm10 - vmovdqu 128-8-128(%r13),%ymm11 - addq %rax,%r11 - movq %rdx,%rax - imulq 8-128(%r13),%rax - vpaddq %ymm10,%ymm2,%ymm2 - addq %r12,%rax - shrq $29,%r11 - vpmuludq %ymm13,%ymm14,%ymm14 - vmovdqu 160-8-128(%r13),%ymm10 - addq %r11,%rax - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq %ymm13,%ymm11,%ymm11 - vmovdqu 
192-8-128(%r13),%ymm14 -.byte 0x67 - movq %rax,%r12 - imull %ecx,%eax - vpaddq %ymm11,%ymm4,%ymm4 - vpmuludq %ymm13,%ymm10,%ymm10 -.byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 - andl $0x1fffffff,%eax - vpaddq %ymm10,%ymm5,%ymm5 - vpmuludq %ymm13,%ymm14,%ymm14 - vmovdqu 256-8-128(%r13),%ymm10 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq %ymm13,%ymm11,%ymm11 - vmovdqu 288-8-128(%r13),%ymm9 - vmovd %eax,%xmm0 - imulq -128(%r13),%rax - vpaddq %ymm11,%ymm7,%ymm7 - vpmuludq %ymm13,%ymm10,%ymm10 - vmovdqu 32-16-128(%r13),%ymm14 - vpbroadcastq %xmm0,%ymm0 - vpaddq %ymm10,%ymm8,%ymm8 - vpmuludq %ymm13,%ymm9,%ymm9 - vmovdqu 64-16-128(%r13),%ymm11 - addq %rax,%r12 - - vmovdqu 32-24-128(%r13),%ymm13 - vpmuludq %ymm12,%ymm14,%ymm14 - vmovdqu 96-16-128(%r13),%ymm10 - vpaddq %ymm14,%ymm1,%ymm1 - vpmuludq %ymm0,%ymm13,%ymm13 - vpmuludq %ymm12,%ymm11,%ymm11 -.byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff - vpaddq %ymm1,%ymm13,%ymm13 - vpaddq %ymm11,%ymm2,%ymm2 - vpmuludq %ymm12,%ymm10,%ymm10 - vmovdqu 160-16-128(%r13),%ymm11 -.byte 0x67 - vmovq %xmm13,%rax - vmovdqu %ymm13,(%rsp) - vpaddq %ymm10,%ymm3,%ymm3 - vpmuludq %ymm12,%ymm14,%ymm14 - vmovdqu 192-16-128(%r13),%ymm10 - vpaddq %ymm14,%ymm4,%ymm4 - vpmuludq %ymm12,%ymm11,%ymm11 - vmovdqu 224-16-128(%r13),%ymm14 - vpaddq %ymm11,%ymm5,%ymm5 - vpmuludq %ymm12,%ymm10,%ymm10 - vmovdqu 256-16-128(%r13),%ymm11 - vpaddq %ymm10,%ymm6,%ymm6 - vpmuludq %ymm12,%ymm14,%ymm14 - shrq $29,%r12 - vmovdqu 288-16-128(%r13),%ymm10 - addq %r12,%rax - vpaddq %ymm14,%ymm7,%ymm7 - vpmuludq %ymm12,%ymm11,%ymm11 - - movq %rax,%r9 - imull %ecx,%eax - vpaddq %ymm11,%ymm8,%ymm8 - vpmuludq %ymm12,%ymm10,%ymm10 - andl $0x1fffffff,%eax - vmovd %eax,%xmm12 - vmovdqu 96-24-128(%r13),%ymm11 -.byte 0x67 - vpaddq %ymm10,%ymm9,%ymm9 - vpbroadcastq %xmm12,%ymm12 - - vpmuludq 64-24-128(%r13),%ymm0,%ymm14 - vmovdqu 128-24-128(%r13),%ymm10 - movq %rax,%rdx - imulq -128(%r13),%rax - movq 8(%rsp),%r10 - vpaddq %ymm14,%ymm2,%ymm1 - vpmuludq %ymm0,%ymm11,%ymm11 - 
vmovdqu 160-24-128(%r13),%ymm14 - addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%r13),%rax -.byte 0x67 - shrq $29,%r9 - movq 16(%rsp),%r11 - vpaddq %ymm11,%ymm3,%ymm2 - vpmuludq %ymm0,%ymm10,%ymm10 - vmovdqu 192-24-128(%r13),%ymm11 - addq %rax,%r10 - movq %rdx,%rax - imulq 16-128(%r13),%rax - vpaddq %ymm10,%ymm4,%ymm3 - vpmuludq %ymm0,%ymm14,%ymm14 - vmovdqu 224-24-128(%r13),%ymm10 - imulq 24-128(%r13),%rdx - addq %rax,%r11 - leaq (%r9,%r10,1),%rax - vpaddq %ymm14,%ymm5,%ymm4 - vpmuludq %ymm0,%ymm11,%ymm11 - vmovdqu 256-24-128(%r13),%ymm14 - movq %rax,%r10 - imull %ecx,%eax - vpmuludq %ymm0,%ymm10,%ymm10 - vpaddq %ymm11,%ymm6,%ymm5 - vmovdqu 288-24-128(%r13),%ymm11 - andl $0x1fffffff,%eax - vpaddq %ymm10,%ymm7,%ymm6 - vpmuludq %ymm0,%ymm14,%ymm14 - addq 24(%rsp),%rdx - vpaddq %ymm14,%ymm8,%ymm7 - vpmuludq %ymm0,%ymm11,%ymm11 - vpaddq %ymm11,%ymm9,%ymm8 - vmovq %r12,%xmm9 - movq %rdx,%r12 - - decl %r14d - jnz .LOOP_REDUCE_1024 - leaq 448(%rsp),%r12 - vpaddq %ymm9,%ymm13,%ymm0 - vpxor %ymm9,%ymm9,%ymm9 - - vpaddq 288-192(%rbx),%ymm0,%ymm0 - vpaddq 320-448(%r12),%ymm1,%ymm1 - vpaddq 352-448(%r12),%ymm2,%ymm2 - vpaddq 384-448(%r12),%ymm3,%ymm3 - vpaddq 416-448(%r12),%ymm4,%ymm4 - vpaddq 448-448(%r12),%ymm5,%ymm5 - vpaddq 480-448(%r12),%ymm6,%ymm6 - vpaddq 512-448(%r12),%ymm7,%ymm7 - vpaddq 544-448(%r12),%ymm8,%ymm8 - - vpsrlq $29,%ymm0,%ymm14 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm12,%ymm12 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm13,%ymm13 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm0,%ymm0 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm2,%ymm2 - vpblendd $3,%ymm13,%ymm9,%ymm13 - vpaddq %ymm12,%ymm3,%ymm3 - vpaddq %ymm13,%ymm4,%ymm4 - - vpsrlq 
$29,%ymm0,%ymm14 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm12,%ymm12 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm13,%ymm13 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm0,%ymm0 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vmovdqu %ymm0,0-128(%rdi) - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm2,%ymm2 - vmovdqu %ymm1,32-128(%rdi) - vpblendd $3,%ymm13,%ymm9,%ymm13 - vpaddq %ymm12,%ymm3,%ymm3 - vmovdqu %ymm2,64-128(%rdi) - vpaddq %ymm13,%ymm4,%ymm4 - vmovdqu %ymm3,96-128(%rdi) - vpsrlq $29,%ymm4,%ymm14 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm11 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm13,%ymm13 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm4,%ymm4 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm5,%ymm5 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm6,%ymm6 - vpblendd $3,%ymm13,%ymm0,%ymm13 - vpaddq %ymm12,%ymm7,%ymm7 - vpaddq %ymm13,%ymm8,%ymm8 - - vpsrlq $29,%ymm4,%ymm14 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm11 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm13,%ymm13 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm4,%ymm4 - vpblendd 
$3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm5,%ymm5 - vmovdqu %ymm4,128-128(%rdi) - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm6,%ymm6 - vmovdqu %ymm5,160-128(%rdi) - vpblendd $3,%ymm13,%ymm0,%ymm13 - vpaddq %ymm12,%ymm7,%ymm7 - vmovdqu %ymm6,192-128(%rdi) - vpaddq %ymm13,%ymm8,%ymm8 - vmovdqu %ymm7,224-128(%rdi) - vmovdqu %ymm8,256-128(%rdi) - - movq %rdi,%rsi - decl %r8d - jne .LOOP_GRANDE_SQR_1024 - - vzeroall - movq %rbp,%rax -.cfi_def_cfa_register %rax - movq -48(%rax),%r15 -.cfi_restore %r15 - movq -40(%rax),%r14 -.cfi_restore %r14 - movq -32(%rax),%r13 -.cfi_restore %r13 - movq -24(%rax),%r12 -.cfi_restore %r12 - movq -16(%rax),%rbp -.cfi_restore %rbp - movq -8(%rax),%rbx -.cfi_restore %rbx - leaq (%rax),%rsp -.cfi_def_cfa_register %rsp -.Lsqr_1024_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2 -.globl rsaz_1024_mul_avx2 -.hidden rsaz_1024_mul_avx2 -.type rsaz_1024_mul_avx2,@function -.align 64 -rsaz_1024_mul_avx2: -.cfi_startproc - leaq (%rsp),%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - movq %rax,%rbp -.cfi_def_cfa_register %rbp - vzeroall - movq %rdx,%r13 - subq $64,%rsp - - - - - - -.byte 0x67,0x67 - movq %rsi,%r15 - andq $4095,%r15 - addq $320,%r15 - shrq $12,%r15 - movq %rsi,%r15 - cmovnzq %r13,%rsi - cmovnzq %r15,%r13 - - movq %rcx,%r15 - subq $-128,%rsi - subq $-128,%rcx - subq $-128,%rdi - - andq $4095,%r15 - addq $320,%r15 -.byte 0x67,0x67 - shrq $12,%r15 - jz .Lmul_1024_no_n_copy - - - - - - subq $320,%rsp - vmovdqu 0-128(%rcx),%ymm0 - andq $-512,%rsp - vmovdqu 32-128(%rcx),%ymm1 - vmovdqu 64-128(%rcx),%ymm2 - vmovdqu 96-128(%rcx),%ymm3 - vmovdqu 128-128(%rcx),%ymm4 - vmovdqu 160-128(%rcx),%ymm5 - vmovdqu 192-128(%rcx),%ymm6 - vmovdqu 224-128(%rcx),%ymm7 - vmovdqu 256-128(%rcx),%ymm8 - leaq 
64+128(%rsp),%rcx - vmovdqu %ymm0,0-128(%rcx) - vpxor %ymm0,%ymm0,%ymm0 - vmovdqu %ymm1,32-128(%rcx) - vpxor %ymm1,%ymm1,%ymm1 - vmovdqu %ymm2,64-128(%rcx) - vpxor %ymm2,%ymm2,%ymm2 - vmovdqu %ymm3,96-128(%rcx) - vpxor %ymm3,%ymm3,%ymm3 - vmovdqu %ymm4,128-128(%rcx) - vpxor %ymm4,%ymm4,%ymm4 - vmovdqu %ymm5,160-128(%rcx) - vpxor %ymm5,%ymm5,%ymm5 - vmovdqu %ymm6,192-128(%rcx) - vpxor %ymm6,%ymm6,%ymm6 - vmovdqu %ymm7,224-128(%rcx) - vpxor %ymm7,%ymm7,%ymm7 - vmovdqu %ymm8,256-128(%rcx) - vmovdqa %ymm0,%ymm8 - vmovdqu %ymm9,288-128(%rcx) -.Lmul_1024_no_n_copy: - andq $-64,%rsp - - movq (%r13),%rbx - vpbroadcastq (%r13),%ymm10 - vmovdqu %ymm0,(%rsp) - xorq %r9,%r9 -.byte 0x67 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - - vmovdqu .Land_mask(%rip),%ymm15 - movl $9,%r14d - vmovdqu %ymm9,288-128(%rdi) - jmp .Loop_mul_1024 - -.align 32 -.Loop_mul_1024: - vpsrlq $29,%ymm3,%ymm9 - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %r9,%rax - movq %rbx,%r10 - imulq 8-128(%rsi),%r10 - addq 8(%rsp),%r10 - - movq %rax,%r9 - imull %r8d,%eax - andl $0x1fffffff,%eax - - movq %rbx,%r11 - imulq 16-128(%rsi),%r11 - addq 16(%rsp),%r11 - - movq %rbx,%r12 - imulq 24-128(%rsi),%r12 - addq 24(%rsp),%r12 - vpmuludq 32-128(%rsi),%ymm10,%ymm0 - vmovd %eax,%xmm11 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq 64-128(%rsi),%ymm10,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 96-128(%rsi),%ymm10,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq 128-128(%rsi),%ymm10,%ymm0 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq 160-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 192-128(%rsi),%ymm10,%ymm13 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq 224-128(%rsi),%ymm10,%ymm0 - vpermq $0x93,%ymm9,%ymm9 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq 256-128(%rsi),%ymm10,%ymm12 - vpbroadcastq 8(%r13),%ymm10 - vpaddq %ymm12,%ymm8,%ymm8 - - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%rcx),%rax - addq %rax,%r10 - movq %rdx,%rax - 
imulq 16-128(%rcx),%rax - addq %rax,%r11 - shrq $29,%r9 - imulq 24-128(%rcx),%rdx - addq %rdx,%r12 - addq %r9,%r10 - - vpmuludq 32-128(%rcx),%ymm11,%ymm13 - vmovq %xmm10,%rbx - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 64-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm0,%ymm2,%ymm2 - vpmuludq 96-128(%rcx),%ymm11,%ymm12 - vpaddq %ymm12,%ymm3,%ymm3 - vpmuludq 128-128(%rcx),%ymm11,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 160-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm0,%ymm5,%ymm5 - vpmuludq 192-128(%rcx),%ymm11,%ymm12 - vpaddq %ymm12,%ymm6,%ymm6 - vpmuludq 224-128(%rcx),%ymm11,%ymm13 - vpblendd $3,%ymm14,%ymm9,%ymm12 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 256-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm12,%ymm3,%ymm3 - vpaddq %ymm0,%ymm8,%ymm8 - - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %rax,%r10 - vmovdqu -8+32-128(%rsi),%ymm12 - movq %rbx,%rax - imulq 8-128(%rsi),%rax - addq %rax,%r11 - vmovdqu -8+64-128(%rsi),%ymm13 - - movq %r10,%rax - vpblendd $0xfc,%ymm14,%ymm9,%ymm9 - imull %r8d,%eax - vpaddq %ymm9,%ymm4,%ymm4 - andl $0x1fffffff,%eax - - imulq 16-128(%rsi),%rbx - addq %rbx,%r12 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovd %eax,%xmm11 - vmovdqu -8+96-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm13,%ymm13 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -8+128-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -8+160-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -8+192-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -8+224-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -8+256-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -8+288-128(%rsi),%ymm9 - vpaddq %ymm12,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm13,%ymm13 - vpaddq %ymm13,%ymm8,%ymm8 - vpmuludq %ymm10,%ymm9,%ymm9 - vpbroadcastq 16(%r13),%ymm10 - - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r10 - vmovdqu -8+32-128(%rcx),%ymm0 - movq 
%rdx,%rax - imulq 8-128(%rcx),%rax - addq %rax,%r11 - vmovdqu -8+64-128(%rcx),%ymm12 - shrq $29,%r10 - imulq 16-128(%rcx),%rdx - addq %rdx,%r12 - addq %r10,%r11 - - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -8+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -8+128-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -8+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -8+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -8+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -8+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -8+288-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm11,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm11,%ymm13,%ymm13 - vpaddq %ymm13,%ymm9,%ymm9 - - vmovdqu -16+32-128(%rsi),%ymm0 - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %r11,%rax - - vmovdqu -16+64-128(%rsi),%ymm12 - movq %rax,%r11 - imull %r8d,%eax - andl $0x1fffffff,%eax - - imulq 8-128(%rsi),%rbx - addq %rbx,%r12 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovd %eax,%xmm11 - vmovdqu -16+96-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm12,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -16+128-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -16+160-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -16+192-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -16+224-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -16+256-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -16+288-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm10,%ymm13,%ymm13 - vpbroadcastq 
24(%r13),%ymm10 - vpaddq %ymm13,%ymm9,%ymm9 - - vmovdqu -16+32-128(%rcx),%ymm0 - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r11 - vmovdqu -16+64-128(%rcx),%ymm12 - imulq 8-128(%rcx),%rdx - addq %rdx,%r12 - shrq $29,%r11 - - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -16+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -16+128-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -16+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -16+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -16+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -16+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -16+288-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -24+32-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -24+64-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm9,%ymm9 - - addq %r11,%r12 - imulq -128(%rsi),%rbx - addq %rbx,%r12 - - movq %r12,%rax - imull %r8d,%eax - andl $0x1fffffff,%eax - - vpmuludq %ymm10,%ymm0,%ymm0 - vmovd %eax,%xmm11 - vmovdqu -24+96-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm12,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -24+128-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -24+160-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -24+192-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -24+224-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -24+256-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -24+288-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 
%ymm10,%ymm13,%ymm13 - vpbroadcastq 32(%r13),%ymm10 - vpaddq %ymm13,%ymm9,%ymm9 - addq $32,%r13 - - vmovdqu -24+32-128(%rcx),%ymm0 - imulq -128(%rcx),%rax - addq %rax,%r12 - shrq $29,%r12 - - vmovdqu -24+64-128(%rcx),%ymm12 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -24+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm0 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu %ymm0,(%rsp) - vpaddq %ymm12,%ymm2,%ymm1 - vmovdqu -24+128-128(%rcx),%ymm0 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -24+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm2 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -24+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm3 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -24+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm4 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -24+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm5 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -24+288-128(%rcx),%ymm13 - movq %r12,%r9 - vpaddq %ymm0,%ymm7,%ymm6 - vpmuludq %ymm11,%ymm12,%ymm12 - addq (%rsp),%r9 - vpaddq %ymm12,%ymm8,%ymm7 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovq %r12,%xmm12 - vpaddq %ymm13,%ymm9,%ymm8 - - decl %r14d - jnz .Loop_mul_1024 - vpaddq (%rsp),%ymm12,%ymm0 - - vpsrlq $29,%ymm0,%ymm12 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm13 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm10,%ymm10 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpermq $0x93,%ymm11,%ymm11 - vpaddq %ymm9,%ymm0,%ymm0 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm1,%ymm1 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm2,%ymm2 - vpblendd $3,%ymm11,%ymm14,%ymm11 - vpaddq %ymm10,%ymm3,%ymm3 - vpaddq %ymm11,%ymm4,%ymm4 - - vpsrlq $29,%ymm0,%ymm12 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm13 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm2,%ymm2 - 
vpsrlq $29,%ymm3,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm10,%ymm10 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm11,%ymm11 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm0,%ymm0 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm1,%ymm1 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm2,%ymm2 - vpblendd $3,%ymm11,%ymm14,%ymm11 - vpaddq %ymm10,%ymm3,%ymm3 - vpaddq %ymm11,%ymm4,%ymm4 - - vmovdqu %ymm0,0-128(%rdi) - vmovdqu %ymm1,32-128(%rdi) - vmovdqu %ymm2,64-128(%rdi) - vmovdqu %ymm3,96-128(%rdi) - vpsrlq $29,%ymm4,%ymm12 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm13 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm10,%ymm10 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm4,%ymm4 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm5,%ymm5 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm6,%ymm6 - vpblendd $3,%ymm11,%ymm0,%ymm11 - vpaddq %ymm10,%ymm7,%ymm7 - vpaddq %ymm11,%ymm8,%ymm8 - - vpsrlq $29,%ymm4,%ymm12 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm13 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm10,%ymm10 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm4,%ymm4 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm5,%ymm5 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm6,%ymm6 - vpblendd $3,%ymm11,%ymm0,%ymm11 - vpaddq %ymm10,%ymm7,%ymm7 - vpaddq 
%ymm11,%ymm8,%ymm8 - - vmovdqu %ymm4,128-128(%rdi) - vmovdqu %ymm5,160-128(%rdi) - vmovdqu %ymm6,192-128(%rdi) - vmovdqu %ymm7,224-128(%rdi) - vmovdqu %ymm8,256-128(%rdi) - vzeroupper - - movq %rbp,%rax -.cfi_def_cfa_register %rax - movq -48(%rax),%r15 -.cfi_restore %r15 - movq -40(%rax),%r14 -.cfi_restore %r14 - movq -32(%rax),%r13 -.cfi_restore %r13 - movq -24(%rax),%r12 -.cfi_restore %r12 - movq -16(%rax),%rbp -.cfi_restore %rbp - movq -8(%rax),%rbx -.cfi_restore %rbx - leaq (%rax),%rsp -.cfi_def_cfa_register %rsp -.Lmul_1024_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2 -.globl rsaz_1024_red2norm_avx2 -.hidden rsaz_1024_red2norm_avx2 -.type rsaz_1024_red2norm_avx2,@function -.align 32 -rsaz_1024_red2norm_avx2: -.cfi_startproc - subq $-128,%rsi - xorq %rax,%rax - movq -128(%rsi),%r8 - movq -120(%rsi),%r9 - movq -112(%rsi),%r10 - shlq $0,%r8 - shlq $29,%r9 - movq %r10,%r11 - shlq $58,%r10 - shrq $6,%r11 - addq %r8,%rax - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,0(%rdi) - movq %r11,%rax - movq -104(%rsi),%r8 - movq -96(%rsi),%r9 - shlq $23,%r8 - movq %r9,%r10 - shlq $52,%r9 - shrq $12,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,8(%rdi) - movq %r10,%rax - movq -88(%rsi),%r11 - movq -80(%rsi),%r8 - shlq $17,%r11 - movq %r8,%r9 - shlq $46,%r8 - shrq $18,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,16(%rdi) - movq %r9,%rax - movq -72(%rsi),%r10 - movq -64(%rsi),%r11 - shlq $11,%r10 - movq %r11,%r8 - shlq $40,%r11 - shrq $24,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,24(%rdi) - movq %r8,%rax - movq -56(%rsi),%r9 - movq -48(%rsi),%r10 - movq -40(%rsi),%r11 - shlq $5,%r9 - shlq $34,%r10 - movq %r11,%r8 - shlq $63,%r11 - shrq $1,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,32(%rdi) - movq %r8,%rax - movq -32(%rsi),%r9 - movq -24(%rsi),%r10 - shlq $28,%r9 - movq %r10,%r11 - shlq $57,%r10 - shrq $7,%r11 - addq %r9,%rax - 
addq %r10,%rax - adcq $0,%r11 - movq %rax,40(%rdi) - movq %r11,%rax - movq -16(%rsi),%r8 - movq -8(%rsi),%r9 - shlq $22,%r8 - movq %r9,%r10 - shlq $51,%r9 - shrq $13,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,48(%rdi) - movq %r10,%rax - movq 0(%rsi),%r11 - movq 8(%rsi),%r8 - shlq $16,%r11 - movq %r8,%r9 - shlq $45,%r8 - shrq $19,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,56(%rdi) - movq %r9,%rax - movq 16(%rsi),%r10 - movq 24(%rsi),%r11 - shlq $10,%r10 - movq %r11,%r8 - shlq $39,%r11 - shrq $25,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,64(%rdi) - movq %r8,%rax - movq 32(%rsi),%r9 - movq 40(%rsi),%r10 - movq 48(%rsi),%r11 - shlq $4,%r9 - shlq $33,%r10 - movq %r11,%r8 - shlq $62,%r11 - shrq $2,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,72(%rdi) - movq %r8,%rax - movq 56(%rsi),%r9 - movq 64(%rsi),%r10 - shlq $27,%r9 - movq %r10,%r11 - shlq $56,%r10 - shrq $8,%r11 - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,80(%rdi) - movq %r11,%rax - movq 72(%rsi),%r8 - movq 80(%rsi),%r9 - shlq $21,%r8 - movq %r9,%r10 - shlq $50,%r9 - shrq $14,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,88(%rdi) - movq %r10,%rax - movq 88(%rsi),%r11 - movq 96(%rsi),%r8 - shlq $15,%r11 - movq %r8,%r9 - shlq $44,%r8 - shrq $20,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,96(%rdi) - movq %r9,%rax - movq 104(%rsi),%r10 - movq 112(%rsi),%r11 - shlq $9,%r10 - movq %r11,%r8 - shlq $38,%r11 - shrq $26,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,104(%rdi) - movq %r8,%rax - movq 120(%rsi),%r9 - movq 128(%rsi),%r10 - movq 136(%rsi),%r11 - shlq $3,%r9 - shlq $32,%r10 - movq %r11,%r8 - shlq $61,%r11 - shrq $3,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,112(%rdi) - movq %r8,%rax - movq 144(%rsi),%r9 - movq 152(%rsi),%r10 - shlq $26,%r9 - movq %r10,%r11 - shlq $55,%r10 - shrq $9,%r11 - addq %r9,%rax - addq 
%r10,%rax - adcq $0,%r11 - movq %rax,120(%rdi) - movq %r11,%rax - .byte 0xf3,0xc3 -.cfi_endproc -.size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2 - -.globl rsaz_1024_norm2red_avx2 -.hidden rsaz_1024_norm2red_avx2 -.type rsaz_1024_norm2red_avx2,@function -.align 32 -rsaz_1024_norm2red_avx2: -.cfi_startproc - subq $-128,%rdi - movq (%rsi),%r8 - movl $0x1fffffff,%eax - movq 8(%rsi),%r9 - movq %r8,%r11 - shrq $0,%r11 - andq %rax,%r11 - movq %r11,-128(%rdi) - movq %r8,%r10 - shrq $29,%r10 - andq %rax,%r10 - movq %r10,-120(%rdi) - shrdq $58,%r9,%r8 - andq %rax,%r8 - movq %r8,-112(%rdi) - movq 16(%rsi),%r10 - movq %r9,%r8 - shrq $23,%r8 - andq %rax,%r8 - movq %r8,-104(%rdi) - shrdq $52,%r10,%r9 - andq %rax,%r9 - movq %r9,-96(%rdi) - movq 24(%rsi),%r11 - movq %r10,%r9 - shrq $17,%r9 - andq %rax,%r9 - movq %r9,-88(%rdi) - shrdq $46,%r11,%r10 - andq %rax,%r10 - movq %r10,-80(%rdi) - movq 32(%rsi),%r8 - movq %r11,%r10 - shrq $11,%r10 - andq %rax,%r10 - movq %r10,-72(%rdi) - shrdq $40,%r8,%r11 - andq %rax,%r11 - movq %r11,-64(%rdi) - movq 40(%rsi),%r9 - movq %r8,%r11 - shrq $5,%r11 - andq %rax,%r11 - movq %r11,-56(%rdi) - movq %r8,%r10 - shrq $34,%r10 - andq %rax,%r10 - movq %r10,-48(%rdi) - shrdq $63,%r9,%r8 - andq %rax,%r8 - movq %r8,-40(%rdi) - movq 48(%rsi),%r10 - movq %r9,%r8 - shrq $28,%r8 - andq %rax,%r8 - movq %r8,-32(%rdi) - shrdq $57,%r10,%r9 - andq %rax,%r9 - movq %r9,-24(%rdi) - movq 56(%rsi),%r11 - movq %r10,%r9 - shrq $22,%r9 - andq %rax,%r9 - movq %r9,-16(%rdi) - shrdq $51,%r11,%r10 - andq %rax,%r10 - movq %r10,-8(%rdi) - movq 64(%rsi),%r8 - movq %r11,%r10 - shrq $16,%r10 - andq %rax,%r10 - movq %r10,0(%rdi) - shrdq $45,%r8,%r11 - andq %rax,%r11 - movq %r11,8(%rdi) - movq 72(%rsi),%r9 - movq %r8,%r11 - shrq $10,%r11 - andq %rax,%r11 - movq %r11,16(%rdi) - shrdq $39,%r9,%r8 - andq %rax,%r8 - movq %r8,24(%rdi) - movq 80(%rsi),%r10 - movq %r9,%r8 - shrq $4,%r8 - andq %rax,%r8 - movq %r8,32(%rdi) - movq %r9,%r11 - shrq $33,%r11 - andq %rax,%r11 - movq 
%r11,40(%rdi) - shrdq $62,%r10,%r9 - andq %rax,%r9 - movq %r9,48(%rdi) - movq 88(%rsi),%r11 - movq %r10,%r9 - shrq $27,%r9 - andq %rax,%r9 - movq %r9,56(%rdi) - shrdq $56,%r11,%r10 - andq %rax,%r10 - movq %r10,64(%rdi) - movq 96(%rsi),%r8 - movq %r11,%r10 - shrq $21,%r10 - andq %rax,%r10 - movq %r10,72(%rdi) - shrdq $50,%r8,%r11 - andq %rax,%r11 - movq %r11,80(%rdi) - movq 104(%rsi),%r9 - movq %r8,%r11 - shrq $15,%r11 - andq %rax,%r11 - movq %r11,88(%rdi) - shrdq $44,%r9,%r8 - andq %rax,%r8 - movq %r8,96(%rdi) - movq 112(%rsi),%r10 - movq %r9,%r8 - shrq $9,%r8 - andq %rax,%r8 - movq %r8,104(%rdi) - shrdq $38,%r10,%r9 - andq %rax,%r9 - movq %r9,112(%rdi) - movq 120(%rsi),%r11 - movq %r10,%r9 - shrq $3,%r9 - andq %rax,%r9 - movq %r9,120(%rdi) - movq %r10,%r8 - shrq $32,%r8 - andq %rax,%r8 - movq %r8,128(%rdi) - shrdq $61,%r11,%r10 - andq %rax,%r10 - movq %r10,136(%rdi) - xorq %r8,%r8 - movq %r11,%r10 - shrq $26,%r10 - andq %rax,%r10 - movq %r10,144(%rdi) - shrdq $55,%r8,%r11 - andq %rax,%r11 - movq %r11,152(%rdi) - movq %r8,160(%rdi) - movq %r8,168(%rdi) - movq %r8,176(%rdi) - movq %r8,184(%rdi) - .byte 0xf3,0xc3 -.cfi_endproc -.size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2 -.globl rsaz_1024_scatter5_avx2 -.hidden rsaz_1024_scatter5_avx2 -.type rsaz_1024_scatter5_avx2,@function -.align 32 -rsaz_1024_scatter5_avx2: -.cfi_startproc - vzeroupper - vmovdqu .Lscatter_permd(%rip),%ymm5 - shll $4,%edx - leaq (%rdi,%rdx,1),%rdi - movl $9,%eax - jmp .Loop_scatter_1024 - -.align 32 -.Loop_scatter_1024: - vmovdqu (%rsi),%ymm0 - leaq 32(%rsi),%rsi - vpermd %ymm0,%ymm5,%ymm0 - vmovdqu %xmm0,(%rdi) - leaq 512(%rdi),%rdi - decl %eax - jnz .Loop_scatter_1024 - - vzeroupper - .byte 0xf3,0xc3 -.cfi_endproc -.size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2 - -.globl rsaz_1024_gather5_avx2 -.hidden rsaz_1024_gather5_avx2 -.type rsaz_1024_gather5_avx2,@function -.align 32 -rsaz_1024_gather5_avx2: -.cfi_startproc - vzeroupper - movq %rsp,%r11 -.cfi_def_cfa_register %r11 - 
leaq -256(%rsp),%rsp - andq $-32,%rsp - leaq .Linc(%rip),%r10 - leaq -128(%rsp),%rax - - vmovd %edx,%xmm4 - vmovdqa (%r10),%ymm0 - vmovdqa 32(%r10),%ymm1 - vmovdqa 64(%r10),%ymm5 - vpbroadcastd %xmm4,%ymm4 - - vpaddd %ymm5,%ymm0,%ymm2 - vpcmpeqd %ymm4,%ymm0,%ymm0 - vpaddd %ymm5,%ymm1,%ymm3 - vpcmpeqd %ymm4,%ymm1,%ymm1 - vmovdqa %ymm0,0+128(%rax) - vpaddd %ymm5,%ymm2,%ymm0 - vpcmpeqd %ymm4,%ymm2,%ymm2 - vmovdqa %ymm1,32+128(%rax) - vpaddd %ymm5,%ymm3,%ymm1 - vpcmpeqd %ymm4,%ymm3,%ymm3 - vmovdqa %ymm2,64+128(%rax) - vpaddd %ymm5,%ymm0,%ymm2 - vpcmpeqd %ymm4,%ymm0,%ymm0 - vmovdqa %ymm3,96+128(%rax) - vpaddd %ymm5,%ymm1,%ymm3 - vpcmpeqd %ymm4,%ymm1,%ymm1 - vmovdqa %ymm0,128+128(%rax) - vpaddd %ymm5,%ymm2,%ymm8 - vpcmpeqd %ymm4,%ymm2,%ymm2 - vmovdqa %ymm1,160+128(%rax) - vpaddd %ymm5,%ymm3,%ymm9 - vpcmpeqd %ymm4,%ymm3,%ymm3 - vmovdqa %ymm2,192+128(%rax) - vpaddd %ymm5,%ymm8,%ymm10 - vpcmpeqd %ymm4,%ymm8,%ymm8 - vmovdqa %ymm3,224+128(%rax) - vpaddd %ymm5,%ymm9,%ymm11 - vpcmpeqd %ymm4,%ymm9,%ymm9 - vpaddd %ymm5,%ymm10,%ymm12 - vpcmpeqd %ymm4,%ymm10,%ymm10 - vpaddd %ymm5,%ymm11,%ymm13 - vpcmpeqd %ymm4,%ymm11,%ymm11 - vpaddd %ymm5,%ymm12,%ymm14 - vpcmpeqd %ymm4,%ymm12,%ymm12 - vpaddd %ymm5,%ymm13,%ymm15 - vpcmpeqd %ymm4,%ymm13,%ymm13 - vpcmpeqd %ymm4,%ymm14,%ymm14 - vpcmpeqd %ymm4,%ymm15,%ymm15 - - vmovdqa -32(%r10),%ymm7 - leaq 128(%rsi),%rsi - movl $9,%edx - -.Loop_gather_1024: - vmovdqa 0-128(%rsi),%ymm0 - vmovdqa 32-128(%rsi),%ymm1 - vmovdqa 64-128(%rsi),%ymm2 - vmovdqa 96-128(%rsi),%ymm3 - vpand 0+128(%rax),%ymm0,%ymm0 - vpand 32+128(%rax),%ymm1,%ymm1 - vpand 64+128(%rax),%ymm2,%ymm2 - vpor %ymm0,%ymm1,%ymm4 - vpand 96+128(%rax),%ymm3,%ymm3 - vmovdqa 128-128(%rsi),%ymm0 - vmovdqa 160-128(%rsi),%ymm1 - vpor %ymm2,%ymm3,%ymm5 - vmovdqa 192-128(%rsi),%ymm2 - vmovdqa 224-128(%rsi),%ymm3 - vpand 128+128(%rax),%ymm0,%ymm0 - vpand 160+128(%rax),%ymm1,%ymm1 - vpand 192+128(%rax),%ymm2,%ymm2 - vpor %ymm0,%ymm4,%ymm4 - vpand 224+128(%rax),%ymm3,%ymm3 - vpand 
256-128(%rsi),%ymm8,%ymm0 - vpor %ymm1,%ymm5,%ymm5 - vpand 288-128(%rsi),%ymm9,%ymm1 - vpor %ymm2,%ymm4,%ymm4 - vpand 320-128(%rsi),%ymm10,%ymm2 - vpor %ymm3,%ymm5,%ymm5 - vpand 352-128(%rsi),%ymm11,%ymm3 - vpor %ymm0,%ymm4,%ymm4 - vpand 384-128(%rsi),%ymm12,%ymm0 - vpor %ymm1,%ymm5,%ymm5 - vpand 416-128(%rsi),%ymm13,%ymm1 - vpor %ymm2,%ymm4,%ymm4 - vpand 448-128(%rsi),%ymm14,%ymm2 - vpor %ymm3,%ymm5,%ymm5 - vpand 480-128(%rsi),%ymm15,%ymm3 - leaq 512(%rsi),%rsi - vpor %ymm0,%ymm4,%ymm4 - vpor %ymm1,%ymm5,%ymm5 - vpor %ymm2,%ymm4,%ymm4 - vpor %ymm3,%ymm5,%ymm5 - - vpor %ymm5,%ymm4,%ymm4 - vextracti128 $1,%ymm4,%xmm5 - vpor %xmm4,%xmm5,%xmm5 - vpermd %ymm5,%ymm7,%ymm5 - vmovdqu %ymm5,(%rdi) - leaq 32(%rdi),%rdi - decl %edx - jnz .Loop_gather_1024 - - vpxor %ymm0,%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - vzeroupper - leaq (%r11),%rsp -.cfi_def_cfa_register %rsp - .byte 0xf3,0xc3 -.cfi_endproc -.LSEH_end_rsaz_1024_gather5: -.size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2 -.align 64 -.Land_mask: -.quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff -.Lscatter_permd: -.long 0,2,4,6,7,7,7,7 -.Lgather_permd: -.long 0,7,1,7,2,7,3,7 -.Linc: -.long 0,0,0,0, 1,1,1,1 -.long 2,2,2,2, 3,3,3,3 -.long 4,4,4,4, 4,4,4,4 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S deleted file mode 100644 index a4ce81ff91..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S +++ /dev/null @@ -1,3601 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -.globl sha1_block_data_order -.hidden sha1_block_data_order -.type sha1_block_data_order,@function -.align 16 -sha1_block_data_order: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%r10 - movl 0(%r10),%r9d - movl 4(%r10),%r8d - movl 8(%r10),%r10d - testl $512,%r8d - jz .Lialu - andl $268435456,%r8d - andl $1073741824,%r9d - orl %r9d,%r8d - cmpl $1342177280,%r8d - je _avx_shortcut - jmp _ssse3_shortcut - -.align 16 -.Lialu: - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - movq %rdi,%r8 - subq $72,%rsp - movq %rsi,%r9 - andq $-64,%rsp - movq %rdx,%r10 - movq %rax,64(%rsp) -.cfi_escape 0x0f,0x06,0x77,0xc0,0x00,0x06,0x23,0x08 -.Lprologue: - - movl 0(%r8),%esi - movl 4(%r8),%edi - movl 8(%r8),%r11d - movl 12(%r8),%r12d - movl 16(%r8),%r13d - jmp .Lloop - -.align 16 -.Lloop: - movl 0(%r9),%edx - bswapl %edx - movl 4(%r9),%ebp - movl %r12d,%eax - movl %edx,0(%rsp) - movl %esi,%ecx - bswapl %ebp - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%rdx,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl %eax,%r13d - movl 8(%r9),%r14d - movl %r11d,%eax - movl %ebp,4(%rsp) - movl %r13d,%ecx - bswapl %r14d - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%rbp,%r12,1),%r12d - addl %ecx,%r12d - xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 12(%r9),%edx - movl %edi,%eax - movl %r14d,8(%rsp) - movl %r12d,%ecx - bswapl %edx - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%r14,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll 
$30,%r13d - addl %eax,%r11d - movl 16(%r9),%ebp - movl %esi,%eax - movl %edx,12(%rsp) - movl %r11d,%ecx - bswapl %ebp - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%rdx,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 20(%r9),%r14d - movl %r13d,%eax - movl %ebp,16(%rsp) - movl %edi,%ecx - bswapl %r14d - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%rbp,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll $30,%r11d - addl %eax,%esi - movl 24(%r9),%edx - movl %r12d,%eax - movl %r14d,20(%rsp) - movl %esi,%ecx - bswapl %edx - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%r14,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl %eax,%r13d - movl 28(%r9),%ebp - movl %r11d,%eax - movl %edx,24(%rsp) - movl %r13d,%ecx - bswapl %ebp - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%rdx,%r12,1),%r12d - addl %ecx,%r12d - xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 32(%r9),%r14d - movl %edi,%eax - movl %ebp,28(%rsp) - movl %r12d,%ecx - bswapl %r14d - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%rbp,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll $30,%r13d - addl %eax,%r11d - movl 36(%r9),%edx - movl %esi,%eax - movl %r14d,32(%rsp) - movl %r11d,%ecx - bswapl %edx - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%r14,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 40(%r9),%ebp - movl %r13d,%eax - movl %edx,36(%rsp) - movl %edi,%ecx - bswapl %ebp - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%rdx,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll $30,%r11d - addl %eax,%esi - movl 44(%r9),%r14d - movl %r12d,%eax - movl %ebp,40(%rsp) - movl %esi,%ecx - bswapl %r14d - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%rbp,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl 
%eax,%r13d - movl 48(%r9),%edx - movl %r11d,%eax - movl %r14d,44(%rsp) - movl %r13d,%ecx - bswapl %edx - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%r14,%r12,1),%r12d - addl %ecx,%r12d - xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 52(%r9),%ebp - movl %edi,%eax - movl %edx,48(%rsp) - movl %r12d,%ecx - bswapl %ebp - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%rdx,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll $30,%r13d - addl %eax,%r11d - movl 56(%r9),%r14d - movl %esi,%eax - movl %ebp,52(%rsp) - movl %r11d,%ecx - bswapl %r14d - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%rbp,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 60(%r9),%edx - movl %r13d,%eax - movl %r14d,56(%rsp) - movl %edi,%ecx - bswapl %edx - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%r14,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll $30,%r11d - addl %eax,%esi - xorl 0(%rsp),%ebp - movl %r12d,%eax - movl %edx,60(%rsp) - movl %esi,%ecx - xorl 8(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 32(%rsp),%ebp - andl %edi,%eax - leal 1518500249(%rdx,%r13,1),%r13d - roll $30,%edi - xorl %r12d,%eax - addl %ecx,%r13d - roll $1,%ebp - addl %eax,%r13d - xorl 4(%rsp),%r14d - movl %r11d,%eax - movl %ebp,0(%rsp) - movl %r13d,%ecx - xorl 12(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 36(%rsp),%r14d - andl %esi,%eax - leal 1518500249(%rbp,%r12,1),%r12d - roll $30,%esi - xorl %r11d,%eax - addl %ecx,%r12d - roll $1,%r14d - addl %eax,%r12d - xorl 8(%rsp),%edx - movl %edi,%eax - movl %r14d,4(%rsp) - movl %r12d,%ecx - xorl 16(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 40(%rsp),%edx - andl %r13d,%eax - leal 1518500249(%r14,%r11,1),%r11d - roll $30,%r13d - xorl %edi,%eax - addl %ecx,%r11d - roll $1,%edx - addl %eax,%r11d - xorl 12(%rsp),%ebp - movl %esi,%eax - movl %edx,8(%rsp) - movl %r11d,%ecx - xorl 20(%rsp),%ebp - xorl %r13d,%eax - roll 
$5,%ecx - xorl 44(%rsp),%ebp - andl %r12d,%eax - leal 1518500249(%rdx,%rdi,1),%edi - roll $30,%r12d - xorl %esi,%eax - addl %ecx,%edi - roll $1,%ebp - addl %eax,%edi - xorl 16(%rsp),%r14d - movl %r13d,%eax - movl %ebp,12(%rsp) - movl %edi,%ecx - xorl 24(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 48(%rsp),%r14d - andl %r11d,%eax - leal 1518500249(%rbp,%rsi,1),%esi - roll $30,%r11d - xorl %r13d,%eax - addl %ecx,%esi - roll $1,%r14d - addl %eax,%esi - xorl 20(%rsp),%edx - movl %edi,%eax - movl %r14d,16(%rsp) - movl %esi,%ecx - xorl 28(%rsp),%edx - xorl %r12d,%eax - roll $5,%ecx - xorl 52(%rsp),%edx - leal 1859775393(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 24(%rsp),%ebp - movl %esi,%eax - movl %edx,20(%rsp) - movl %r13d,%ecx - xorl 32(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 56(%rsp),%ebp - leal 1859775393(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 28(%rsp),%r14d - movl %r13d,%eax - movl %ebp,24(%rsp) - movl %r12d,%ecx - xorl 36(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 60(%rsp),%r14d - leal 1859775393(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 32(%rsp),%edx - movl %r12d,%eax - movl %r14d,28(%rsp) - movl %r11d,%ecx - xorl 40(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 0(%rsp),%edx - leal 1859775393(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 36(%rsp),%ebp - movl %r11d,%eax - movl %edx,32(%rsp) - movl %edi,%ecx - xorl 44(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 4(%rsp),%ebp - leal 1859775393(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 40(%rsp),%r14d - movl %edi,%eax - movl %ebp,36(%rsp) - movl %esi,%ecx - xorl 48(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 8(%rsp),%r14d - leal 
1859775393(%rbp,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%r14d - xorl 44(%rsp),%edx - movl %esi,%eax - movl %r14d,40(%rsp) - movl %r13d,%ecx - xorl 52(%rsp),%edx - xorl %r11d,%eax - roll $5,%ecx - xorl 12(%rsp),%edx - leal 1859775393(%r14,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%edx - xorl 48(%rsp),%ebp - movl %r13d,%eax - movl %edx,44(%rsp) - movl %r12d,%ecx - xorl 56(%rsp),%ebp - xorl %edi,%eax - roll $5,%ecx - xorl 16(%rsp),%ebp - leal 1859775393(%rdx,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%ebp - xorl 52(%rsp),%r14d - movl %r12d,%eax - movl %ebp,48(%rsp) - movl %r11d,%ecx - xorl 60(%rsp),%r14d - xorl %esi,%eax - roll $5,%ecx - xorl 20(%rsp),%r14d - leal 1859775393(%rbp,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%r14d - xorl 56(%rsp),%edx - movl %r11d,%eax - movl %r14d,52(%rsp) - movl %edi,%ecx - xorl 0(%rsp),%edx - xorl %r13d,%eax - roll $5,%ecx - xorl 24(%rsp),%edx - leal 1859775393(%r14,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%edx - xorl 60(%rsp),%ebp - movl %edi,%eax - movl %edx,56(%rsp) - movl %esi,%ecx - xorl 4(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 28(%rsp),%ebp - leal 1859775393(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 0(%rsp),%r14d - movl %esi,%eax - movl %ebp,60(%rsp) - movl %r13d,%ecx - xorl 8(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 32(%rsp),%r14d - leal 1859775393(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%r14d - xorl 4(%rsp),%edx - movl %r13d,%eax - movl %r14d,0(%rsp) - movl %r12d,%ecx - xorl 12(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 36(%rsp),%edx - leal 1859775393(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d 
- addl %eax,%r11d - roll $1,%edx - xorl 8(%rsp),%ebp - movl %r12d,%eax - movl %edx,4(%rsp) - movl %r11d,%ecx - xorl 16(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 40(%rsp),%ebp - leal 1859775393(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - xorl 12(%rsp),%r14d - movl %r11d,%eax - movl %ebp,8(%rsp) - movl %edi,%ecx - xorl 20(%rsp),%r14d - xorl %r13d,%eax - roll $5,%ecx - xorl 44(%rsp),%r14d - leal 1859775393(%rbp,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%r14d - xorl 16(%rsp),%edx - movl %edi,%eax - movl %r14d,12(%rsp) - movl %esi,%ecx - xorl 24(%rsp),%edx - xorl %r12d,%eax - roll $5,%ecx - xorl 48(%rsp),%edx - leal 1859775393(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 20(%rsp),%ebp - movl %esi,%eax - movl %edx,16(%rsp) - movl %r13d,%ecx - xorl 28(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 52(%rsp),%ebp - leal 1859775393(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 24(%rsp),%r14d - movl %r13d,%eax - movl %ebp,20(%rsp) - movl %r12d,%ecx - xorl 32(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 56(%rsp),%r14d - leal 1859775393(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 28(%rsp),%edx - movl %r12d,%eax - movl %r14d,24(%rsp) - movl %r11d,%ecx - xorl 36(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 60(%rsp),%edx - leal 1859775393(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 32(%rsp),%ebp - movl %r11d,%eax - movl %edx,28(%rsp) - movl %edi,%ecx - xorl 40(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 0(%rsp),%ebp - leal 1859775393(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 36(%rsp),%r14d - movl %r12d,%eax - movl 
%ebp,32(%rsp) - movl %r12d,%ebx - xorl 44(%rsp),%r14d - andl %r11d,%eax - movl %esi,%ecx - xorl 4(%rsp),%r14d - leal -1894007588(%rbp,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%r14d - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 40(%rsp),%edx - movl %r11d,%eax - movl %r14d,36(%rsp) - movl %r11d,%ebx - xorl 48(%rsp),%edx - andl %edi,%eax - movl %r13d,%ecx - xorl 8(%rsp),%edx - leal -1894007588(%r14,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%edx - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 44(%rsp),%ebp - movl %edi,%eax - movl %edx,40(%rsp) - movl %edi,%ebx - xorl 52(%rsp),%ebp - andl %esi,%eax - movl %r12d,%ecx - xorl 12(%rsp),%ebp - leal -1894007588(%rdx,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%ebp - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 48(%rsp),%r14d - movl %esi,%eax - movl %ebp,44(%rsp) - movl %esi,%ebx - xorl 56(%rsp),%r14d - andl %r13d,%eax - movl %r11d,%ecx - xorl 16(%rsp),%r14d - leal -1894007588(%rbp,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%r14d - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 52(%rsp),%edx - movl %r13d,%eax - movl %r14d,48(%rsp) - movl %r13d,%ebx - xorl 60(%rsp),%edx - andl %r12d,%eax - movl %edi,%ecx - xorl 20(%rsp),%edx - leal -1894007588(%r14,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%edx - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 56(%rsp),%ebp - movl %r12d,%eax - movl %edx,52(%rsp) - movl %r12d,%ebx - xorl 0(%rsp),%ebp - andl %r11d,%eax - movl %esi,%ecx - xorl 24(%rsp),%ebp - leal -1894007588(%rdx,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%ebp - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 60(%rsp),%r14d - movl %r11d,%eax - movl %ebp,56(%rsp) - movl %r11d,%ebx 
- xorl 4(%rsp),%r14d - andl %edi,%eax - movl %r13d,%ecx - xorl 28(%rsp),%r14d - leal -1894007588(%rbp,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%r14d - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 0(%rsp),%edx - movl %edi,%eax - movl %r14d,60(%rsp) - movl %edi,%ebx - xorl 8(%rsp),%edx - andl %esi,%eax - movl %r12d,%ecx - xorl 32(%rsp),%edx - leal -1894007588(%r14,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%edx - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 4(%rsp),%ebp - movl %esi,%eax - movl %edx,0(%rsp) - movl %esi,%ebx - xorl 12(%rsp),%ebp - andl %r13d,%eax - movl %r11d,%ecx - xorl 36(%rsp),%ebp - leal -1894007588(%rdx,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%ebp - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 8(%rsp),%r14d - movl %r13d,%eax - movl %ebp,4(%rsp) - movl %r13d,%ebx - xorl 16(%rsp),%r14d - andl %r12d,%eax - movl %edi,%ecx - xorl 40(%rsp),%r14d - leal -1894007588(%rbp,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%r14d - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 12(%rsp),%edx - movl %r12d,%eax - movl %r14d,8(%rsp) - movl %r12d,%ebx - xorl 20(%rsp),%edx - andl %r11d,%eax - movl %esi,%ecx - xorl 44(%rsp),%edx - leal -1894007588(%r14,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%edx - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 16(%rsp),%ebp - movl %r11d,%eax - movl %edx,12(%rsp) - movl %r11d,%ebx - xorl 24(%rsp),%ebp - andl %edi,%eax - movl %r13d,%ecx - xorl 48(%rsp),%ebp - leal -1894007588(%rdx,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%ebp - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 20(%rsp),%r14d - movl %edi,%eax - movl %ebp,16(%rsp) - movl %edi,%ebx - xorl 28(%rsp),%r14d - andl %esi,%eax 
- movl %r12d,%ecx - xorl 52(%rsp),%r14d - leal -1894007588(%rbp,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%r14d - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 24(%rsp),%edx - movl %esi,%eax - movl %r14d,20(%rsp) - movl %esi,%ebx - xorl 32(%rsp),%edx - andl %r13d,%eax - movl %r11d,%ecx - xorl 56(%rsp),%edx - leal -1894007588(%r14,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%edx - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 28(%rsp),%ebp - movl %r13d,%eax - movl %edx,24(%rsp) - movl %r13d,%ebx - xorl 36(%rsp),%ebp - andl %r12d,%eax - movl %edi,%ecx - xorl 60(%rsp),%ebp - leal -1894007588(%rdx,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%ebp - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 32(%rsp),%r14d - movl %r12d,%eax - movl %ebp,28(%rsp) - movl %r12d,%ebx - xorl 40(%rsp),%r14d - andl %r11d,%eax - movl %esi,%ecx - xorl 0(%rsp),%r14d - leal -1894007588(%rbp,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%r14d - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 36(%rsp),%edx - movl %r11d,%eax - movl %r14d,32(%rsp) - movl %r11d,%ebx - xorl 44(%rsp),%edx - andl %edi,%eax - movl %r13d,%ecx - xorl 4(%rsp),%edx - leal -1894007588(%r14,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%edx - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 40(%rsp),%ebp - movl %edi,%eax - movl %edx,36(%rsp) - movl %edi,%ebx - xorl 48(%rsp),%ebp - andl %esi,%eax - movl %r12d,%ecx - xorl 8(%rsp),%ebp - leal -1894007588(%rdx,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%ebp - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 44(%rsp),%r14d - movl %esi,%eax - movl %ebp,40(%rsp) - movl %esi,%ebx - xorl 52(%rsp),%r14d - andl %r13d,%eax - movl %r11d,%ecx - xorl 
12(%rsp),%r14d - leal -1894007588(%rbp,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%r14d - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 48(%rsp),%edx - movl %r13d,%eax - movl %r14d,44(%rsp) - movl %r13d,%ebx - xorl 56(%rsp),%edx - andl %r12d,%eax - movl %edi,%ecx - xorl 16(%rsp),%edx - leal -1894007588(%r14,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%edx - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 52(%rsp),%ebp - movl %edi,%eax - movl %edx,48(%rsp) - movl %esi,%ecx - xorl 60(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 20(%rsp),%ebp - leal -899497514(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 56(%rsp),%r14d - movl %esi,%eax - movl %ebp,52(%rsp) - movl %r13d,%ecx - xorl 0(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 24(%rsp),%r14d - leal -899497514(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%r14d - xorl 60(%rsp),%edx - movl %r13d,%eax - movl %r14d,56(%rsp) - movl %r12d,%ecx - xorl 4(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 28(%rsp),%edx - leal -899497514(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%edx - xorl 0(%rsp),%ebp - movl %r12d,%eax - movl %edx,60(%rsp) - movl %r11d,%ecx - xorl 8(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 32(%rsp),%ebp - leal -899497514(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - xorl 4(%rsp),%r14d - movl %r11d,%eax - movl %ebp,0(%rsp) - movl %edi,%ecx - xorl 12(%rsp),%r14d - xorl %r13d,%eax - roll $5,%ecx - xorl 36(%rsp),%r14d - leal -899497514(%rbp,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%r14d - xorl 8(%rsp),%edx - movl %edi,%eax - movl %r14d,4(%rsp) - movl %esi,%ecx - xorl 16(%rsp),%edx - xorl %r12d,%eax - 
roll $5,%ecx - xorl 40(%rsp),%edx - leal -899497514(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 12(%rsp),%ebp - movl %esi,%eax - movl %edx,8(%rsp) - movl %r13d,%ecx - xorl 20(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 44(%rsp),%ebp - leal -899497514(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 16(%rsp),%r14d - movl %r13d,%eax - movl %ebp,12(%rsp) - movl %r12d,%ecx - xorl 24(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 48(%rsp),%r14d - leal -899497514(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 20(%rsp),%edx - movl %r12d,%eax - movl %r14d,16(%rsp) - movl %r11d,%ecx - xorl 28(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 52(%rsp),%edx - leal -899497514(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 24(%rsp),%ebp - movl %r11d,%eax - movl %edx,20(%rsp) - movl %edi,%ecx - xorl 32(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 56(%rsp),%ebp - leal -899497514(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 28(%rsp),%r14d - movl %edi,%eax - movl %ebp,24(%rsp) - movl %esi,%ecx - xorl 36(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 60(%rsp),%r14d - leal -899497514(%rbp,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%r14d - xorl 32(%rsp),%edx - movl %esi,%eax - movl %r14d,28(%rsp) - movl %r13d,%ecx - xorl 40(%rsp),%edx - xorl %r11d,%eax - roll $5,%ecx - xorl 0(%rsp),%edx - leal -899497514(%r14,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%edx - xorl 36(%rsp),%ebp - movl %r13d,%eax - - movl %r12d,%ecx - xorl 44(%rsp),%ebp - xorl %edi,%eax - roll $5,%ecx - xorl 4(%rsp),%ebp - leal -899497514(%rdx,%r11,1),%r11d - xorl %esi,%eax - addl 
%ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%ebp - xorl 40(%rsp),%r14d - movl %r12d,%eax - - movl %r11d,%ecx - xorl 48(%rsp),%r14d - xorl %esi,%eax - roll $5,%ecx - xorl 8(%rsp),%r14d - leal -899497514(%rbp,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%r14d - xorl 44(%rsp),%edx - movl %r11d,%eax - - movl %edi,%ecx - xorl 52(%rsp),%edx - xorl %r13d,%eax - roll $5,%ecx - xorl 12(%rsp),%edx - leal -899497514(%r14,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%edx - xorl 48(%rsp),%ebp - movl %edi,%eax - - movl %esi,%ecx - xorl 56(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 16(%rsp),%ebp - leal -899497514(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 52(%rsp),%r14d - movl %esi,%eax - - movl %r13d,%ecx - xorl 60(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 20(%rsp),%r14d - leal -899497514(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%r14d - xorl 56(%rsp),%edx - movl %r13d,%eax - - movl %r12d,%ecx - xorl 0(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 24(%rsp),%edx - leal -899497514(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%edx - xorl 60(%rsp),%ebp - movl %r12d,%eax - - movl %r11d,%ecx - xorl 4(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 28(%rsp),%ebp - leal -899497514(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - movl %r11d,%eax - movl %edi,%ecx - xorl %r13d,%eax - leal -899497514(%rbp,%rsi,1),%esi - roll $5,%ecx - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - addl 0(%r8),%esi - addl 4(%r8),%edi - addl 8(%r8),%r11d - addl 12(%r8),%r12d - addl 16(%r8),%r13d - movl %esi,0(%r8) - movl %edi,4(%r8) - movl %r11d,8(%r8) - movl %r12d,12(%r8) - movl %r13d,16(%r8) - - subq $1,%r10 - leaq 64(%r9),%r9 - 
jnz .Lloop - - movq 64(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha1_block_data_order,.-sha1_block_data_order -.type sha1_block_data_order_ssse3,@function -.align 16 -sha1_block_data_order_ssse3: -_ssse3_shortcut: -.cfi_startproc - movq %rsp,%r11 -.cfi_def_cfa_register %r11 - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - leaq -64(%rsp),%rsp - andq $-64,%rsp - movq %rdi,%r8 - movq %rsi,%r9 - movq %rdx,%r10 - - shlq $6,%r10 - addq %r9,%r10 - leaq K_XX_XX+64(%rip),%r14 - - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movl %ebx,%esi - movl 16(%r8),%ebp - movl %ecx,%edi - xorl %edx,%edi - andl %edi,%esi - - movdqa 64(%r14),%xmm6 - movdqa -64(%r14),%xmm9 - movdqu 0(%r9),%xmm0 - movdqu 16(%r9),%xmm1 - movdqu 32(%r9),%xmm2 - movdqu 48(%r9),%xmm3 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - addq $64,%r9 - paddd %xmm9,%xmm0 -.byte 102,15,56,0,222 - paddd %xmm9,%xmm1 - paddd %xmm9,%xmm2 - movdqa %xmm0,0(%rsp) - psubd %xmm9,%xmm0 - movdqa %xmm1,16(%rsp) - psubd %xmm9,%xmm1 - movdqa %xmm2,32(%rsp) - psubd %xmm9,%xmm2 - jmp .Loop_ssse3 -.align 16 -.Loop_ssse3: - rorl $2,%ebx - pshufd $238,%xmm0,%xmm4 - xorl %edx,%esi - movdqa %xmm3,%xmm8 - paddd %xmm3,%xmm9 - movl %eax,%edi - addl 0(%rsp),%ebp - punpcklqdq %xmm1,%xmm4 - xorl %ecx,%ebx - roll $5,%eax - addl %esi,%ebp - psrldq $4,%xmm8 - andl %ebx,%edi - xorl %ecx,%ebx - pxor %xmm0,%xmm4 - addl %eax,%ebp - rorl $7,%eax - pxor %xmm2,%xmm8 - xorl %ecx,%edi - movl %ebp,%esi - addl 4(%rsp),%edx - pxor %xmm8,%xmm4 - xorl %ebx,%eax - roll $5,%ebp - movdqa 
%xmm9,48(%rsp) - addl %edi,%edx - andl %eax,%esi - movdqa %xmm4,%xmm10 - xorl %ebx,%eax - addl %ebp,%edx - rorl $7,%ebp - movdqa %xmm4,%xmm8 - xorl %ebx,%esi - pslldq $12,%xmm10 - paddd %xmm4,%xmm4 - movl %edx,%edi - addl 8(%rsp),%ecx - psrld $31,%xmm8 - xorl %eax,%ebp - roll $5,%edx - addl %esi,%ecx - movdqa %xmm10,%xmm9 - andl %ebp,%edi - xorl %eax,%ebp - psrld $30,%xmm10 - addl %edx,%ecx - rorl $7,%edx - por %xmm8,%xmm4 - xorl %eax,%edi - movl %ecx,%esi - addl 12(%rsp),%ebx - pslld $2,%xmm9 - pxor %xmm10,%xmm4 - xorl %ebp,%edx - movdqa -64(%r14),%xmm10 - roll $5,%ecx - addl %edi,%ebx - andl %edx,%esi - pxor %xmm9,%xmm4 - xorl %ebp,%edx - addl %ecx,%ebx - rorl $7,%ecx - pshufd $238,%xmm1,%xmm5 - xorl %ebp,%esi - movdqa %xmm4,%xmm9 - paddd %xmm4,%xmm10 - movl %ebx,%edi - addl 16(%rsp),%eax - punpcklqdq %xmm2,%xmm5 - xorl %edx,%ecx - roll $5,%ebx - addl %esi,%eax - psrldq $4,%xmm9 - andl %ecx,%edi - xorl %edx,%ecx - pxor %xmm1,%xmm5 - addl %ebx,%eax - rorl $7,%ebx - pxor %xmm3,%xmm9 - xorl %edx,%edi - movl %eax,%esi - addl 20(%rsp),%ebp - pxor %xmm9,%xmm5 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm10,0(%rsp) - addl %edi,%ebp - andl %ebx,%esi - movdqa %xmm5,%xmm8 - xorl %ecx,%ebx - addl %eax,%ebp - rorl $7,%eax - movdqa %xmm5,%xmm9 - xorl %ecx,%esi - pslldq $12,%xmm8 - paddd %xmm5,%xmm5 - movl %ebp,%edi - addl 24(%rsp),%edx - psrld $31,%xmm9 - xorl %ebx,%eax - roll $5,%ebp - addl %esi,%edx - movdqa %xmm8,%xmm10 - andl %eax,%edi - xorl %ebx,%eax - psrld $30,%xmm8 - addl %ebp,%edx - rorl $7,%ebp - por %xmm9,%xmm5 - xorl %ebx,%edi - movl %edx,%esi - addl 28(%rsp),%ecx - pslld $2,%xmm10 - pxor %xmm8,%xmm5 - xorl %eax,%ebp - movdqa -32(%r14),%xmm8 - roll $5,%edx - addl %edi,%ecx - andl %ebp,%esi - pxor %xmm10,%xmm5 - xorl %eax,%ebp - addl %edx,%ecx - rorl $7,%edx - pshufd $238,%xmm2,%xmm6 - xorl %eax,%esi - movdqa %xmm5,%xmm10 - paddd %xmm5,%xmm8 - movl %ecx,%edi - addl 32(%rsp),%ebx - punpcklqdq %xmm3,%xmm6 - xorl %ebp,%edx - roll $5,%ecx - addl %esi,%ebx - psrldq 
$4,%xmm10 - andl %edx,%edi - xorl %ebp,%edx - pxor %xmm2,%xmm6 - addl %ecx,%ebx - rorl $7,%ecx - pxor %xmm4,%xmm10 - xorl %ebp,%edi - movl %ebx,%esi - addl 36(%rsp),%eax - pxor %xmm10,%xmm6 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm8,16(%rsp) - addl %edi,%eax - andl %ecx,%esi - movdqa %xmm6,%xmm9 - xorl %edx,%ecx - addl %ebx,%eax - rorl $7,%ebx - movdqa %xmm6,%xmm10 - xorl %edx,%esi - pslldq $12,%xmm9 - paddd %xmm6,%xmm6 - movl %eax,%edi - addl 40(%rsp),%ebp - psrld $31,%xmm10 - xorl %ecx,%ebx - roll $5,%eax - addl %esi,%ebp - movdqa %xmm9,%xmm8 - andl %ebx,%edi - xorl %ecx,%ebx - psrld $30,%xmm9 - addl %eax,%ebp - rorl $7,%eax - por %xmm10,%xmm6 - xorl %ecx,%edi - movl %ebp,%esi - addl 44(%rsp),%edx - pslld $2,%xmm8 - pxor %xmm9,%xmm6 - xorl %ebx,%eax - movdqa -32(%r14),%xmm9 - roll $5,%ebp - addl %edi,%edx - andl %eax,%esi - pxor %xmm8,%xmm6 - xorl %ebx,%eax - addl %ebp,%edx - rorl $7,%ebp - pshufd $238,%xmm3,%xmm7 - xorl %ebx,%esi - movdqa %xmm6,%xmm8 - paddd %xmm6,%xmm9 - movl %edx,%edi - addl 48(%rsp),%ecx - punpcklqdq %xmm4,%xmm7 - xorl %eax,%ebp - roll $5,%edx - addl %esi,%ecx - psrldq $4,%xmm8 - andl %ebp,%edi - xorl %eax,%ebp - pxor %xmm3,%xmm7 - addl %edx,%ecx - rorl $7,%edx - pxor %xmm5,%xmm8 - xorl %eax,%edi - movl %ecx,%esi - addl 52(%rsp),%ebx - pxor %xmm8,%xmm7 - xorl %ebp,%edx - roll $5,%ecx - movdqa %xmm9,32(%rsp) - addl %edi,%ebx - andl %edx,%esi - movdqa %xmm7,%xmm10 - xorl %ebp,%edx - addl %ecx,%ebx - rorl $7,%ecx - movdqa %xmm7,%xmm8 - xorl %ebp,%esi - pslldq $12,%xmm10 - paddd %xmm7,%xmm7 - movl %ebx,%edi - addl 56(%rsp),%eax - psrld $31,%xmm8 - xorl %edx,%ecx - roll $5,%ebx - addl %esi,%eax - movdqa %xmm10,%xmm9 - andl %ecx,%edi - xorl %edx,%ecx - psrld $30,%xmm10 - addl %ebx,%eax - rorl $7,%ebx - por %xmm8,%xmm7 - xorl %edx,%edi - movl %eax,%esi - addl 60(%rsp),%ebp - pslld $2,%xmm9 - pxor %xmm10,%xmm7 - xorl %ecx,%ebx - movdqa -32(%r14),%xmm10 - roll $5,%eax - addl %edi,%ebp - andl %ebx,%esi - pxor %xmm9,%xmm7 - pshufd $238,%xmm6,%xmm9 - 
xorl %ecx,%ebx - addl %eax,%ebp - rorl $7,%eax - pxor %xmm4,%xmm0 - xorl %ecx,%esi - movl %ebp,%edi - addl 0(%rsp),%edx - punpcklqdq %xmm7,%xmm9 - xorl %ebx,%eax - roll $5,%ebp - pxor %xmm1,%xmm0 - addl %esi,%edx - andl %eax,%edi - movdqa %xmm10,%xmm8 - xorl %ebx,%eax - paddd %xmm7,%xmm10 - addl %ebp,%edx - pxor %xmm9,%xmm0 - rorl $7,%ebp - xorl %ebx,%edi - movl %edx,%esi - addl 4(%rsp),%ecx - movdqa %xmm0,%xmm9 - xorl %eax,%ebp - roll $5,%edx - movdqa %xmm10,48(%rsp) - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - pslld $2,%xmm0 - addl %edx,%ecx - rorl $7,%edx - psrld $30,%xmm9 - xorl %eax,%esi - movl %ecx,%edi - addl 8(%rsp),%ebx - por %xmm9,%xmm0 - xorl %ebp,%edx - roll $5,%ecx - pshufd $238,%xmm7,%xmm10 - addl %esi,%ebx - andl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 12(%rsp),%eax - xorl %ebp,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - pxor %xmm5,%xmm1 - addl 16(%rsp),%ebp - xorl %ecx,%esi - punpcklqdq %xmm0,%xmm10 - movl %eax,%edi - roll $5,%eax - pxor %xmm2,%xmm1 - addl %esi,%ebp - xorl %ecx,%edi - movdqa %xmm8,%xmm9 - rorl $7,%ebx - paddd %xmm0,%xmm8 - addl %eax,%ebp - pxor %xmm10,%xmm1 - addl 20(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - movdqa %xmm1,%xmm10 - addl %edi,%edx - xorl %ebx,%esi - movdqa %xmm8,0(%rsp) - rorl $7,%eax - addl %ebp,%edx - addl 24(%rsp),%ecx - pslld $2,%xmm1 - xorl %eax,%esi - movl %edx,%edi - psrld $30,%xmm10 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - por %xmm10,%xmm1 - addl %edx,%ecx - addl 28(%rsp),%ebx - pshufd $238,%xmm0,%xmm8 - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - pxor %xmm6,%xmm2 - addl 32(%rsp),%eax - xorl %edx,%esi - punpcklqdq %xmm1,%xmm8 - movl %ebx,%edi - roll $5,%ebx - pxor %xmm3,%xmm2 - addl %esi,%eax - xorl %edx,%edi - movdqa 0(%r14),%xmm10 - rorl $7,%ecx - paddd %xmm1,%xmm9 - addl %ebx,%eax - pxor %xmm8,%xmm2 - addl 
36(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - movdqa %xmm2,%xmm8 - addl %edi,%ebp - xorl %ecx,%esi - movdqa %xmm9,16(%rsp) - rorl $7,%ebx - addl %eax,%ebp - addl 40(%rsp),%edx - pslld $2,%xmm2 - xorl %ebx,%esi - movl %ebp,%edi - psrld $30,%xmm8 - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - por %xmm8,%xmm2 - addl %ebp,%edx - addl 44(%rsp),%ecx - pshufd $238,%xmm1,%xmm9 - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - pxor %xmm7,%xmm3 - addl 48(%rsp),%ebx - xorl %ebp,%esi - punpcklqdq %xmm2,%xmm9 - movl %ecx,%edi - roll $5,%ecx - pxor %xmm4,%xmm3 - addl %esi,%ebx - xorl %ebp,%edi - movdqa %xmm10,%xmm8 - rorl $7,%edx - paddd %xmm2,%xmm10 - addl %ecx,%ebx - pxor %xmm9,%xmm3 - addl 52(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - movdqa %xmm3,%xmm9 - addl %edi,%eax - xorl %edx,%esi - movdqa %xmm10,32(%rsp) - rorl $7,%ecx - addl %ebx,%eax - addl 56(%rsp),%ebp - pslld $2,%xmm3 - xorl %ecx,%esi - movl %eax,%edi - psrld $30,%xmm9 - roll $5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - por %xmm9,%xmm3 - addl %eax,%ebp - addl 60(%rsp),%edx - pshufd $238,%xmm2,%xmm10 - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx - pxor %xmm0,%xmm4 - addl 0(%rsp),%ecx - xorl %eax,%esi - punpcklqdq %xmm3,%xmm10 - movl %edx,%edi - roll $5,%edx - pxor %xmm5,%xmm4 - addl %esi,%ecx - xorl %eax,%edi - movdqa %xmm8,%xmm9 - rorl $7,%ebp - paddd %xmm3,%xmm8 - addl %edx,%ecx - pxor %xmm10,%xmm4 - addl 4(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - movdqa %xmm4,%xmm10 - addl %edi,%ebx - xorl %ebp,%esi - movdqa %xmm8,48(%rsp) - rorl $7,%edx - addl %ecx,%ebx - addl 8(%rsp),%eax - pslld $2,%xmm4 - xorl %edx,%esi - movl %ebx,%edi - psrld $30,%xmm10 - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - por %xmm10,%xmm4 - addl %ebx,%eax - addl 12(%rsp),%ebp - pshufd $238,%xmm3,%xmm8 - 
xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - pxor %xmm1,%xmm5 - addl 16(%rsp),%edx - xorl %ebx,%esi - punpcklqdq %xmm4,%xmm8 - movl %ebp,%edi - roll $5,%ebp - pxor %xmm6,%xmm5 - addl %esi,%edx - xorl %ebx,%edi - movdqa %xmm9,%xmm10 - rorl $7,%eax - paddd %xmm4,%xmm9 - addl %ebp,%edx - pxor %xmm8,%xmm5 - addl 20(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - movdqa %xmm5,%xmm8 - addl %edi,%ecx - xorl %eax,%esi - movdqa %xmm9,0(%rsp) - rorl $7,%ebp - addl %edx,%ecx - addl 24(%rsp),%ebx - pslld $2,%xmm5 - xorl %ebp,%esi - movl %ecx,%edi - psrld $30,%xmm8 - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - por %xmm8,%xmm5 - addl %ecx,%ebx - addl 28(%rsp),%eax - pshufd $238,%xmm4,%xmm9 - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%edi - roll $5,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - pxor %xmm2,%xmm6 - addl 32(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - punpcklqdq %xmm5,%xmm9 - movl %eax,%edi - xorl %ecx,%esi - pxor %xmm7,%xmm6 - roll $5,%eax - addl %esi,%ebp - movdqa %xmm10,%xmm8 - xorl %ebx,%edi - paddd %xmm5,%xmm10 - xorl %ecx,%ebx - pxor %xmm9,%xmm6 - addl %eax,%ebp - addl 36(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - rorl $7,%eax - movdqa %xmm6,%xmm9 - movl %ebp,%esi - xorl %ebx,%edi - movdqa %xmm10,16(%rsp) - roll $5,%ebp - addl %edi,%edx - xorl %eax,%esi - pslld $2,%xmm6 - xorl %ebx,%eax - addl %ebp,%edx - psrld $30,%xmm9 - addl 40(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - por %xmm9,%xmm6 - rorl $7,%ebp - movl %edx,%edi - xorl %eax,%esi - roll $5,%edx - pshufd $238,%xmm5,%xmm10 - addl %esi,%ecx - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 44(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - rorl $7,%edx - movl %ecx,%esi - xorl %ebp,%edi - roll $5,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - pxor %xmm3,%xmm7 - addl 48(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - 
rorl $7,%ecx - punpcklqdq %xmm6,%xmm10 - movl %ebx,%edi - xorl %edx,%esi - pxor %xmm0,%xmm7 - roll $5,%ebx - addl %esi,%eax - movdqa 32(%r14),%xmm9 - xorl %ecx,%edi - paddd %xmm6,%xmm8 - xorl %edx,%ecx - pxor %xmm10,%xmm7 - addl %ebx,%eax - addl 52(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - rorl $7,%ebx - movdqa %xmm7,%xmm10 - movl %eax,%esi - xorl %ecx,%edi - movdqa %xmm8,32(%rsp) - roll $5,%eax - addl %edi,%ebp - xorl %ebx,%esi - pslld $2,%xmm7 - xorl %ecx,%ebx - addl %eax,%ebp - psrld $30,%xmm10 - addl 56(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - por %xmm10,%xmm7 - rorl $7,%eax - movl %ebp,%edi - xorl %ebx,%esi - roll $5,%ebp - pshufd $238,%xmm6,%xmm8 - addl %esi,%edx - xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 60(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - rorl $7,%ebp - movl %edx,%esi - xorl %eax,%edi - roll $5,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - pxor %xmm4,%xmm0 - addl 0(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - rorl $7,%edx - punpcklqdq %xmm7,%xmm8 - movl %ecx,%edi - xorl %ebp,%esi - pxor %xmm1,%xmm0 - roll $5,%ecx - addl %esi,%ebx - movdqa %xmm9,%xmm10 - xorl %edx,%edi - paddd %xmm7,%xmm9 - xorl %ebp,%edx - pxor %xmm8,%xmm0 - addl %ecx,%ebx - addl 4(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - rorl $7,%ecx - movdqa %xmm0,%xmm8 - movl %ebx,%esi - xorl %edx,%edi - movdqa %xmm9,48(%rsp) - roll $5,%ebx - addl %edi,%eax - xorl %ecx,%esi - pslld $2,%xmm0 - xorl %edx,%ecx - addl %ebx,%eax - psrld $30,%xmm8 - addl 8(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - por %xmm8,%xmm0 - rorl $7,%ebx - movl %eax,%edi - xorl %ecx,%esi - roll $5,%eax - pshufd $238,%xmm7,%xmm9 - addl %esi,%ebp - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 12(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - rorl $7,%eax - movl %ebp,%esi - xorl %ebx,%edi - roll $5,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - pxor %xmm5,%xmm1 - addl 16(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - 
rorl $7,%ebp - punpcklqdq %xmm0,%xmm9 - movl %edx,%edi - xorl %eax,%esi - pxor %xmm2,%xmm1 - roll $5,%edx - addl %esi,%ecx - movdqa %xmm10,%xmm8 - xorl %ebp,%edi - paddd %xmm0,%xmm10 - xorl %eax,%ebp - pxor %xmm9,%xmm1 - addl %edx,%ecx - addl 20(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - rorl $7,%edx - movdqa %xmm1,%xmm9 - movl %ecx,%esi - xorl %ebp,%edi - movdqa %xmm10,0(%rsp) - roll $5,%ecx - addl %edi,%ebx - xorl %edx,%esi - pslld $2,%xmm1 - xorl %ebp,%edx - addl %ecx,%ebx - psrld $30,%xmm9 - addl 24(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - por %xmm9,%xmm1 - rorl $7,%ecx - movl %ebx,%edi - xorl %edx,%esi - roll $5,%ebx - pshufd $238,%xmm0,%xmm10 - addl %esi,%eax - xorl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%edi - roll $5,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - pxor %xmm6,%xmm2 - addl 32(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - punpcklqdq %xmm1,%xmm10 - movl %ebp,%edi - xorl %ebx,%esi - pxor %xmm3,%xmm2 - roll $5,%ebp - addl %esi,%edx - movdqa %xmm8,%xmm9 - xorl %eax,%edi - paddd %xmm1,%xmm8 - xorl %ebx,%eax - pxor %xmm10,%xmm2 - addl %ebp,%edx - addl 36(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - rorl $7,%ebp - movdqa %xmm2,%xmm10 - movl %edx,%esi - xorl %eax,%edi - movdqa %xmm8,16(%rsp) - roll $5,%edx - addl %edi,%ecx - xorl %ebp,%esi - pslld $2,%xmm2 - xorl %eax,%ebp - addl %edx,%ecx - psrld $30,%xmm10 - addl 40(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - por %xmm10,%xmm2 - rorl $7,%edx - movl %ecx,%edi - xorl %ebp,%esi - roll $5,%ecx - pshufd $238,%xmm1,%xmm8 - addl %esi,%ebx - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 44(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%edi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - addl %ebx,%eax - pxor %xmm7,%xmm3 - addl 48(%rsp),%ebp - xorl %ecx,%esi - punpcklqdq %xmm2,%xmm8 - movl 
%eax,%edi - roll $5,%eax - pxor %xmm4,%xmm3 - addl %esi,%ebp - xorl %ecx,%edi - movdqa %xmm9,%xmm10 - rorl $7,%ebx - paddd %xmm2,%xmm9 - addl %eax,%ebp - pxor %xmm8,%xmm3 - addl 52(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - movdqa %xmm3,%xmm8 - addl %edi,%edx - xorl %ebx,%esi - movdqa %xmm9,32(%rsp) - rorl $7,%eax - addl %ebp,%edx - addl 56(%rsp),%ecx - pslld $2,%xmm3 - xorl %eax,%esi - movl %edx,%edi - psrld $30,%xmm8 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - por %xmm8,%xmm3 - addl %edx,%ecx - addl 60(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - addl 0(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - paddd %xmm3,%xmm10 - addl %esi,%eax - xorl %edx,%edi - movdqa %xmm10,48(%rsp) - rorl $7,%ecx - addl %ebx,%eax - addl 4(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 8(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - addl %ebp,%edx - addl 12(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - cmpq %r10,%r9 - je .Ldone_ssse3 - movdqa 64(%r14),%xmm6 - movdqa -64(%r14),%xmm9 - movdqu 0(%r9),%xmm0 - movdqu 16(%r9),%xmm1 - movdqu 32(%r9),%xmm2 - movdqu 48(%r9),%xmm3 -.byte 102,15,56,0,198 - addq $64,%r9 - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi -.byte 102,15,56,0,206 - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - paddd %xmm9,%xmm0 - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - movdqa %xmm0,0(%rsp) - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - psubd %xmm9,%xmm0 - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - roll $5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl 
%ebx,%edi - movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi -.byte 102,15,56,0,214 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - paddd %xmm9,%xmm1 - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - movdqa %xmm1,16(%rsp) - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - psubd %xmm9,%xmm1 - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi -.byte 102,15,56,0,222 - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - paddd %xmm9,%xmm2 - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - movdqa %xmm2,32(%rsp) - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - psubd %xmm9,%xmm2 - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - rorl $7,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - addl 12(%r8),%edx - movl %eax,0(%r8) - addl 16(%r8),%ebp - movl %esi,4(%r8) - movl %esi,%ebx - movl %ecx,8(%r8) - movl %ecx,%edi - movl %edx,12(%r8) - xorl %edx,%edi - movl %ebp,16(%r8) - andl %edi,%esi - jmp .Loop_ssse3 - -.align 16 -.Ldone_ssse3: - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - roll 
$5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - rorl $7,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - movl %eax,0(%r8) - addl 12(%r8),%edx - movl %esi,4(%r8) - addl 16(%r8),%ebp - movl %ecx,8(%r8) - movl %edx,12(%r8) - movl %ebp,16(%r8) - movq -40(%r11),%r14 -.cfi_restore %r14 - movq -32(%r11),%r13 -.cfi_restore %r13 - movq -24(%r11),%r12 -.cfi_restore %r12 - movq -16(%r11),%rbp -.cfi_restore %rbp - movq -8(%r11),%rbx -.cfi_restore %rbx - leaq (%r11),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue_ssse3: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3 -.type sha1_block_data_order_avx,@function -.align 16 -sha1_block_data_order_avx: -_avx_shortcut: -.cfi_startproc - movq %rsp,%r11 -.cfi_def_cfa_register 
%r11 - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - leaq -64(%rsp),%rsp - vzeroupper - andq $-64,%rsp - movq %rdi,%r8 - movq %rsi,%r9 - movq %rdx,%r10 - - shlq $6,%r10 - addq %r9,%r10 - leaq K_XX_XX+64(%rip),%r14 - - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movl %ebx,%esi - movl 16(%r8),%ebp - movl %ecx,%edi - xorl %edx,%edi - andl %edi,%esi - - vmovdqa 64(%r14),%xmm6 - vmovdqa -64(%r14),%xmm11 - vmovdqu 0(%r9),%xmm0 - vmovdqu 16(%r9),%xmm1 - vmovdqu 32(%r9),%xmm2 - vmovdqu 48(%r9),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - addq $64,%r9 - vpshufb %xmm6,%xmm1,%xmm1 - vpshufb %xmm6,%xmm2,%xmm2 - vpshufb %xmm6,%xmm3,%xmm3 - vpaddd %xmm11,%xmm0,%xmm4 - vpaddd %xmm11,%xmm1,%xmm5 - vpaddd %xmm11,%xmm2,%xmm6 - vmovdqa %xmm4,0(%rsp) - vmovdqa %xmm5,16(%rsp) - vmovdqa %xmm6,32(%rsp) - jmp .Loop_avx -.align 16 -.Loop_avx: - shrdl $2,%ebx,%ebx - xorl %edx,%esi - vpalignr $8,%xmm0,%xmm1,%xmm4 - movl %eax,%edi - addl 0(%rsp),%ebp - vpaddd %xmm3,%xmm11,%xmm9 - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrldq $4,%xmm3,%xmm8 - addl %esi,%ebp - andl %ebx,%edi - vpxor %xmm0,%xmm4,%xmm4 - xorl %ecx,%ebx - addl %eax,%ebp - vpxor %xmm2,%xmm8,%xmm8 - shrdl $7,%eax,%eax - xorl %ecx,%edi - movl %ebp,%esi - addl 4(%rsp),%edx - vpxor %xmm8,%xmm4,%xmm4 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vmovdqa %xmm9,48(%rsp) - addl %edi,%edx - andl %eax,%esi - vpsrld $31,%xmm4,%xmm8 - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%esi - vpslldq $12,%xmm4,%xmm10 - vpaddd %xmm4,%xmm4,%xmm4 - movl %edx,%edi - addl 8(%rsp),%ecx - xorl %eax,%ebp - shldl $5,%edx,%edx - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm4,%xmm4 - addl %esi,%ecx - andl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm4,%xmm4 - shrdl $7,%edx,%edx - xorl %eax,%edi - movl %ecx,%esi - addl 12(%rsp),%ebx - vpxor %xmm10,%xmm4,%xmm4 
- xorl %ebp,%edx - shldl $5,%ecx,%ecx - addl %edi,%ebx - andl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %ebp,%esi - vpalignr $8,%xmm1,%xmm2,%xmm5 - movl %ebx,%edi - addl 16(%rsp),%eax - vpaddd %xmm4,%xmm11,%xmm9 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrldq $4,%xmm4,%xmm8 - addl %esi,%eax - andl %ecx,%edi - vpxor %xmm1,%xmm5,%xmm5 - xorl %edx,%ecx - addl %ebx,%eax - vpxor %xmm3,%xmm8,%xmm8 - shrdl $7,%ebx,%ebx - xorl %edx,%edi - movl %eax,%esi - addl 20(%rsp),%ebp - vpxor %xmm8,%xmm5,%xmm5 - xorl %ecx,%ebx - shldl $5,%eax,%eax - vmovdqa %xmm9,0(%rsp) - addl %edi,%ebp - andl %ebx,%esi - vpsrld $31,%xmm5,%xmm8 - xorl %ecx,%ebx - addl %eax,%ebp - shrdl $7,%eax,%eax - xorl %ecx,%esi - vpslldq $12,%xmm5,%xmm10 - vpaddd %xmm5,%xmm5,%xmm5 - movl %ebp,%edi - addl 24(%rsp),%edx - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm5,%xmm5 - addl %esi,%edx - andl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm5,%xmm5 - shrdl $7,%ebp,%ebp - xorl %ebx,%edi - movl %edx,%esi - addl 28(%rsp),%ecx - vpxor %xmm10,%xmm5,%xmm5 - xorl %eax,%ebp - shldl $5,%edx,%edx - vmovdqa -32(%r14),%xmm11 - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - vpalignr $8,%xmm2,%xmm3,%xmm6 - movl %ecx,%edi - addl 32(%rsp),%ebx - vpaddd %xmm5,%xmm11,%xmm9 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - vpsrldq $4,%xmm5,%xmm8 - addl %esi,%ebx - andl %edx,%edi - vpxor %xmm2,%xmm6,%xmm6 - xorl %ebp,%edx - addl %ecx,%ebx - vpxor %xmm4,%xmm8,%xmm8 - shrdl $7,%ecx,%ecx - xorl %ebp,%edi - movl %ebx,%esi - addl 36(%rsp),%eax - vpxor %xmm8,%xmm6,%xmm6 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vmovdqa %xmm9,16(%rsp) - addl %edi,%eax - andl %ecx,%esi - vpsrld $31,%xmm6,%xmm8 - xorl %edx,%ecx - addl %ebx,%eax - shrdl $7,%ebx,%ebx - xorl %edx,%esi - vpslldq $12,%xmm6,%xmm10 - vpaddd %xmm6,%xmm6,%xmm6 - movl %eax,%edi - addl 40(%rsp),%ebp - xorl %ecx,%ebx - shldl 
$5,%eax,%eax - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm6,%xmm6 - addl %esi,%ebp - andl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm6,%xmm6 - shrdl $7,%eax,%eax - xorl %ecx,%edi - movl %ebp,%esi - addl 44(%rsp),%edx - vpxor %xmm10,%xmm6,%xmm6 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - addl %edi,%edx - andl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%esi - vpalignr $8,%xmm3,%xmm4,%xmm7 - movl %edx,%edi - addl 48(%rsp),%ecx - vpaddd %xmm6,%xmm11,%xmm9 - xorl %eax,%ebp - shldl $5,%edx,%edx - vpsrldq $4,%xmm6,%xmm8 - addl %esi,%ecx - andl %ebp,%edi - vpxor %xmm3,%xmm7,%xmm7 - xorl %eax,%ebp - addl %edx,%ecx - vpxor %xmm5,%xmm8,%xmm8 - shrdl $7,%edx,%edx - xorl %eax,%edi - movl %ecx,%esi - addl 52(%rsp),%ebx - vpxor %xmm8,%xmm7,%xmm7 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - vmovdqa %xmm9,32(%rsp) - addl %edi,%ebx - andl %edx,%esi - vpsrld $31,%xmm7,%xmm8 - xorl %ebp,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %ebp,%esi - vpslldq $12,%xmm7,%xmm10 - vpaddd %xmm7,%xmm7,%xmm7 - movl %ebx,%edi - addl 56(%rsp),%eax - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm7,%xmm7 - addl %esi,%eax - andl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm7,%xmm7 - shrdl $7,%ebx,%ebx - xorl %edx,%edi - movl %eax,%esi - addl 60(%rsp),%ebp - vpxor %xmm10,%xmm7,%xmm7 - xorl %ecx,%ebx - shldl $5,%eax,%eax - addl %edi,%ebp - andl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm6,%xmm7,%xmm8 - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%esi - movl %ebp,%edi - addl 0(%rsp),%edx - vpxor %xmm1,%xmm0,%xmm0 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vpaddd %xmm7,%xmm11,%xmm9 - addl %esi,%edx - andl %eax,%edi - vpxor %xmm8,%xmm0,%xmm0 - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%edi - vpsrld $30,%xmm0,%xmm8 - vmovdqa %xmm9,48(%rsp) - movl %edx,%esi - addl 4(%rsp),%ecx - xorl %eax,%ebp - shldl $5,%edx,%edx 
- vpslld $2,%xmm0,%xmm0 - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - movl %ecx,%edi - addl 8(%rsp),%ebx - vpor %xmm8,%xmm0,%xmm0 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - addl %esi,%ebx - andl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 12(%rsp),%eax - xorl %ebp,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm7,%xmm0,%xmm8 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - vpxor %xmm2,%xmm1,%xmm1 - addl %esi,%ebp - xorl %ecx,%edi - vpaddd %xmm0,%xmm11,%xmm9 - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpxor %xmm8,%xmm1,%xmm1 - addl 20(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - vpsrld $30,%xmm1,%xmm8 - vmovdqa %xmm9,0(%rsp) - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpslld $2,%xmm1,%xmm1 - addl 24(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpor %xmm8,%xmm1,%xmm1 - addl 28(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpalignr $8,%xmm0,%xmm1,%xmm8 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - vpxor %xmm3,%xmm2,%xmm2 - addl %esi,%eax - xorl %edx,%edi - vpaddd %xmm1,%xmm11,%xmm9 - vmovdqa 0(%r14),%xmm11 - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpxor %xmm8,%xmm2,%xmm2 - addl 36(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm8 - vmovdqa %xmm9,16(%rsp) - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpslld $2,%xmm2,%xmm2 - addl 40(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpor %xmm8,%xmm2,%xmm2 - addl 
44(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpalignr $8,%xmm1,%xmm2,%xmm8 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - vpxor %xmm4,%xmm3,%xmm3 - addl %esi,%ebx - xorl %ebp,%edi - vpaddd %xmm2,%xmm11,%xmm9 - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpxor %xmm8,%xmm3,%xmm3 - addl 52(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm8 - vmovdqa %xmm9,32(%rsp) - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - addl 56(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpor %xmm8,%xmm3,%xmm3 - addl 60(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpalignr $8,%xmm2,%xmm3,%xmm8 - vpxor %xmm0,%xmm4,%xmm4 - addl 0(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - vpxor %xmm5,%xmm4,%xmm4 - addl %esi,%ecx - xorl %eax,%edi - vpaddd %xmm3,%xmm11,%xmm9 - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpxor %xmm8,%xmm4,%xmm4 - addl 4(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - vpsrld $30,%xmm4,%xmm8 - vmovdqa %xmm9,48(%rsp) - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpslld $2,%xmm4,%xmm4 - addl 8(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpor %xmm8,%xmm4,%xmm4 - addl 12(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm3,%xmm4,%xmm8 - vpxor %xmm1,%xmm5,%xmm5 - addl 16(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - vpxor %xmm6,%xmm5,%xmm5 - addl %esi,%edx - xorl %ebx,%edi - vpaddd 
%xmm4,%xmm11,%xmm9 - shrdl $7,%eax,%eax - addl %ebp,%edx - vpxor %xmm8,%xmm5,%xmm5 - addl 20(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - vpsrld $30,%xmm5,%xmm8 - vmovdqa %xmm9,0(%rsp) - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpslld $2,%xmm5,%xmm5 - addl 24(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpor %xmm8,%xmm5,%xmm5 - addl 28(%rsp),%eax - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm4,%xmm5,%xmm8 - vpxor %xmm2,%xmm6,%xmm6 - addl 32(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vpxor %xmm7,%xmm6,%xmm6 - movl %eax,%edi - xorl %ecx,%esi - vpaddd %xmm5,%xmm11,%xmm9 - shldl $5,%eax,%eax - addl %esi,%ebp - vpxor %xmm8,%xmm6,%xmm6 - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 36(%rsp),%edx - vpsrld $30,%xmm6,%xmm8 - vmovdqa %xmm9,16(%rsp) - andl %ebx,%edi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%esi - vpslld $2,%xmm6,%xmm6 - xorl %ebx,%edi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - addl 40(%rsp),%ecx - andl %eax,%esi - vpor %xmm8,%xmm6,%xmm6 - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%edi - xorl %eax,%esi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 44(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%esi - xorl %ebp,%edi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - vpalignr $8,%xmm5,%xmm6,%xmm8 - vpxor %xmm3,%xmm7,%xmm7 - addl 48(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - vpxor %xmm0,%xmm7,%xmm7 - movl %ebx,%edi - xorl %edx,%esi - vpaddd %xmm6,%xmm11,%xmm9 - vmovdqa 32(%r14),%xmm11 - shldl $5,%ebx,%ebx - addl %esi,%eax - vpxor %xmm8,%xmm7,%xmm7 - xorl 
%ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%rsp),%ebp - vpsrld $30,%xmm7,%xmm8 - vmovdqa %xmm9,32(%rsp) - andl %ecx,%edi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - vpslld $2,%xmm7,%xmm7 - xorl %ecx,%edi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - addl 56(%rsp),%edx - andl %ebx,%esi - vpor %xmm8,%xmm7,%xmm7 - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%edi - xorl %ebx,%esi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 60(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%esi - xorl %eax,%edi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - vpalignr $8,%xmm6,%xmm7,%xmm8 - vpxor %xmm4,%xmm0,%xmm0 - addl 0(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - shrdl $7,%edx,%edx - vpxor %xmm1,%xmm0,%xmm0 - movl %ecx,%edi - xorl %ebp,%esi - vpaddd %xmm7,%xmm11,%xmm9 - shldl $5,%ecx,%ecx - addl %esi,%ebx - vpxor %xmm8,%xmm0,%xmm0 - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 4(%rsp),%eax - vpsrld $30,%xmm0,%xmm8 - vmovdqa %xmm9,48(%rsp) - andl %edx,%edi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - vpslld $2,%xmm0,%xmm0 - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%rsp),%ebp - andl %ecx,%esi - vpor %xmm8,%xmm0,%xmm0 - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%edi - xorl %ecx,%esi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 12(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%esi - xorl %ebx,%edi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - vpalignr $8,%xmm7,%xmm0,%xmm8 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - vpxor %xmm2,%xmm1,%xmm1 - movl %edx,%edi - xorl %eax,%esi - vpaddd %xmm0,%xmm11,%xmm9 - shldl 
$5,%edx,%edx - addl %esi,%ecx - vpxor %xmm8,%xmm1,%xmm1 - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 20(%rsp),%ebx - vpsrld $30,%xmm1,%xmm8 - vmovdqa %xmm9,0(%rsp) - andl %ebp,%edi - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%esi - vpslld $2,%xmm1,%xmm1 - xorl %ebp,%edi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - addl 24(%rsp),%eax - andl %edx,%esi - vpor %xmm8,%xmm1,%xmm1 - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%edi - xorl %edx,%esi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - xorl %ecx,%edi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm0,%xmm1,%xmm8 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vpxor %xmm3,%xmm2,%xmm2 - movl %ebp,%edi - xorl %ebx,%esi - vpaddd %xmm1,%xmm11,%xmm9 - shldl $5,%ebp,%ebp - addl %esi,%edx - vpxor %xmm8,%xmm2,%xmm2 - xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 36(%rsp),%ecx - vpsrld $30,%xmm2,%xmm8 - vmovdqa %xmm9,16(%rsp) - andl %eax,%edi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%esi - vpslld $2,%xmm2,%xmm2 - xorl %eax,%edi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - addl 40(%rsp),%ebx - andl %ebp,%esi - vpor %xmm8,%xmm2,%xmm2 - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%edi - xorl %ebp,%esi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 44(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - addl %ebx,%eax - vpalignr $8,%xmm1,%xmm2,%xmm8 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - vpxor %xmm4,%xmm3,%xmm3 - addl %esi,%ebp - 
xorl %ecx,%edi - vpaddd %xmm2,%xmm11,%xmm9 - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpxor %xmm8,%xmm3,%xmm3 - addl 52(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - vpsrld $30,%xmm3,%xmm8 - vmovdqa %xmm9,32(%rsp) - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpslld $2,%xmm3,%xmm3 - addl 56(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpor %xmm8,%xmm3,%xmm3 - addl 60(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 0(%rsp),%eax - vpaddd %xmm3,%xmm11,%xmm9 - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - vmovdqa %xmm9,48(%rsp) - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 4(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 8(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 12(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - cmpq %r10,%r9 - je .Ldone_avx - vmovdqa 64(%r14),%xmm6 - vmovdqa -64(%r14),%xmm11 - vmovdqu 0(%r9),%xmm0 - vmovdqu 16(%r9),%xmm1 - vmovdqu 32(%r9),%xmm2 - vmovdqu 48(%r9),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - addq $64,%r9 - addl 16(%rsp),%ebx - xorl %ebp,%esi - vpshufb %xmm6,%xmm1,%xmm1 - movl %ecx,%edi - shldl $5,%ecx,%ecx - vpaddd %xmm11,%xmm0,%xmm4 - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vmovdqa %xmm4,0(%rsp) - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi 
- shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - vpshufb %xmm6,%xmm2,%xmm2 - movl %edx,%edi - shldl $5,%edx,%edx - vpaddd %xmm11,%xmm1,%xmm5 - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vmovdqa %xmm5,16(%rsp) - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - vpshufb %xmm6,%xmm3,%xmm3 - movl %ebp,%edi - shldl $5,%ebp,%ebp - vpaddd %xmm11,%xmm2,%xmm6 - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - vmovdqa %xmm6,32(%rsp) - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - addl 12(%r8),%edx - movl %eax,0(%r8) - addl 16(%r8),%ebp - movl %esi,4(%r8) - movl %esi,%ebx - movl %ecx,8(%r8) - movl %ecx,%edi - movl %edx,12(%r8) - xorl %edx,%edi - movl %ebp,16(%r8) - andl %edi,%esi - jmp .Loop_avx - -.align 16 -.Ldone_avx: - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl 
%edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vzeroupper - - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - movl %eax,0(%r8) - addl 12(%r8),%edx - movl %esi,4(%r8) - addl 16(%r8),%ebp - movl %ecx,8(%r8) - movl %edx,12(%r8) - movl %ebp,16(%r8) - movq -40(%r11),%r14 -.cfi_restore %r14 - movq -32(%r11),%r13 -.cfi_restore %r13 - movq -24(%r11),%r12 -.cfi_restore %r12 - movq -16(%r11),%rbp -.cfi_restore %rbp - movq -8(%r11),%rbx -.cfi_restore %rbx - 
leaq (%r11),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue_avx: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha1_block_data_order_avx,.-sha1_block_data_order_avx -.align 64 -K_XX_XX: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 64 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S deleted file mode 100644 index 0bacd6a4a8..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S +++ /dev/null @@ -1,3973 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.globl sha256_block_data_order -.hidden sha256_block_data_order -.type sha256_block_data_order,@function -.align 16 -sha256_block_data_order: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 0(%r11),%r9d - movl 4(%r11),%r10d - movl 8(%r11),%r11d - andl $1073741824,%r9d - andl $268435968,%r10d - orl %r9d,%r10d - cmpl $1342177792,%r10d - je .Lavx_shortcut - testl $512,%r10d - jnz .Lssse3_shortcut - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - shlq $4,%rdx - subq $64+32,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) -.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 -.Lprologue: - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - jmp .Lloop - -.align 16 -.Lloop: - movl %ebx,%edi - leaq K256(%rip),%rbp - xorl %ecx,%edi - movl 0(%rsi),%r12d - movl %r8d,%r13d - movl %eax,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,0(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx 
- addl %r12d,%r11d - - leaq 4(%rbp),%rbp - addl %r14d,%r11d - movl 4(%rsi),%r12d - movl %edx,%r13d - movl %r11d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,4(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - addl %r14d,%r10d - movl 8(%rsi),%r12d - movl %ecx,%r13d - movl %r10d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,8(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - addl %r14d,%r9d - movl 12(%rsi),%r12d - movl %ebx,%r13d - movl %r9d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,12(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - addl %r14d,%r8d - movl 16(%rsi),%r12d - movl %eax,%r13d - movl %r8d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl 
%ecx,%r15d - - movl %r12d,16(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - addl %r14d,%edx - movl 20(%rsi),%r12d - movl %r11d,%r13d - movl %edx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,20(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - addl %r14d,%ecx - movl 24(%rsi),%r12d - movl %r10d,%r13d - movl %ecx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,24(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - addl %r14d,%ebx - movl 28(%rsi),%r12d - movl %r9d,%r13d - movl %ebx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,28(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl 
(%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - addl %r14d,%eax - movl 32(%rsi),%r12d - movl %r8d,%r13d - movl %eax,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,32(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - addl %r14d,%r11d - movl 36(%rsi),%r12d - movl %edx,%r13d - movl %r11d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,36(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - addl %r14d,%r10d - movl 40(%rsi),%r12d - movl %ecx,%r13d - movl %r10d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,40(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 
4(%rbp),%rbp - addl %r14d,%r9d - movl 44(%rsi),%r12d - movl %ebx,%r13d - movl %r9d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,44(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - addl %r14d,%r8d - movl 48(%rsi),%r12d - movl %eax,%r13d - movl %r8d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,48(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - addl %r14d,%edx - movl 52(%rsi),%r12d - movl %r11d,%r13d - movl %edx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,52(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - addl %r14d,%ecx - movl 56(%rsi),%r12d - movl %r10d,%r13d - movl %ecx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,56(%rsp) - 
xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - addl %r14d,%ebx - movl 60(%rsi),%r12d - movl %r9d,%r13d - movl %ebx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,60(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - jmp .Lrounds_16_xx -.align 16 -.Lrounds_16_xx: - movl 4(%rsp),%r13d - movl 56(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%eax - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 36(%rsp),%r12d - - addl 0(%rsp),%r12d - movl %r8d,%r13d - addl %r15d,%r12d - movl %eax,%r14d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,0(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - movl 8(%rsp),%r13d - movl 60(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - 
addl %r14d,%r11d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 40(%rsp),%r12d - - addl 4(%rsp),%r12d - movl %edx,%r13d - addl %edi,%r12d - movl %r11d,%r14d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,4(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - movl 12(%rsp),%r13d - movl 0(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r10d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 44(%rsp),%r12d - - addl 8(%rsp),%r12d - movl %ecx,%r13d - addl %r15d,%r12d - movl %r10d,%r14d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,8(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - movl 16(%rsp),%r13d - movl 4(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r9d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 48(%rsp),%r12d - - addl 
12(%rsp),%r12d - movl %ebx,%r13d - addl %edi,%r12d - movl %r9d,%r14d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,12(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - movl 20(%rsp),%r13d - movl 8(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r8d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 52(%rsp),%r12d - - addl 16(%rsp),%r12d - movl %eax,%r13d - addl %r15d,%r12d - movl %r8d,%r14d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,16(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - movl 24(%rsp),%r13d - movl 12(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%edx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 56(%rsp),%r12d - - addl 20(%rsp),%r12d - movl %r11d,%r13d - addl %edi,%r12d - movl %edx,%r14d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,20(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - 
addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - movl 28(%rsp),%r13d - movl 16(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ecx - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 60(%rsp),%r12d - - addl 24(%rsp),%r12d - movl %r10d,%r13d - addl %r15d,%r12d - movl %ecx,%r14d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,24(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - movl 32(%rsp),%r13d - movl 20(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ebx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 0(%rsp),%r12d - - addl 28(%rsp),%r12d - movl %r9d,%r13d - addl %edi,%r12d - movl %ebx,%r14d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,28(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - 
addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - movl 36(%rsp),%r13d - movl 24(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%eax - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 4(%rsp),%r12d - - addl 32(%rsp),%r12d - movl %r8d,%r13d - addl %r15d,%r12d - movl %eax,%r14d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,32(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - movl 40(%rsp),%r13d - movl 28(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r11d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 8(%rsp),%r12d - - addl 36(%rsp),%r12d - movl %edx,%r13d - addl %edi,%r12d - movl %r11d,%r14d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,36(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - movl 44(%rsp),%r13d - movl 32(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r10d - movl %r15d,%r14d - rorl $2,%r15d 
- - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 12(%rsp),%r12d - - addl 40(%rsp),%r12d - movl %ecx,%r13d - addl %r15d,%r12d - movl %r10d,%r14d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,40(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - movl 48(%rsp),%r13d - movl 36(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r9d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 16(%rsp),%r12d - - addl 44(%rsp),%r12d - movl %ebx,%r13d - addl %edi,%r12d - movl %r9d,%r14d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,44(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - movl 52(%rsp),%r13d - movl 40(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r8d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 20(%rsp),%r12d - - addl 48(%rsp),%r12d - movl %eax,%r13d - addl %r15d,%r12d - movl 
%r8d,%r14d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,48(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - movl 56(%rsp),%r13d - movl 44(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%edx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 24(%rsp),%r12d - - addl 52(%rsp),%r12d - movl %r11d,%r13d - addl %edi,%r12d - movl %edx,%r14d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,52(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - movl 60(%rsp),%r13d - movl 48(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ecx - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 28(%rsp),%r12d - - addl 56(%rsp),%r12d - movl %r10d,%r13d - addl %r15d,%r12d - movl %ecx,%r14d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,56(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - 
xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - movl 0(%rsp),%r13d - movl 52(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ebx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 32(%rsp),%r12d - - addl 60(%rsp),%r12d - movl %r9d,%r13d - addl %edi,%r12d - movl %ebx,%r14d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,60(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - cmpb $0,3(%rbp) - jnz .Lrounds_16_xx - - movq 64+0(%rsp),%rdi - addl %r14d,%eax - leaq 64(%rsi),%rsi - - addl 0(%rdi),%eax - addl 4(%rdi),%ebx - addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb .Lloop - - movq 88(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue: - .byte 0xf3,0xc3 
-.cfi_endproc -.size sha256_block_data_order,.-sha256_block_data_order -.align 64 -.type K256,@object -K256: -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff -.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff -.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 
-.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.type sha256_block_data_order_ssse3,@function -.align 64 -sha256_block_data_order_ssse3: -.cfi_startproc -.Lssse3_shortcut: - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - shlq $4,%rdx - subq $96,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) -.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 -.Lprologue_ssse3: - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - - - jmp .Lloop_ssse3 -.align 16 -.Lloop_ssse3: - movdqa K256+512(%rip),%xmm7 - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 -.byte 102,15,56,0,199 - movdqu 48(%rsi),%xmm3 - leaq K256(%rip),%rbp -.byte 102,15,56,0,207 - movdqa 0(%rbp),%xmm4 - movdqa 32(%rbp),%xmm5 -.byte 102,15,56,0,215 - paddd %xmm0,%xmm4 - movdqa 64(%rbp),%xmm6 -.byte 102,15,56,0,223 - movdqa 96(%rbp),%xmm7 - paddd %xmm1,%xmm5 - paddd %xmm2,%xmm6 - paddd %xmm3,%xmm7 - movdqa %xmm4,0(%rsp) - movl %eax,%r14d - movdqa %xmm5,16(%rsp) - movl %ebx,%edi - movdqa %xmm6,32(%rsp) - xorl %ecx,%edi - movdqa %xmm7,48(%rsp) - movl %r8d,%r13d - jmp .Lssse3_00_47 - -.align 16 -.Lssse3_00_47: - subq $-128,%rbp - rorl $14,%r13d - movdqa %xmm1,%xmm4 - movl %r14d,%eax - movl %r9d,%r12d - movdqa %xmm3,%xmm7 - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d -.byte 102,15,58,15,224,4 - andl 
%r8d,%r12d - xorl %r8d,%r13d -.byte 102,15,58,15,250,4 - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %ebx,%r15d - addl %r12d,%r11d - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - paddd %xmm7,%xmm0 - rorl $2,%r14d - addl %r11d,%edx - psrld $7,%xmm6 - addl %edi,%r11d - movl %edx,%r13d - pshufd $250,%xmm3,%xmm7 - addl %r11d,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%r11d - movl %r8d,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %r11d,%r14d - pxor %xmm5,%xmm4 - andl %edx,%r12d - xorl %edx,%r13d - pslld $11,%xmm5 - addl 4(%rsp),%r10d - movl %r11d,%edi - pxor %xmm6,%xmm4 - xorl %r9d,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %eax,%edi - addl %r12d,%r10d - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - psrld $10,%xmm7 - addl %r13d,%r10d - xorl %eax,%r15d - paddd %xmm4,%xmm0 - rorl $2,%r14d - addl %r10d,%ecx - psrlq $17,%xmm6 - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %ecx,%r13d - xorl %r8d,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - psrldq $8,%xmm7 - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - paddd %xmm7,%xmm0 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - pshufd $80,%xmm0,%xmm7 - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - movdqa %xmm7,%xmm6 - addl %edi,%r9d - movl %ebx,%r13d - psrld $10,%xmm7 - addl %r9d,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%r9d - movl %ecx,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - psrlq $2,%xmm6 - andl %ebx,%r12d - 
xorl %ebx,%r13d - addl 12(%rsp),%r8d - pxor %xmm6,%xmm7 - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %r10d,%edi - addl %r12d,%r8d - movdqa 0(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - paddd %xmm7,%xmm0 - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - paddd %xmm0,%xmm6 - movl %eax,%r13d - addl %r8d,%r14d - movdqa %xmm6,0(%rsp) - rorl $14,%r13d - movdqa %xmm2,%xmm4 - movl %r14d,%r8d - movl %ebx,%r12d - movdqa %xmm0,%xmm7 - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d -.byte 102,15,58,15,225,4 - andl %eax,%r12d - xorl %eax,%r13d -.byte 102,15,58,15,251,4 - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %r9d,%r15d - addl %r12d,%edx - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - paddd %xmm7,%xmm1 - rorl $2,%r14d - addl %edx,%r11d - psrld $7,%xmm6 - addl %edi,%edx - movl %r11d,%r13d - pshufd $250,%xmm0,%xmm7 - addl %edx,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%edx - movl %eax,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %edx,%r14d - pxor %xmm5,%xmm4 - andl %r11d,%r12d - xorl %r11d,%r13d - pslld $11,%xmm5 - addl 20(%rsp),%ecx - movl %edx,%edi - pxor %xmm6,%xmm4 - xorl %ebx,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %r8d,%edi - addl %r12d,%ecx - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - psrld $10,%xmm7 - addl %r13d,%ecx - xorl %r8d,%r15d - paddd %xmm4,%xmm1 - rorl $2,%r14d - addl %ecx,%r10d - psrlq $17,%xmm6 - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %r10d,%r13d - xorl %eax,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %ecx,%r14d - andl 
%r10d,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - psrldq $8,%xmm7 - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - paddd %xmm7,%xmm1 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - pshufd $80,%xmm1,%xmm7 - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - movdqa %xmm7,%xmm6 - addl %edi,%ebx - movl %r9d,%r13d - psrld $10,%xmm7 - addl %ebx,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%ebx - movl %r10d,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - psrlq $2,%xmm6 - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - pxor %xmm6,%xmm7 - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %ecx,%edi - addl %r12d,%eax - movdqa 32(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - paddd %xmm7,%xmm1 - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - paddd %xmm1,%xmm6 - movl %r8d,%r13d - addl %eax,%r14d - movdqa %xmm6,16(%rsp) - rorl $14,%r13d - movdqa %xmm3,%xmm4 - movl %r14d,%eax - movl %r9d,%r12d - movdqa %xmm1,%xmm7 - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d -.byte 102,15,58,15,226,4 - andl %r8d,%r12d - xorl %r8d,%r13d -.byte 102,15,58,15,248,4 - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %ebx,%r15d - addl %r12d,%r11d - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - paddd %xmm7,%xmm2 - rorl $2,%r14d - addl %r11d,%edx - psrld $7,%xmm6 - addl %edi,%r11d - movl %edx,%r13d - pshufd $250,%xmm1,%xmm7 - addl %r11d,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%r11d - movl %r8d,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %r11d,%r14d - pxor 
%xmm5,%xmm4 - andl %edx,%r12d - xorl %edx,%r13d - pslld $11,%xmm5 - addl 36(%rsp),%r10d - movl %r11d,%edi - pxor %xmm6,%xmm4 - xorl %r9d,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %eax,%edi - addl %r12d,%r10d - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - psrld $10,%xmm7 - addl %r13d,%r10d - xorl %eax,%r15d - paddd %xmm4,%xmm2 - rorl $2,%r14d - addl %r10d,%ecx - psrlq $17,%xmm6 - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %ecx,%r13d - xorl %r8d,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - psrldq $8,%xmm7 - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - paddd %xmm7,%xmm2 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - pshufd $80,%xmm2,%xmm7 - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - movdqa %xmm7,%xmm6 - addl %edi,%r9d - movl %ebx,%r13d - psrld $10,%xmm7 - addl %r9d,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%r9d - movl %ecx,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - psrlq $2,%xmm6 - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - pxor %xmm6,%xmm7 - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %r10d,%edi - addl %r12d,%r8d - movdqa 64(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - paddd %xmm7,%xmm2 - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - paddd %xmm2,%xmm6 - movl %eax,%r13d - addl %r8d,%r14d - movdqa %xmm6,32(%rsp) - rorl $14,%r13d - movdqa %xmm0,%xmm4 - movl %r14d,%r8d - movl %ebx,%r12d - movdqa %xmm2,%xmm7 - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d -.byte 102,15,58,15,227,4 - andl 
%eax,%r12d - xorl %eax,%r13d -.byte 102,15,58,15,249,4 - addl 48(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %r9d,%r15d - addl %r12d,%edx - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - paddd %xmm7,%xmm3 - rorl $2,%r14d - addl %edx,%r11d - psrld $7,%xmm6 - addl %edi,%edx - movl %r11d,%r13d - pshufd $250,%xmm2,%xmm7 - addl %edx,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%edx - movl %eax,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %edx,%r14d - pxor %xmm5,%xmm4 - andl %r11d,%r12d - xorl %r11d,%r13d - pslld $11,%xmm5 - addl 52(%rsp),%ecx - movl %edx,%edi - pxor %xmm6,%xmm4 - xorl %ebx,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %r8d,%edi - addl %r12d,%ecx - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - psrld $10,%xmm7 - addl %r13d,%ecx - xorl %r8d,%r15d - paddd %xmm4,%xmm3 - rorl $2,%r14d - addl %ecx,%r10d - psrlq $17,%xmm6 - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %r10d,%r13d - xorl %eax,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - psrldq $8,%xmm7 - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - paddd %xmm7,%xmm3 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - pshufd $80,%xmm3,%xmm7 - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - movdqa %xmm7,%xmm6 - addl %edi,%ebx - movl %r9d,%r13d - psrld $10,%xmm7 - addl %ebx,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%ebx - movl %r10d,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - psrlq $2,%xmm6 - andl %r9d,%r12d - xorl 
%r9d,%r13d - addl 60(%rsp),%eax - pxor %xmm6,%xmm7 - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %ecx,%edi - addl %r12d,%eax - movdqa 96(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - paddd %xmm7,%xmm3 - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - paddd %xmm3,%xmm6 - movl %r8d,%r13d - addl %eax,%r14d - movdqa %xmm6,48(%rsp) - cmpb $0,131(%rbp) - jne .Lssse3_00_47 - rorl $14,%r13d - movl %r14d,%eax - movl %r9d,%r12d - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - rorl $6,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - rorl $2,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - rorl $14,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - rorl $11,%r14d - xorl %eax,%edi - addl %r12d,%r10d - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - rorl $2,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - rorl $14,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d 
- xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - rorl $6,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - rorl $14,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - rorl $6,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - rorl $2,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - rorl $14,%r13d - movl %r14d,%edx - movl %eax,%r12d - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - rorl $11,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - rorl $2,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - rorl $14,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - xorl %ecx,%edi - addl 
%r12d,%eax - rorl $6,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - rorl $14,%r13d - movl %r14d,%eax - movl %r9d,%r12d - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - rorl $6,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - rorl $2,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - rorl $14,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - rorl $11,%r14d - xorl %eax,%edi - addl %r12d,%r10d - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - rorl $2,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - rorl $14,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - rorl $6,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - rorl $2,%r14d - addl %r8d,%eax - addl 
%r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - rorl $14,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 48(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - rorl $6,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - rorl $2,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - rorl $14,%r13d - movl %r14d,%edx - movl %eax,%r12d - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - rorl $11,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - rorl $2,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - rorl $14,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - xorl %ecx,%edi - addl %r12d,%eax - rorl $6,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - movq 64+0(%rsp),%rdi - movl %r14d,%eax - - addl 0(%rdi),%eax - leaq 64(%rsi),%rsi - addl 4(%rdi),%ebx - 
addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb .Lloop_ssse3 - - movq 88(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue_ssse3: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3 -.type sha256_block_data_order_avx,@function -.align 64 -sha256_block_data_order_avx: -.cfi_startproc -.Lavx_shortcut: - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - shlq $4,%rdx - subq $96,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) -.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 -.Lprologue_avx: - - vzeroupper - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - vmovdqa K256+512+32(%rip),%xmm8 - vmovdqa K256+512+64(%rip),%xmm9 - jmp .Lloop_avx -.align 16 -.Lloop_avx: - vmovdqa K256+512(%rip),%xmm7 - vmovdqu 0(%rsi),%xmm0 - vmovdqu 16(%rsi),%xmm1 - vmovdqu 32(%rsi),%xmm2 - vmovdqu 48(%rsi),%xmm3 - vpshufb %xmm7,%xmm0,%xmm0 - leaq K256(%rip),%rbp - vpshufb %xmm7,%xmm1,%xmm1 - vpshufb %xmm7,%xmm2,%xmm2 - vpaddd 0(%rbp),%xmm0,%xmm4 - vpshufb %xmm7,%xmm3,%xmm3 - vpaddd 
32(%rbp),%xmm1,%xmm5 - vpaddd 64(%rbp),%xmm2,%xmm6 - vpaddd 96(%rbp),%xmm3,%xmm7 - vmovdqa %xmm4,0(%rsp) - movl %eax,%r14d - vmovdqa %xmm5,16(%rsp) - movl %ebx,%edi - vmovdqa %xmm6,32(%rsp) - xorl %ecx,%edi - vmovdqa %xmm7,48(%rsp) - movl %r8d,%r13d - jmp .Lavx_00_47 - -.align 16 -.Lavx_00_47: - subq $-128,%rbp - vpalignr $4,%xmm0,%xmm1,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - vpalignr $4,%xmm2,%xmm3,%xmm7 - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - vpaddd %xmm7,%xmm0,%xmm0 - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - vpshufd $250,%xmm3,%xmm7 - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - vpsrld $11,%xmm6,%xmm6 - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - vpaddd %xmm4,%xmm0,%xmm0 - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - vpxor %xmm7,%xmm6,%xmm6 - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - 
xorl %r10d,%r14d - vpaddd %xmm6,%xmm0,%xmm0 - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - vpshufd $80,%xmm0,%xmm7 - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - vpxor %xmm7,%xmm6,%xmm6 - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - vpaddd %xmm6,%xmm0,%xmm0 - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - vpaddd 0(%rbp),%xmm0,%xmm6 - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - vmovdqa %xmm6,0(%rsp) - vpalignr $4,%xmm1,%xmm2,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - vpalignr $4,%xmm3,%xmm0,%xmm7 - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - vpaddd %xmm7,%xmm1,%xmm1 - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - vpshufd $250,%xmm0,%xmm7 - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - vpsrld $11,%xmm6,%xmm6 - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%edx - movl %eax,%r12d - shrdl 
$9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - vpaddd %xmm4,%xmm1,%xmm1 - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - vpxor %xmm7,%xmm6,%xmm6 - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - vpaddd %xmm6,%xmm1,%xmm1 - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - vpshufd $80,%xmm1,%xmm7 - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - vpxor %xmm7,%xmm6,%xmm6 - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - vpaddd %xmm6,%xmm1,%xmm1 - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - vpaddd 32(%rbp),%xmm1,%xmm6 - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - vmovdqa %xmm6,16(%rsp) - 
vpalignr $4,%xmm2,%xmm3,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - vpalignr $4,%xmm0,%xmm1,%xmm7 - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - vpaddd %xmm7,%xmm2,%xmm2 - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - vpshufd $250,%xmm1,%xmm7 - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - vpsrld $11,%xmm6,%xmm6 - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - vpaddd %xmm4,%xmm2,%xmm2 - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - vpxor %xmm7,%xmm6,%xmm6 - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - vpaddd %xmm6,%xmm2,%xmm2 - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - vpshufd $80,%xmm2,%xmm7 - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl 
%r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - vpxor %xmm7,%xmm6,%xmm6 - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - vpaddd %xmm6,%xmm2,%xmm2 - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - vpaddd 64(%rbp),%xmm2,%xmm6 - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - vmovdqa %xmm6,32(%rsp) - vpalignr $4,%xmm3,%xmm0,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - vpalignr $4,%xmm1,%xmm2,%xmm7 - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - vpaddd %xmm7,%xmm3,%xmm3 - xorl %eax,%r13d - addl 48(%rsp),%edx - movl %r8d,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - vpshufd $250,%xmm2,%xmm7 - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - vpsrld $11,%xmm6,%xmm6 - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - 
xorl %r8d,%edi - addl %r12d,%ecx - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - vpaddd %xmm4,%xmm3,%xmm3 - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - vpxor %xmm7,%xmm6,%xmm6 - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - vpaddd %xmm6,%xmm3,%xmm3 - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - vpshufd $80,%xmm3,%xmm7 - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - vpxor %xmm7,%xmm6,%xmm6 - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - vpaddd %xmm6,%xmm3,%xmm3 - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - vpaddd 96(%rbp),%xmm3,%xmm6 - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - vmovdqa %xmm6,48(%rsp) - cmpb $0,131(%rbp) - jne .Lavx_00_47 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl 
%ebx,%r15d - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - shrdl 
$11,%r14d,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl 
%r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 48(%rsp),%edx 
- movl %r8d,%r15d - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - movq 64+0(%rsp),%rdi - movl %r14d,%eax - - addl 0(%rdi),%eax - leaq 64(%rsi),%rsi - addl 4(%rdi),%ebx - addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 
24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb .Lloop_avx - - movq 88(%rsp),%rsi -.cfi_def_cfa %rsi,8 - vzeroupper - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue_avx: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha256_block_data_order_avx,.-sha256_block_data_order_avx -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S deleted file mode 100644 index afc47f139b..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S +++ /dev/null @@ -1,2992 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.globl sha512_block_data_order -.hidden sha512_block_data_order -.type sha512_block_data_order,@function -.align 16 -sha512_block_data_order: -.cfi_startproc - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 0(%r11),%r9d - movl 4(%r11),%r10d - movl 8(%r11),%r11d - andl $1073741824,%r9d - andl $268435968,%r10d - orl %r9d,%r10d - cmpl $1342177792,%r10d - je .Lavx_shortcut - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - shlq $4,%rdx - subq $128+32,%rsp - leaq (%rsi,%rdx,8),%rdx - andq $-64,%rsp - movq %rdi,128+0(%rsp) - movq %rsi,128+8(%rsp) - movq %rdx,128+16(%rsp) - movq %rax,152(%rsp) -.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 -.Lprologue: - - movq 0(%rdi),%rax - movq 8(%rdi),%rbx - movq 16(%rdi),%rcx - movq 24(%rdi),%rdx - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - movq 56(%rdi),%r11 - jmp .Lloop - -.align 16 -.Lloop: - movq %rbx,%rdi - leaq K512(%rip),%rbp - xorq %rcx,%rdi - movq 0(%rsi),%r12 - movq %r8,%r13 - movq %rax,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,0(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - addq %r14,%r11 - movq 8(%rsi),%r12 - 
movq %rdx,%r13 - movq %r11,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,8(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - addq %r14,%r10 - movq 16(%rsi),%r12 - movq %rcx,%r13 - movq %r10,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,16(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - addq %r14,%r9 - movq 24(%rsi),%r12 - movq %rbx,%r13 - movq %r9,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,24(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - addq %r14,%r8 - movq 32(%rsi),%r12 - movq %rax,%r13 - movq %r8,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,32(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - 
xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - addq %r14,%rdx - movq 40(%rsi),%r12 - movq %r11,%r13 - movq %rdx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,40(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - addq %r14,%rcx - movq 48(%rsi),%r12 - movq %r10,%r13 - movq %rcx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,48(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - addq %r14,%rbx - movq 56(%rsi),%r12 - movq %r9,%r13 - movq %rbx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,56(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - addq %r14,%rax - movq 64(%rsi),%r12 - movq %r8,%r13 - movq %rax,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r9,%r15 - - xorq 
%r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,64(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - addq %r14,%r11 - movq 72(%rsi),%r12 - movq %rdx,%r13 - movq %r11,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,72(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - addq %r14,%r10 - movq 80(%rsi),%r12 - movq %rcx,%r13 - movq %r10,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,80(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - addq %r14,%r9 - movq 88(%rsi),%r12 - movq %rbx,%r13 - movq %r9,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,88(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - 
rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - addq %r14,%r8 - movq 96(%rsi),%r12 - movq %rax,%r13 - movq %r8,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,96(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - addq %r14,%rdx - movq 104(%rsi),%r12 - movq %r11,%r13 - movq %rdx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,104(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - addq %r14,%rcx - movq 112(%rsi),%r12 - movq %r10,%r13 - movq %rcx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,112(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - addq %r14,%rbx - movq 120(%rsi),%r12 - movq %r9,%r13 - movq %rbx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,120(%rsp) - xorq %rbx,%r14 
- andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - jmp .Lrounds_16_xx -.align 16 -.Lrounds_16_xx: - movq 8(%rsp),%r13 - movq 112(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rax - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 72(%rsp),%r12 - - addq 0(%rsp),%r12 - movq %r8,%r13 - addq %r15,%r12 - movq %rax,%r14 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,0(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - movq 16(%rsp),%r13 - movq 120(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r11 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 80(%rsp),%r12 - - addq 8(%rsp),%r12 - movq %rdx,%r13 - addq %rdi,%r12 - movq %r11,%r14 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,8(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - 
addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - movq 24(%rsp),%r13 - movq 0(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r10 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 88(%rsp),%r12 - - addq 16(%rsp),%r12 - movq %rcx,%r13 - addq %r15,%r12 - movq %r10,%r14 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,16(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - movq 32(%rsp),%r13 - movq 8(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r9 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 96(%rsp),%r12 - - addq 24(%rsp),%r12 - movq %rbx,%r13 - addq %rdi,%r12 - movq %r9,%r14 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,24(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - movq 40(%rsp),%r13 - movq 16(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r8 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 104(%rsp),%r12 - - addq 
32(%rsp),%r12 - movq %rax,%r13 - addq %r15,%r12 - movq %r8,%r14 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,32(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - movq 48(%rsp),%r13 - movq 24(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rdx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 112(%rsp),%r12 - - addq 40(%rsp),%r12 - movq %r11,%r13 - addq %rdi,%r12 - movq %rdx,%r14 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,40(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - movq 56(%rsp),%r13 - movq 32(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rcx - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 120(%rsp),%r12 - - addq 48(%rsp),%r12 - movq %r10,%r13 - addq %r15,%r12 - movq %rcx,%r14 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,48(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq 
(%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - movq 64(%rsp),%r13 - movq 40(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rbx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 0(%rsp),%r12 - - addq 56(%rsp),%r12 - movq %r9,%r13 - addq %rdi,%r12 - movq %rbx,%r14 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,56(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - movq 72(%rsp),%r13 - movq 48(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rax - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 8(%rsp),%r12 - - addq 64(%rsp),%r12 - movq %r8,%r13 - addq %r15,%r12 - movq %rax,%r14 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,64(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - movq 80(%rsp),%r13 - movq 56(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r11 - movq %rdi,%r14 - rorq $42,%rdi - - xorq 
%r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 16(%rsp),%r12 - - addq 72(%rsp),%r12 - movq %rdx,%r13 - addq %rdi,%r12 - movq %r11,%r14 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,72(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - movq 88(%rsp),%r13 - movq 64(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r10 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 24(%rsp),%r12 - - addq 80(%rsp),%r12 - movq %rcx,%r13 - addq %r15,%r12 - movq %r10,%r14 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,80(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - movq 96(%rsp),%r13 - movq 72(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r9 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 32(%rsp),%r12 - - addq 88(%rsp),%r12 - movq %rbx,%r13 - addq %rdi,%r12 - movq %r9,%r14 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,88(%rsp) - xorq %r9,%r14 - 
andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - movq 104(%rsp),%r13 - movq 80(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r8 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 40(%rsp),%r12 - - addq 96(%rsp),%r12 - movq %rax,%r13 - addq %r15,%r12 - movq %r8,%r14 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,96(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - movq 112(%rsp),%r13 - movq 88(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rdx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 48(%rsp),%r12 - - addq 104(%rsp),%r12 - movq %r11,%r13 - addq %rdi,%r12 - movq %rdx,%r14 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,104(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 
24(%rbp),%rbp - movq 120(%rsp),%r13 - movq 96(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rcx - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 56(%rsp),%r12 - - addq 112(%rsp),%r12 - movq %r10,%r13 - addq %r15,%r12 - movq %rcx,%r14 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,112(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - movq 0(%rsp),%r13 - movq 104(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rbx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 64(%rsp),%r12 - - addq 120(%rsp),%r12 - movq %r9,%r13 - addq %rdi,%r12 - movq %rbx,%r14 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,120(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - cmpb $0,7(%rbp) - jnz .Lrounds_16_xx - - movq 128+0(%rsp),%rdi - addq %r14,%rax - leaq 128(%rsi),%rsi - - addq 0(%rdi),%rax - addq 8(%rdi),%rbx - addq 16(%rdi),%rcx - addq 24(%rdi),%rdx - addq 32(%rdi),%r8 - addq 40(%rdi),%r9 - addq 48(%rdi),%r10 - addq 56(%rdi),%r11 - - cmpq 128+16(%rsp),%rsi - - movq %rax,0(%rdi) - 
movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - movq %r8,32(%rdi) - movq %r9,40(%rdi) - movq %r10,48(%rdi) - movq %r11,56(%rdi) - jb .Lloop - - movq 152(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha512_block_data_order,.-sha512_block_data_order -.align 64 -.type K512,@object -K512: -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 
0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 
0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 -.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - -.quad 0x0001020304050607,0x08090a0b0c0d0e0f -.quad 0x0001020304050607,0x08090a0b0c0d0e0f -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.type sha512_block_data_order_avx,@function -.align 64 -sha512_block_data_order_avx: -.cfi_startproc -.Lavx_shortcut: - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - shlq $4,%rdx - subq $160,%rsp - leaq (%rsi,%rdx,8),%rdx - andq $-64,%rsp - movq %rdi,128+0(%rsp) - movq %rsi,128+8(%rsp) - movq %rdx,128+16(%rsp) - movq %rax,152(%rsp) -.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 -.Lprologue_avx: - - vzeroupper - movq 0(%rdi),%rax - movq 8(%rdi),%rbx - movq 16(%rdi),%rcx - movq 24(%rdi),%rdx - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - movq 56(%rdi),%r11 - jmp .Lloop_avx -.align 16 -.Lloop_avx: - vmovdqa K512+1280(%rip),%xmm11 - vmovdqu 0(%rsi),%xmm0 - leaq K512+128(%rip),%rbp - vmovdqu 16(%rsi),%xmm1 - vmovdqu 32(%rsi),%xmm2 - vpshufb %xmm11,%xmm0,%xmm0 - vmovdqu 48(%rsi),%xmm3 - vpshufb %xmm11,%xmm1,%xmm1 - vmovdqu 64(%rsi),%xmm4 - vpshufb %xmm11,%xmm2,%xmm2 - vmovdqu 80(%rsi),%xmm5 - vpshufb %xmm11,%xmm3,%xmm3 - vmovdqu 96(%rsi),%xmm6 - vpshufb %xmm11,%xmm4,%xmm4 - vmovdqu 112(%rsi),%xmm7 - vpshufb %xmm11,%xmm5,%xmm5 - vpaddq -128(%rbp),%xmm0,%xmm8 - vpshufb %xmm11,%xmm6,%xmm6 - vpaddq -96(%rbp),%xmm1,%xmm9 - vpshufb %xmm11,%xmm7,%xmm7 - vpaddq 
-64(%rbp),%xmm2,%xmm10 - vpaddq -32(%rbp),%xmm3,%xmm11 - vmovdqa %xmm8,0(%rsp) - vpaddq 0(%rbp),%xmm4,%xmm8 - vmovdqa %xmm9,16(%rsp) - vpaddq 32(%rbp),%xmm5,%xmm9 - vmovdqa %xmm10,32(%rsp) - vpaddq 64(%rbp),%xmm6,%xmm10 - vmovdqa %xmm11,48(%rsp) - vpaddq 96(%rbp),%xmm7,%xmm11 - vmovdqa %xmm8,64(%rsp) - movq %rax,%r14 - vmovdqa %xmm9,80(%rsp) - movq %rbx,%rdi - vmovdqa %xmm10,96(%rsp) - xorq %rcx,%rdi - vmovdqa %xmm11,112(%rsp) - movq %r8,%r13 - jmp .Lavx_00_47 - -.align 16 -.Lavx_00_47: - addq $256,%rbp - vpalignr $8,%xmm0,%xmm1,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rax - vpalignr $8,%xmm4,%xmm5,%xmm11 - movq %r9,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r8,%r13 - xorq %r10,%r12 - vpaddq %xmm11,%xmm0,%xmm0 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r8,%r12 - xorq %r8,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 0(%rsp),%r11 - movq %rax,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rbx,%r15 - addq %r12,%r11 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rax,%r14 - addq %r13,%r11 - vpxor %xmm10,%xmm8,%xmm8 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm7,%xmm11 - addq %r11,%rdx - addq %rdi,%r11 - vpxor %xmm9,%xmm8,%xmm8 - movq %rdx,%r13 - addq %r11,%r14 - vpsllq $3,%xmm7,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r11 - vpaddq %xmm8,%xmm0,%xmm0 - movq %r8,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm7,%xmm9 - xorq %rdx,%r13 - xorq %r9,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rdx,%r12 - xorq %rdx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 8(%rsp),%r10 - movq %r11,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r9,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rax,%rdi - addq %r12,%r10 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm0,%xmm0 - xorq %r11,%r14 - addq %r13,%r10 - vpaddq -128(%rbp),%xmm0,%xmm10 - xorq 
%rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - vmovdqa %xmm10,0(%rsp) - vpalignr $8,%xmm1,%xmm2,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r10 - vpalignr $8,%xmm5,%xmm6,%xmm11 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rcx,%r13 - xorq %r8,%r12 - vpaddq %xmm11,%xmm1,%xmm1 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rcx,%r12 - xorq %rcx,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 16(%rsp),%r9 - movq %r10,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r11,%r15 - addq %r12,%r9 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r10,%r14 - addq %r13,%r9 - vpxor %xmm10,%xmm8,%xmm8 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm0,%xmm11 - addq %r9,%rbx - addq %rdi,%r9 - vpxor %xmm9,%xmm8,%xmm8 - movq %rbx,%r13 - addq %r9,%r14 - vpsllq $3,%xmm0,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r9 - vpaddq %xmm8,%xmm1,%xmm1 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm0,%xmm9 - xorq %rbx,%r13 - xorq %rdx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rbx,%r12 - xorq %rbx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 24(%rsp),%r8 - movq %r9,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r10,%rdi - addq %r12,%r8 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm1,%xmm1 - xorq %r9,%r14 - addq %r13,%r8 - vpaddq -96(%rbp),%xmm1,%xmm10 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - vmovdqa %xmm10,16(%rsp) - vpalignr $8,%xmm2,%xmm3,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r8 - vpalignr $8,%xmm6,%xmm7,%xmm11 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rax,%r13 - xorq %rcx,%r12 - vpaddq %xmm11,%xmm2,%xmm2 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - 
vpsrlq $7,%xmm8,%xmm11 - andq %rax,%r12 - xorq %rax,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 32(%rsp),%rdx - movq %r8,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r9,%r15 - addq %r12,%rdx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r8,%r14 - addq %r13,%rdx - vpxor %xmm10,%xmm8,%xmm8 - xorq %r9,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm1,%xmm11 - addq %rdx,%r11 - addq %rdi,%rdx - vpxor %xmm9,%xmm8,%xmm8 - movq %r11,%r13 - addq %rdx,%r14 - vpsllq $3,%xmm1,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rdx - vpaddq %xmm8,%xmm2,%xmm2 - movq %rax,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm1,%xmm9 - xorq %r11,%r13 - xorq %rbx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r11,%r12 - xorq %r11,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 40(%rsp),%rcx - movq %rdx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r8,%rdi - addq %r12,%rcx - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm2,%xmm2 - xorq %rdx,%r14 - addq %r13,%rcx - vpaddq -64(%rbp),%xmm2,%xmm10 - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - vmovdqa %xmm10,32(%rsp) - vpalignr $8,%xmm3,%xmm4,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rcx - vpalignr $8,%xmm7,%xmm0,%xmm11 - movq %r11,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r10,%r13 - xorq %rax,%r12 - vpaddq %xmm11,%xmm3,%xmm3 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r10,%r12 - xorq %r10,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 48(%rsp),%rbx - movq %rcx,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rdx,%r15 - addq %r12,%rbx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rcx,%r14 - addq %r13,%rbx - vpxor 
%xmm10,%xmm8,%xmm8 - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm2,%xmm11 - addq %rbx,%r9 - addq %rdi,%rbx - vpxor %xmm9,%xmm8,%xmm8 - movq %r9,%r13 - addq %rbx,%r14 - vpsllq $3,%xmm2,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rbx - vpaddq %xmm8,%xmm3,%xmm3 - movq %r10,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm2,%xmm9 - xorq %r9,%r13 - xorq %r11,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r9,%r12 - xorq %r9,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 56(%rsp),%rax - movq %rbx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r11,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rcx,%rdi - addq %r12,%rax - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm3,%xmm3 - xorq %rbx,%r14 - addq %r13,%rax - vpaddq -32(%rbp),%xmm3,%xmm10 - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - vmovdqa %xmm10,48(%rsp) - vpalignr $8,%xmm4,%xmm5,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rax - vpalignr $8,%xmm0,%xmm1,%xmm11 - movq %r9,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r8,%r13 - xorq %r10,%r12 - vpaddq %xmm11,%xmm4,%xmm4 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r8,%r12 - xorq %r8,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 64(%rsp),%r11 - movq %rax,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rbx,%r15 - addq %r12,%r11 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rax,%r14 - addq %r13,%r11 - vpxor %xmm10,%xmm8,%xmm8 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm3,%xmm11 - addq %r11,%rdx - addq %rdi,%r11 - vpxor %xmm9,%xmm8,%xmm8 - movq %rdx,%r13 - addq %r11,%r14 - vpsllq $3,%xmm3,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r11 - vpaddq %xmm8,%xmm4,%xmm4 - movq %r8,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm3,%xmm9 - xorq %rdx,%r13 - xorq %r9,%r12 - vpxor 
%xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rdx,%r12 - xorq %rdx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 72(%rsp),%r10 - movq %r11,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r9,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rax,%rdi - addq %r12,%r10 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm4,%xmm4 - xorq %r11,%r14 - addq %r13,%r10 - vpaddq 0(%rbp),%xmm4,%xmm10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - vmovdqa %xmm10,64(%rsp) - vpalignr $8,%xmm5,%xmm6,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r10 - vpalignr $8,%xmm1,%xmm2,%xmm11 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rcx,%r13 - xorq %r8,%r12 - vpaddq %xmm11,%xmm5,%xmm5 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rcx,%r12 - xorq %rcx,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 80(%rsp),%r9 - movq %r10,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r11,%r15 - addq %r12,%r9 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r10,%r14 - addq %r13,%r9 - vpxor %xmm10,%xmm8,%xmm8 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm4,%xmm11 - addq %r9,%rbx - addq %rdi,%r9 - vpxor %xmm9,%xmm8,%xmm8 - movq %rbx,%r13 - addq %r9,%r14 - vpsllq $3,%xmm4,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r9 - vpaddq %xmm8,%xmm5,%xmm5 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm4,%xmm9 - xorq %rbx,%r13 - xorq %rdx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rbx,%r12 - xorq %rbx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 88(%rsp),%r8 - movq %r9,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r10,%rdi - addq %r12,%r8 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq 
%xmm11,%xmm5,%xmm5 - xorq %r9,%r14 - addq %r13,%r8 - vpaddq 32(%rbp),%xmm5,%xmm10 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - vmovdqa %xmm10,80(%rsp) - vpalignr $8,%xmm6,%xmm7,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r8 - vpalignr $8,%xmm2,%xmm3,%xmm11 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rax,%r13 - xorq %rcx,%r12 - vpaddq %xmm11,%xmm6,%xmm6 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rax,%r12 - xorq %rax,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 96(%rsp),%rdx - movq %r8,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r9,%r15 - addq %r12,%rdx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r8,%r14 - addq %r13,%rdx - vpxor %xmm10,%xmm8,%xmm8 - xorq %r9,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm5,%xmm11 - addq %rdx,%r11 - addq %rdi,%rdx - vpxor %xmm9,%xmm8,%xmm8 - movq %r11,%r13 - addq %rdx,%r14 - vpsllq $3,%xmm5,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rdx - vpaddq %xmm8,%xmm6,%xmm6 - movq %rax,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm5,%xmm9 - xorq %r11,%r13 - xorq %rbx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r11,%r12 - xorq %r11,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 104(%rsp),%rcx - movq %rdx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r8,%rdi - addq %r12,%rcx - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm6,%xmm6 - xorq %rdx,%r14 - addq %r13,%rcx - vpaddq 64(%rbp),%xmm6,%xmm10 - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - vmovdqa %xmm10,96(%rsp) - vpalignr $8,%xmm7,%xmm0,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rcx - vpalignr $8,%xmm3,%xmm4,%xmm11 - movq %r11,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - 
xorq %r10,%r13 - xorq %rax,%r12 - vpaddq %xmm11,%xmm7,%xmm7 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r10,%r12 - xorq %r10,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 112(%rsp),%rbx - movq %rcx,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rdx,%r15 - addq %r12,%rbx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rcx,%r14 - addq %r13,%rbx - vpxor %xmm10,%xmm8,%xmm8 - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm6,%xmm11 - addq %rbx,%r9 - addq %rdi,%rbx - vpxor %xmm9,%xmm8,%xmm8 - movq %r9,%r13 - addq %rbx,%r14 - vpsllq $3,%xmm6,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rbx - vpaddq %xmm8,%xmm7,%xmm7 - movq %r10,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm6,%xmm9 - xorq %r9,%r13 - xorq %r11,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r9,%r12 - xorq %r9,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 120(%rsp),%rax - movq %rbx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r11,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rcx,%rdi - addq %r12,%rax - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm7,%xmm7 - xorq %rbx,%r14 - addq %r13,%rax - vpaddq 96(%rbp),%xmm7,%xmm10 - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - vmovdqa %xmm10,112(%rsp) - cmpb $0,135(%rbp) - jne .Lavx_00_47 - shrdq $23,%r13,%r13 - movq %r14,%rax - movq %r9,%r12 - shrdq $5,%r14,%r14 - xorq %r8,%r13 - xorq %r10,%r12 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - andq %r8,%r12 - xorq %r8,%r13 - addq 0(%rsp),%r11 - movq %rax,%r15 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - xorq %rbx,%r15 - addq %r12,%r11 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rax,%r14 - addq %r13,%r11 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - addq %r11,%rdx - addq %rdi,%r11 - movq %rdx,%r13 - addq %r11,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r11 - 
movq %r8,%r12 - shrdq $5,%r14,%r14 - xorq %rdx,%r13 - xorq %r9,%r12 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - andq %rdx,%r12 - xorq %rdx,%r13 - addq 8(%rsp),%r10 - movq %r11,%rdi - xorq %r9,%r12 - shrdq $6,%r14,%r14 - xorq %rax,%rdi - addq %r12,%r10 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r11,%r14 - addq %r13,%r10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r10 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - xorq %rcx,%r13 - xorq %r8,%r12 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - andq %rcx,%r12 - xorq %rcx,%r13 - addq 16(%rsp),%r9 - movq %r10,%r15 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - xorq %r11,%r15 - addq %r12,%r9 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r10,%r14 - addq %r13,%r9 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - addq %r9,%rbx - addq %rdi,%r9 - movq %rbx,%r13 - addq %r9,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r9 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - xorq %rbx,%r13 - xorq %rdx,%r12 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - andq %rbx,%r12 - xorq %rbx,%r13 - addq 24(%rsp),%r8 - movq %r9,%rdi - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - xorq %r10,%rdi - addq %r12,%r8 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r9,%r14 - addq %r13,%r8 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r8 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - xorq %rax,%r13 - xorq %rcx,%r12 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - andq %rax,%r12 - xorq %rax,%r13 - addq 32(%rsp),%rdx - movq %r8,%r15 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - xorq %r9,%r15 - addq %r12,%rdx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r8,%r14 - addq %r13,%rdx - xorq %r9,%rdi - shrdq $28,%r14,%r14 - addq %rdx,%r11 - addq %rdi,%rdx - movq %r11,%r13 - addq %rdx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rdx - movq %rax,%r12 - shrdq $5,%r14,%r14 - xorq %r11,%r13 - xorq %rbx,%r12 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - andq %r11,%r12 - xorq 
%r11,%r13 - addq 40(%rsp),%rcx - movq %rdx,%rdi - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - xorq %r8,%rdi - addq %r12,%rcx - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rdx,%r14 - addq %r13,%rcx - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rcx - movq %r11,%r12 - shrdq $5,%r14,%r14 - xorq %r10,%r13 - xorq %rax,%r12 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - andq %r10,%r12 - xorq %r10,%r13 - addq 48(%rsp),%rbx - movq %rcx,%r15 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - xorq %rdx,%r15 - addq %r12,%rbx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rcx,%r14 - addq %r13,%rbx - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - addq %rbx,%r9 - addq %rdi,%rbx - movq %r9,%r13 - addq %rbx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rbx - movq %r10,%r12 - shrdq $5,%r14,%r14 - xorq %r9,%r13 - xorq %r11,%r12 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - andq %r9,%r12 - xorq %r9,%r13 - addq 56(%rsp),%rax - movq %rbx,%rdi - xorq %r11,%r12 - shrdq $6,%r14,%r14 - xorq %rcx,%rdi - addq %r12,%rax - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rbx,%r14 - addq %r13,%rax - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rax - movq %r9,%r12 - shrdq $5,%r14,%r14 - xorq %r8,%r13 - xorq %r10,%r12 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - andq %r8,%r12 - xorq %r8,%r13 - addq 64(%rsp),%r11 - movq %rax,%r15 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - xorq %rbx,%r15 - addq %r12,%r11 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rax,%r14 - addq %r13,%r11 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - addq %r11,%rdx - addq %rdi,%r11 - movq %rdx,%r13 - addq %r11,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r11 - movq %r8,%r12 - shrdq $5,%r14,%r14 - xorq %rdx,%r13 - xorq %r9,%r12 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - andq %rdx,%r12 - xorq %rdx,%r13 - addq 72(%rsp),%r10 - movq %r11,%rdi - xorq %r9,%r12 - shrdq $6,%r14,%r14 - xorq %rax,%rdi - addq %r12,%r10 - 
shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r11,%r14 - addq %r13,%r10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r10 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - xorq %rcx,%r13 - xorq %r8,%r12 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - andq %rcx,%r12 - xorq %rcx,%r13 - addq 80(%rsp),%r9 - movq %r10,%r15 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - xorq %r11,%r15 - addq %r12,%r9 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r10,%r14 - addq %r13,%r9 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - addq %r9,%rbx - addq %rdi,%r9 - movq %rbx,%r13 - addq %r9,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r9 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - xorq %rbx,%r13 - xorq %rdx,%r12 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - andq %rbx,%r12 - xorq %rbx,%r13 - addq 88(%rsp),%r8 - movq %r9,%rdi - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - xorq %r10,%rdi - addq %r12,%r8 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r9,%r14 - addq %r13,%r8 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r8 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - xorq %rax,%r13 - xorq %rcx,%r12 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - andq %rax,%r12 - xorq %rax,%r13 - addq 96(%rsp),%rdx - movq %r8,%r15 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - xorq %r9,%r15 - addq %r12,%rdx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r8,%r14 - addq %r13,%rdx - xorq %r9,%rdi - shrdq $28,%r14,%r14 - addq %rdx,%r11 - addq %rdi,%rdx - movq %r11,%r13 - addq %rdx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rdx - movq %rax,%r12 - shrdq $5,%r14,%r14 - xorq %r11,%r13 - xorq %rbx,%r12 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - andq %r11,%r12 - xorq %r11,%r13 - addq 104(%rsp),%rcx - movq %rdx,%rdi - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - xorq %r8,%rdi - addq %r12,%rcx - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rdx,%r14 - addq %r13,%rcx - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - 
addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rcx - movq %r11,%r12 - shrdq $5,%r14,%r14 - xorq %r10,%r13 - xorq %rax,%r12 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - andq %r10,%r12 - xorq %r10,%r13 - addq 112(%rsp),%rbx - movq %rcx,%r15 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - xorq %rdx,%r15 - addq %r12,%rbx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rcx,%r14 - addq %r13,%rbx - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - addq %rbx,%r9 - addq %rdi,%rbx - movq %r9,%r13 - addq %rbx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rbx - movq %r10,%r12 - shrdq $5,%r14,%r14 - xorq %r9,%r13 - xorq %r11,%r12 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - andq %r9,%r12 - xorq %r9,%r13 - addq 120(%rsp),%rax - movq %rbx,%rdi - xorq %r11,%r12 - shrdq $6,%r14,%r14 - xorq %rcx,%rdi - addq %r12,%rax - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rbx,%r14 - addq %r13,%rax - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - movq 128+0(%rsp),%rdi - movq %r14,%rax - - addq 0(%rdi),%rax - leaq 128(%rsi),%rsi - addq 8(%rdi),%rbx - addq 16(%rdi),%rcx - addq 24(%rdi),%rdx - addq 32(%rdi),%r8 - addq 40(%rdi),%r9 - addq 48(%rdi),%r10 - addq 56(%rdi),%r11 - - cmpq 128+16(%rsp),%rsi - - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - movq %r8,32(%rdi) - movq %r9,40(%rdi) - movq %r10,48(%rdi) - movq %r11,56(%rdi) - jb .Lloop_avx - - movq 152(%rsp),%rsi -.cfi_def_cfa %rsi,8 - vzeroupper - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lepilogue_avx: - .byte 0xf3,0xc3 -.cfi_endproc -.size sha512_block_data_order_avx,.-sha512_block_data_order_avx -#endif -.section .note.GNU-stack,"",@progbits diff --git 
a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S deleted file mode 100644 index 27a34617a3..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S +++ /dev/null @@ -1,1133 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - - - - - - - - - - - - -.type _vpaes_encrypt_core,@function -.align 16 -_vpaes_encrypt_core: -.cfi_startproc - movq %rdx,%r9 - movq $16,%r11 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa .Lk_ipt(%rip),%xmm2 - pandn %xmm0,%xmm1 - movdqu (%r9),%xmm5 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa .Lk_ipt+16(%rip),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm5,%xmm2 - addq $16,%r9 - pxor %xmm2,%xmm0 - leaq .Lk_mc_backward(%rip),%r10 - jmp .Lenc_entry - -.align 16 -.Lenc_loop: - - movdqa %xmm13,%xmm4 - movdqa %xmm12,%xmm0 -.byte 102,15,56,0,226 -.byte 102,15,56,0,195 - pxor %xmm5,%xmm4 - movdqa %xmm15,%xmm5 - pxor %xmm4,%xmm0 - movdqa -64(%r11,%r10,1),%xmm1 -.byte 102,15,56,0,234 - movdqa (%r11,%r10,1),%xmm4 - movdqa %xmm14,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm0,%xmm3 - pxor %xmm5,%xmm2 -.byte 102,15,56,0,193 - addq $16,%r9 - pxor %xmm2,%xmm0 -.byte 102,15,56,0,220 - addq $16,%r11 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,193 - andq $0x30,%r11 - subq $1,%rax - pxor %xmm3,%xmm0 - -.Lenc_entry: - - movdqa %xmm9,%xmm1 - movdqa %xmm11,%xmm5 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,232 - movdqa %xmm10,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm10,%xmm4 - pxor %xmm5,%xmm3 -.byte 102,15,56,0,224 - movdqa %xmm10,%xmm2 - 
pxor %xmm5,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm10,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%r9),%xmm5 - pxor %xmm1,%xmm3 - jnz .Lenc_loop - - - movdqa -96(%r10),%xmm4 - movdqa -80(%r10),%xmm0 -.byte 102,15,56,0,226 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,195 - movdqa 64(%r11,%r10,1),%xmm1 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,193 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_encrypt_core,.-_vpaes_encrypt_core - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -.type _vpaes_encrypt_core_2x,@function -.align 16 -_vpaes_encrypt_core_2x: -.cfi_startproc - movq %rdx,%r9 - movq $16,%r11 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa %xmm9,%xmm7 - movdqa .Lk_ipt(%rip),%xmm2 - movdqa %xmm2,%xmm8 - pandn %xmm0,%xmm1 - pandn %xmm6,%xmm7 - movdqu (%r9),%xmm5 - - psrld $4,%xmm1 - psrld $4,%xmm7 - pand %xmm9,%xmm0 - pand %xmm9,%xmm6 -.byte 102,15,56,0,208 -.byte 102,68,15,56,0,198 - movdqa .Lk_ipt+16(%rip),%xmm0 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,247 - pxor %xmm5,%xmm2 - pxor %xmm5,%xmm8 - addq $16,%r9 - pxor %xmm2,%xmm0 - pxor %xmm8,%xmm6 - leaq .Lk_mc_backward(%rip),%r10 - jmp .Lenc2x_entry - -.align 16 -.Lenc2x_loop: - - movdqa .Lk_sb1(%rip),%xmm4 - movdqa .Lk_sb1+16(%rip),%xmm0 - movdqa %xmm4,%xmm12 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,226 -.byte 102,69,15,56,0,224 -.byte 102,15,56,0,195 -.byte 102,65,15,56,0,243 - pxor %xmm5,%xmm4 - pxor %xmm5,%xmm12 - movdqa .Lk_sb2(%rip),%xmm5 - movdqa %xmm5,%xmm13 - pxor %xmm4,%xmm0 - pxor %xmm12,%xmm6 - movdqa -64(%r11,%r10,1),%xmm1 - -.byte 102,15,56,0,234 -.byte 102,69,15,56,0,232 - movdqa (%r11,%r10,1),%xmm4 - - movdqa .Lk_sb2+16(%rip),%xmm2 - movdqa %xmm2,%xmm8 -.byte 102,15,56,0,211 -.byte 102,69,15,56,0,195 - movdqa %xmm0,%xmm3 - movdqa %xmm6,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm13,%xmm8 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - addq $16,%r9 - pxor %xmm2,%xmm0 - pxor %xmm8,%xmm6 -.byte 102,15,56,0,220 -.byte 102,68,15,56,0,220 - addq $16,%r11 - pxor 
%xmm0,%xmm3 - pxor %xmm6,%xmm11 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - andq $0x30,%r11 - subq $1,%rax - pxor %xmm3,%xmm0 - pxor %xmm11,%xmm6 - -.Lenc2x_entry: - - movdqa %xmm9,%xmm1 - movdqa %xmm9,%xmm7 - movdqa .Lk_inv+16(%rip),%xmm5 - movdqa %xmm5,%xmm13 - pandn %xmm0,%xmm1 - pandn %xmm6,%xmm7 - psrld $4,%xmm1 - psrld $4,%xmm7 - pand %xmm9,%xmm0 - pand %xmm9,%xmm6 -.byte 102,15,56,0,232 -.byte 102,68,15,56,0,238 - movdqa %xmm10,%xmm3 - movdqa %xmm10,%xmm11 - pxor %xmm1,%xmm0 - pxor %xmm7,%xmm6 -.byte 102,15,56,0,217 -.byte 102,68,15,56,0,223 - movdqa %xmm10,%xmm4 - movdqa %xmm10,%xmm12 - pxor %xmm5,%xmm3 - pxor %xmm13,%xmm11 -.byte 102,15,56,0,224 -.byte 102,68,15,56,0,230 - movdqa %xmm10,%xmm2 - movdqa %xmm10,%xmm8 - pxor %xmm5,%xmm4 - pxor %xmm13,%xmm12 -.byte 102,15,56,0,211 -.byte 102,69,15,56,0,195 - movdqa %xmm10,%xmm3 - movdqa %xmm10,%xmm11 - pxor %xmm0,%xmm2 - pxor %xmm6,%xmm8 -.byte 102,15,56,0,220 -.byte 102,69,15,56,0,220 - movdqu (%r9),%xmm5 - - pxor %xmm1,%xmm3 - pxor %xmm7,%xmm11 - jnz .Lenc2x_loop - - - movdqa -96(%r10),%xmm4 - movdqa -80(%r10),%xmm0 - movdqa %xmm4,%xmm12 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,226 -.byte 102,69,15,56,0,224 - pxor %xmm5,%xmm4 - pxor %xmm5,%xmm12 -.byte 102,15,56,0,195 -.byte 102,65,15,56,0,243 - movdqa 64(%r11,%r10,1),%xmm1 - - pxor %xmm4,%xmm0 - pxor %xmm12,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_encrypt_core_2x,.-_vpaes_encrypt_core_2x - - - - - - -.type _vpaes_decrypt_core,@function -.align 16 -_vpaes_decrypt_core: -.cfi_startproc - movq %rdx,%r9 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa .Lk_dipt(%rip),%xmm2 - pandn %xmm0,%xmm1 - movq %rax,%r11 - psrld $4,%xmm1 - movdqu (%r9),%xmm5 - shlq $4,%r11 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa .Lk_dipt+16(%rip),%xmm0 - xorq $0x30,%r11 - leaq .Lk_dsbd(%rip),%r10 -.byte 102,15,56,0,193 - andq $0x30,%r11 - pxor %xmm5,%xmm2 - movdqa .Lk_mc_forward+48(%rip),%xmm5 - pxor %xmm2,%xmm0 - addq 
$16,%r9 - addq %r10,%r11 - jmp .Ldec_entry - -.align 16 -.Ldec_loop: - - - - movdqa -32(%r10),%xmm4 - movdqa -16(%r10),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 0(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 16(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 32(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 48(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 64(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 80(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - addq $16,%r9 -.byte 102,15,58,15,237,12 - pxor %xmm1,%xmm0 - subq $1,%rax - -.Ldec_entry: - - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - movdqa %xmm11,%xmm2 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa %xmm10,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm10,%xmm4 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm10,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm10,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%r9),%xmm0 - pxor %xmm1,%xmm3 - jnz .Ldec_loop - - - movdqa 96(%r10),%xmm4 -.byte 102,15,56,0,226 - pxor %xmm0,%xmm4 - movdqa 112(%r10),%xmm0 - movdqa -352(%r11),%xmm2 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,194 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_decrypt_core,.-_vpaes_decrypt_core - - - - - - -.type _vpaes_schedule_core,@function -.align 16 -_vpaes_schedule_core: -.cfi_startproc - - - - - - call _vpaes_preheat - movdqa .Lk_rcon(%rip),%xmm8 - movdqu (%rdi),%xmm0 - - - movdqa %xmm0,%xmm3 - leaq .Lk_ipt(%rip),%r11 - call _vpaes_schedule_transform - movdqa %xmm0,%xmm7 - - leaq .Lk_sr(%rip),%r10 - testq %rcx,%rcx - jnz .Lschedule_am_decrypting - - - movdqu %xmm0,(%rdx) - jmp .Lschedule_go - -.Lschedule_am_decrypting: - - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,217 - movdqu %xmm3,(%rdx) - xorq $0x30,%r8 - 
-.Lschedule_go: - cmpl $192,%esi - ja .Lschedule_256 - je .Lschedule_192 - - - - - - - - - - -.Lschedule_128: - movl $10,%esi - -.Loop_schedule_128: - call _vpaes_schedule_round - decq %rsi - jz .Lschedule_mangle_last - call _vpaes_schedule_mangle - jmp .Loop_schedule_128 - - - - - - - - - - - - - - - - -.align 16 -.Lschedule_192: - movdqu 8(%rdi),%xmm0 - call _vpaes_schedule_transform - movdqa %xmm0,%xmm6 - pxor %xmm4,%xmm4 - movhlps %xmm4,%xmm6 - movl $4,%esi - -.Loop_schedule_192: - call _vpaes_schedule_round -.byte 102,15,58,15,198,8 - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - call _vpaes_schedule_mangle - call _vpaes_schedule_round - decq %rsi - jz .Lschedule_mangle_last - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - jmp .Loop_schedule_192 - - - - - - - - - - - -.align 16 -.Lschedule_256: - movdqu 16(%rdi),%xmm0 - call _vpaes_schedule_transform - movl $7,%esi - -.Loop_schedule_256: - call _vpaes_schedule_mangle - movdqa %xmm0,%xmm6 - - - call _vpaes_schedule_round - decq %rsi - jz .Lschedule_mangle_last - call _vpaes_schedule_mangle - - - pshufd $0xFF,%xmm0,%xmm0 - movdqa %xmm7,%xmm5 - movdqa %xmm6,%xmm7 - call _vpaes_schedule_low_round - movdqa %xmm5,%xmm7 - - jmp .Loop_schedule_256 - - - - - - - - - - - - -.align 16 -.Lschedule_mangle_last: - - leaq .Lk_deskew(%rip),%r11 - testq %rcx,%rcx - jnz .Lschedule_mangle_last_dec - - - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,193 - leaq .Lk_opt(%rip),%r11 - addq $32,%rdx - -.Lschedule_mangle_last_dec: - addq $-16,%rdx - pxor .Lk_s63(%rip),%xmm0 - call _vpaes_schedule_transform - movdqu %xmm0,(%rdx) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_schedule_core,.-_vpaes_schedule_core - - - - - - - - - - - - - - - -.type _vpaes_schedule_192_smear,@function -.align 16 -_vpaes_schedule_192_smear: -.cfi_startproc - pshufd 
$0x80,%xmm6,%xmm1 - pshufd $0xFE,%xmm7,%xmm0 - pxor %xmm1,%xmm6 - pxor %xmm1,%xmm1 - pxor %xmm0,%xmm6 - movdqa %xmm6,%xmm0 - movhlps %xmm1,%xmm6 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear - - - - - - - - - - - - - - - - - - - -.type _vpaes_schedule_round,@function -.align 16 -_vpaes_schedule_round: -.cfi_startproc - - pxor %xmm1,%xmm1 -.byte 102,65,15,58,15,200,15 -.byte 102,69,15,58,15,192,15 - pxor %xmm1,%xmm7 - - - pshufd $0xFF,%xmm0,%xmm0 -.byte 102,15,58,15,192,1 - - - - -_vpaes_schedule_low_round: - - movdqa %xmm7,%xmm1 - pslldq $4,%xmm7 - pxor %xmm1,%xmm7 - movdqa %xmm7,%xmm1 - pslldq $8,%xmm7 - pxor %xmm1,%xmm7 - pxor .Lk_s63(%rip),%xmm7 - - - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 - movdqa %xmm11,%xmm2 -.byte 102,15,56,0,208 - pxor %xmm1,%xmm0 - movdqa %xmm10,%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - movdqa %xmm10,%xmm4 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm10,%xmm2 -.byte 102,15,56,0,211 - pxor %xmm0,%xmm2 - movdqa %xmm10,%xmm3 -.byte 102,15,56,0,220 - pxor %xmm1,%xmm3 - movdqa %xmm13,%xmm4 -.byte 102,15,56,0,226 - movdqa %xmm12,%xmm0 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 - - - pxor %xmm7,%xmm0 - movdqa %xmm0,%xmm7 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_schedule_round,.-_vpaes_schedule_round - - - - - - - - - - -.type _vpaes_schedule_transform,@function -.align 16 -_vpaes_schedule_transform: -.cfi_startproc - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 - movdqa (%r11),%xmm2 -.byte 102,15,56,0,208 - movdqa 16(%r11),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm2,%xmm0 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_schedule_transform,.-_vpaes_schedule_transform - - - - - - - - - - - - - - - - - - - - - - - - -.type _vpaes_schedule_mangle,@function -.align 16 -_vpaes_schedule_mangle: -.cfi_startproc - movdqa %xmm0,%xmm4 - movdqa .Lk_mc_forward(%rip),%xmm5 - testq %rcx,%rcx - jnz .Lschedule_mangle_dec - - - addq 
$16,%rdx - pxor .Lk_s63(%rip),%xmm4 -.byte 102,15,56,0,229 - movdqa %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 - - jmp .Lschedule_mangle_both -.align 16 -.Lschedule_mangle_dec: - - leaq .Lk_dksd(%rip),%r11 - movdqa %xmm9,%xmm1 - pandn %xmm4,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm4 - - movdqa 0(%r11),%xmm2 -.byte 102,15,56,0,212 - movdqa 16(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 32(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 48(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 64(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 80(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 96(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 112(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - - addq $-16,%rdx - -.Lschedule_mangle_both: - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,217 - addq $-16,%r8 - andq $0x30,%r8 - movdqu %xmm3,(%rdx) - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle - - - - -.globl vpaes_set_encrypt_key -.hidden vpaes_set_encrypt_key -.type vpaes_set_encrypt_key,@function -.align 16 -vpaes_set_encrypt_key: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST -.extern BORINGSSL_function_hit -.hidden BORINGSSL_function_hit - movb $1,BORINGSSL_function_hit+5(%rip) -#endif - - movl %esi,%eax - shrl $5,%eax - addl $5,%eax - movl %eax,240(%rdx) - - movl $0,%ecx - movl $0x30,%r8d - call _vpaes_schedule_core - xorl %eax,%eax - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key - -.globl vpaes_set_decrypt_key -.hidden vpaes_set_decrypt_key -.type vpaes_set_decrypt_key,@function -.align 16 -vpaes_set_decrypt_key: -.cfi_startproc - movl %esi,%eax - shrl $5,%eax - addl $5,%eax - movl %eax,240(%rdx) - shll $4,%eax - leaq 16(%rdx,%rax,1),%rdx - - movl $1,%ecx - movl %esi,%r8d 
- shrl $1,%r8d - andl $32,%r8d - xorl $32,%r8d - call _vpaes_schedule_core - xorl %eax,%eax - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key - -.globl vpaes_encrypt -.hidden vpaes_encrypt -.type vpaes_encrypt,@function -.align 16 -vpaes_encrypt: -.cfi_startproc -#ifdef BORINGSSL_DISPATCH_TEST -.extern BORINGSSL_function_hit -.hidden BORINGSSL_function_hit - movb $1,BORINGSSL_function_hit+4(%rip) -#endif - movdqu (%rdi),%xmm0 - call _vpaes_preheat - call _vpaes_encrypt_core - movdqu %xmm0,(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_encrypt,.-vpaes_encrypt - -.globl vpaes_decrypt -.hidden vpaes_decrypt -.type vpaes_decrypt,@function -.align 16 -vpaes_decrypt: -.cfi_startproc - movdqu (%rdi),%xmm0 - call _vpaes_preheat - call _vpaes_decrypt_core - movdqu %xmm0,(%rsi) - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_decrypt,.-vpaes_decrypt -.globl vpaes_cbc_encrypt -.hidden vpaes_cbc_encrypt -.type vpaes_cbc_encrypt,@function -.align 16 -vpaes_cbc_encrypt: -.cfi_startproc - xchgq %rcx,%rdx - subq $16,%rcx - jc .Lcbc_abort - movdqu (%r8),%xmm6 - subq %rdi,%rsi - call _vpaes_preheat - cmpl $0,%r9d - je .Lcbc_dec_loop - jmp .Lcbc_enc_loop -.align 16 -.Lcbc_enc_loop: - movdqu (%rdi),%xmm0 - pxor %xmm6,%xmm0 - call _vpaes_encrypt_core - movdqa %xmm0,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - leaq 16(%rdi),%rdi - subq $16,%rcx - jnc .Lcbc_enc_loop - jmp .Lcbc_done -.align 16 -.Lcbc_dec_loop: - movdqu (%rdi),%xmm0 - movdqa %xmm0,%xmm7 - call _vpaes_decrypt_core - pxor %xmm6,%xmm0 - movdqa %xmm7,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - leaq 16(%rdi),%rdi - subq $16,%rcx - jnc .Lcbc_dec_loop -.Lcbc_done: - movdqu %xmm6,(%r8) -.Lcbc_abort: - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt -.globl vpaes_ctr32_encrypt_blocks -.hidden vpaes_ctr32_encrypt_blocks -.type vpaes_ctr32_encrypt_blocks,@function -.align 16 -vpaes_ctr32_encrypt_blocks: -.cfi_startproc - - xchgq %rcx,%rdx - testq %rcx,%rcx - jz .Lctr32_abort - movdqu 
(%r8),%xmm0 - movdqa .Lctr_add_one(%rip),%xmm8 - subq %rdi,%rsi - call _vpaes_preheat - movdqa %xmm0,%xmm6 - pshufb .Lrev_ctr(%rip),%xmm6 - - testq $1,%rcx - jz .Lctr32_prep_loop - - - - movdqu (%rdi),%xmm7 - call _vpaes_encrypt_core - pxor %xmm7,%xmm0 - paddd %xmm8,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - subq $1,%rcx - leaq 16(%rdi),%rdi - jz .Lctr32_done - -.Lctr32_prep_loop: - - - movdqa %xmm6,%xmm14 - movdqa %xmm6,%xmm15 - paddd %xmm8,%xmm15 - -.Lctr32_loop: - movdqa .Lrev_ctr(%rip),%xmm1 - movdqa %xmm14,%xmm0 - movdqa %xmm15,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - call _vpaes_encrypt_core_2x - movdqu (%rdi),%xmm1 - movdqu 16(%rdi),%xmm2 - movdqa .Lctr_add_two(%rip),%xmm3 - pxor %xmm1,%xmm0 - pxor %xmm2,%xmm6 - paddd %xmm3,%xmm14 - paddd %xmm3,%xmm15 - movdqu %xmm0,(%rsi,%rdi,1) - movdqu %xmm6,16(%rsi,%rdi,1) - subq $2,%rcx - leaq 32(%rdi),%rdi - jnz .Lctr32_loop - -.Lctr32_done: -.Lctr32_abort: - .byte 0xf3,0xc3 -.cfi_endproc -.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks - - - - - - -.type _vpaes_preheat,@function -.align 16 -_vpaes_preheat: -.cfi_startproc - leaq .Lk_s0F(%rip),%r10 - movdqa -32(%r10),%xmm10 - movdqa -16(%r10),%xmm11 - movdqa 0(%r10),%xmm9 - movdqa 48(%r10),%xmm13 - movdqa 64(%r10),%xmm12 - movdqa 80(%r10),%xmm15 - movdqa 96(%r10),%xmm14 - .byte 0xf3,0xc3 -.cfi_endproc -.size _vpaes_preheat,.-_vpaes_preheat - - - - - -.type _vpaes_consts,@object -.align 64 -_vpaes_consts: -.Lk_inv: -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 - -.Lk_s0F: -.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F - -.Lk_ipt: -.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 - -.Lk_sb1: -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -.Lk_sb2: -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -.Lk_sbo: -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 
0x8E1E90D1412B35FA - -.Lk_mc_forward: -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 - -.Lk_mc_backward: -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F - -.Lk_sr: -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -.Lk_rcon: -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -.Lk_s63: -.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B - -.Lk_opt: -.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 - -.Lk_deskew: -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 - - - - - -.Lk_dksd: -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -.Lk_dksb: -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -.Lk_dkse: -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -.Lk_dks9: -.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - - - - - -.Lk_dipt: -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 - -.Lk_dsb9: -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -.Lk_dsbd: -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -.Lk_dsbb: -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -.Lk_dsbe: -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 -.Lk_dsbo: -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 
0xCA4B8159D8C58E9C - - -.Lrev_ctr: -.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 - - -.Lctr_add_one: -.quad 0x0000000000000000, 0x0000000100000000 -.Lctr_add_two: -.quad 0x0000000000000000, 0x0000000200000000 - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.align 64 -.size _vpaes_consts,.-_vpaes_consts -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont.S deleted file mode 100644 index bdb4454212..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont.S +++ /dev/null @@ -1,1260 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -.globl bn_mul_mont -.hidden bn_mul_mont -.type bn_mul_mont,@function -.align 16 -bn_mul_mont: -.cfi_startproc - movl %r9d,%r9d - movq %rsp,%rax -.cfi_def_cfa_register %rax - testl $3,%r9d - jnz .Lmul_enter - cmpl $8,%r9d - jb .Lmul_enter - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - cmpq %rsi,%rdx - jne .Lmul4x_enter - testl $7,%r9d - jz .Lsqr8x_enter - jmp .Lmul4x_enter - -.align 16 -.Lmul_enter: - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - - negq %r9 - movq %rsp,%r11 - leaq -16(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - - - - - - - - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul_page_walk - jmp .Lmul_page_walk_done - -.align 16 -.Lmul_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul_page_walk -.Lmul_page_walk_done: - - movq %rax,8(%rsp,%r9,8) -.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 -.Lmul_body: - movq %rdx,%r12 - movq (%r8),%r8 - movq (%r12),%rbx - movq (%rsi),%rax - - xorq %r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp .L1st_enter - -.align 16 -.L1st: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r13 - movq %r10,%r11 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -.L1st_enter: - mulq %rbx - addq %rax,%r11 - movq 
(%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 1(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - cmpq %r9,%r15 - jne .L1st - - addq %rax,%r13 - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - movq %r10,%r11 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - jmp .Louter -.align 16 -.Louter: - movq (%r12,%r14,8),%rbx - xorq %r15,%r15 - movq %r8,%rbp - movq (%rsp),%r10 - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq 8(%rsp),%r10 - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp .Linner_enter - -.align 16 -.Linner: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -.Linner_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - leaq 1(%r15),%r15 - - mulq %rbp - cmpq %r9,%r15 - jne .Linner - - addq %rax,%r13 - movq (%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - cmpq %r9,%r14 - jb .Louter - - xorq %r14,%r14 - movq (%rsp),%rax - movq %r9,%r15 - -.align 16 -.Lsub: sbbq (%rcx,%r14,8),%rax - movq %rax,(%rdi,%r14,8) - movq 8(%rsp,%r14,8),%rax - leaq 1(%r14),%r14 - decq %r15 - jnz .Lsub - - sbbq $0,%rax - movq $-1,%rbx - xorq %rax,%rbx - xorq %r14,%r14 - movq %r9,%r15 - -.Lcopy: - movq (%rdi,%r14,8),%rcx - movq (%rsp,%r14,8),%rdx - andq %rbx,%rcx - andq %rax,%rdx - movq %r9,(%rsp,%r14,8) - orq %rcx,%rdx - movq %rdx,(%rdi,%r14,8) - leaq 1(%r14),%r14 - subq $1,%r15 - jnz .Lcopy - - movq 8(%rsp,%r9,8),%rsi 
-.cfi_def_cfa %rsi,8 - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmul_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mul_mont,.-bn_mul_mont -.type bn_mul4x_mont,@function -.align 16 -bn_mul4x_mont: -.cfi_startproc - movl %r9d,%r9d - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lmul4x_enter: - andl $0x80100,%r11d - cmpl $0x80100,%r11d - je .Lmulx4x_enter - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - - negq %r9 - movq %rsp,%r11 - leaq -32(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul4x_page_walk - jmp .Lmul4x_page_walk_done - -.Lmul4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul4x_page_walk -.Lmul4x_page_walk_done: - - movq %rax,8(%rsp,%r9,8) -.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 -.Lmul4x_body: - movq %rdi,16(%rsp,%r9,8) - movq %rdx,%r12 - movq (%r8),%r8 - movq (%r12),%rbx - movq (%rsi),%rax - - xorq %r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 4(%r15),%r15 - adcq $0,%rdx - movq %rdi,(%rsp) - movq %rdx,%r13 - jmp .L1st4x -.align 16 -.L1st4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - movq 
%rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 4(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq -16(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-32(%rsp,%r15,8) - movq %rdx,%r13 - cmpq %r9,%r15 - jb .L1st4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - movq %r13,-8(%rsp,%r15,8) - movq %rdi,(%rsp,%r15,8) - - leaq 1(%r14),%r14 -.align 4 -.Louter4x: - movq (%r12,%r14,8),%rbx - xorq %r15,%r15 - movq (%rsp),%r10 - movq %r8,%rbp - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%rsp),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi),%rax - adcq 
$0,%rdx - addq %r11,%rdi - leaq 4(%r15),%r15 - adcq $0,%rdx - movq %rdi,(%rsp) - movq %rdx,%r13 - jmp .Linner4x -.align 16 -.Linner4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -16(%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -8(%rsp,%r15,8),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq 8(%rsp,%r15,8),%r11 - adcq $0,%rdx - leaq 4(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq -16(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-32(%rsp,%r15,8) - movq %rdx,%r13 - cmpq %r9,%r15 - jb .Linner4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -16(%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -8(%rsp,%r15,8),%r11 - adcq $0,%rdx - leaq 1(%r14),%r14 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - addq (%rsp,%r9,8),%r13 - adcq $0,%rdi - 
movq %r13,-8(%rsp,%r15,8) - movq %rdi,(%rsp,%r15,8) - - cmpq %r9,%r14 - jb .Louter4x - movq 16(%rsp,%r9,8),%rdi - leaq -4(%r9),%r15 - movq 0(%rsp),%rax - movq 8(%rsp),%rdx - shrq $2,%r15 - leaq (%rsp),%rsi - xorq %r14,%r14 - - subq 0(%rcx),%rax - movq 16(%rsi),%rbx - movq 24(%rsi),%rbp - sbbq 8(%rcx),%rdx - -.Lsub4x: - movq %rax,0(%rdi,%r14,8) - movq %rdx,8(%rdi,%r14,8) - sbbq 16(%rcx,%r14,8),%rbx - movq 32(%rsi,%r14,8),%rax - movq 40(%rsi,%r14,8),%rdx - sbbq 24(%rcx,%r14,8),%rbp - movq %rbx,16(%rdi,%r14,8) - movq %rbp,24(%rdi,%r14,8) - sbbq 32(%rcx,%r14,8),%rax - movq 48(%rsi,%r14,8),%rbx - movq 56(%rsi,%r14,8),%rbp - sbbq 40(%rcx,%r14,8),%rdx - leaq 4(%r14),%r14 - decq %r15 - jnz .Lsub4x - - movq %rax,0(%rdi,%r14,8) - movq 32(%rsi,%r14,8),%rax - sbbq 16(%rcx,%r14,8),%rbx - movq %rdx,8(%rdi,%r14,8) - sbbq 24(%rcx,%r14,8),%rbp - movq %rbx,16(%rdi,%r14,8) - - sbbq $0,%rax - movq %rbp,24(%rdi,%r14,8) - pxor %xmm0,%xmm0 -.byte 102,72,15,110,224 - pcmpeqd %xmm5,%xmm5 - pshufd $0,%xmm4,%xmm4 - movq %r9,%r15 - pxor %xmm4,%xmm5 - shrq $2,%r15 - xorl %eax,%eax - - jmp .Lcopy4x -.align 16 -.Lcopy4x: - movdqa (%rsp,%rax,1),%xmm1 - movdqu (%rdi,%rax,1),%xmm2 - pand %xmm4,%xmm1 - pand %xmm5,%xmm2 - movdqa 16(%rsp,%rax,1),%xmm3 - movdqa %xmm0,(%rsp,%rax,1) - por %xmm2,%xmm1 - movdqu 16(%rdi,%rax,1),%xmm2 - movdqu %xmm1,(%rdi,%rax,1) - pand %xmm4,%xmm3 - pand %xmm5,%xmm2 - movdqa %xmm0,16(%rsp,%rax,1) - por %xmm2,%xmm3 - movdqu %xmm3,16(%rdi,%rax,1) - leaq 32(%rax),%rax - decq %r15 - jnz .Lcopy4x - movq 8(%rsp,%r9,8),%rsi -.cfi_def_cfa %rsi, 8 - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmul4x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mul4x_mont,.-bn_mul4x_mont -.extern bn_sqrx8x_internal -.hidden 
bn_sqrx8x_internal -.extern bn_sqr8x_internal -.hidden bn_sqr8x_internal - -.type bn_sqr8x_mont,@function -.align 32 -bn_sqr8x_mont: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lsqr8x_enter: - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lsqr8x_prologue: - - movl %r9d,%r10d - shll $3,%r9d - shlq $3+2,%r10 - negq %r9 - - - - - - - leaq -64(%rsp,%r9,2),%r11 - movq %rsp,%rbp - movq (%r8),%r8 - subq %rsi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lsqr8x_sp_alt - subq %r11,%rbp - leaq -64(%rbp,%r9,2),%rbp - jmp .Lsqr8x_sp_done - -.align 32 -.Lsqr8x_sp_alt: - leaq 4096-64(,%r9,2),%r10 - leaq -64(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lsqr8x_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lsqr8x_page_walk - jmp .Lsqr8x_page_walk_done - -.align 16 -.Lsqr8x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lsqr8x_page_walk -.Lsqr8x_page_walk_done: - - movq %r9,%r10 - negq %r9 - - movq %r8,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lsqr8x_body: - -.byte 102,72,15,110,209 - pxor %xmm0,%xmm0 -.byte 102,72,15,110,207 -.byte 102,73,15,110,218 - leaq OPENSSL_ia32cap_P(%rip),%rax - movl 8(%rax),%eax - andl $0x80100,%eax - cmpl $0x80100,%eax - jne .Lsqr8x_nox - - call bn_sqrx8x_internal - - - - - leaq (%r8,%rcx,1),%rbx - movq %rcx,%r9 - movq %rcx,%rdx -.byte 102,72,15,126,207 - sarq $3+2,%rcx - jmp .Lsqr8x_sub - -.align 32 -.Lsqr8x_nox: - call bn_sqr8x_internal - - - - - leaq (%rdi,%r9,1),%rbx - movq %r9,%rcx - movq %r9,%rdx -.byte 102,72,15,126,207 - sarq $3+2,%rcx - jmp .Lsqr8x_sub - -.align 32 -.Lsqr8x_sub: - movq 0(%rbx),%r12 - movq 8(%rbx),%r13 - movq 16(%rbx),%r14 - movq 24(%rbx),%r15 
- leaq 32(%rbx),%rbx - sbbq 0(%rbp),%r12 - sbbq 8(%rbp),%r13 - sbbq 16(%rbp),%r14 - sbbq 24(%rbp),%r15 - leaq 32(%rbp),%rbp - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r14,16(%rdi) - movq %r15,24(%rdi) - leaq 32(%rdi),%rdi - incq %rcx - jnz .Lsqr8x_sub - - sbbq $0,%rax - leaq (%rbx,%r9,1),%rbx - leaq (%rdi,%r9,1),%rdi - -.byte 102,72,15,110,200 - pxor %xmm0,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - jmp .Lsqr8x_cond_copy - -.align 32 -.Lsqr8x_cond_copy: - movdqa 0(%rbx),%xmm2 - movdqa 16(%rbx),%xmm3 - leaq 32(%rbx),%rbx - movdqu 0(%rdi),%xmm4 - movdqu 16(%rdi),%xmm5 - leaq 32(%rdi),%rdi - movdqa %xmm0,-32(%rbx) - movdqa %xmm0,-16(%rbx) - movdqa %xmm0,-32(%rbx,%rdx,1) - movdqa %xmm0,-16(%rbx,%rdx,1) - pcmpeqd %xmm1,%xmm0 - pand %xmm1,%xmm2 - pand %xmm1,%xmm3 - pand %xmm0,%xmm4 - pand %xmm0,%xmm5 - pxor %xmm0,%xmm0 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqu %xmm4,-32(%rdi) - movdqu %xmm5,-16(%rdi) - addq $32,%r9 - jnz .Lsqr8x_cond_copy - - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lsqr8x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_sqr8x_mont,.-bn_sqr8x_mont -.type bn_mulx4x_mont,@function -.align 32 -bn_mulx4x_mont: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lmulx4x_enter: - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lmulx4x_prologue: - - shll $3,%r9d - xorq %r10,%r10 - subq %r9,%r10 - movq (%r8),%r8 - leaq -72(%rsp,%r10,1),%rbp - andq $-128,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja 
.Lmulx4x_page_walk - jmp .Lmulx4x_page_walk_done - -.align 16 -.Lmulx4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lmulx4x_page_walk -.Lmulx4x_page_walk_done: - - leaq (%rdx,%r9,1),%r10 - - - - - - - - - - - - - movq %r9,0(%rsp) - shrq $5,%r9 - movq %r10,16(%rsp) - subq $1,%r9 - movq %r8,24(%rsp) - movq %rdi,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 - movq %r9,48(%rsp) - jmp .Lmulx4x_body - -.align 32 -.Lmulx4x_body: - leaq 8(%rdx),%rdi - movq (%rdx),%rdx - leaq 64+32(%rsp),%rbx - movq %rdx,%r9 - - mulxq 0(%rsi),%r8,%rax - mulxq 8(%rsi),%r11,%r14 - addq %rax,%r11 - movq %rdi,8(%rsp) - mulxq 16(%rsi),%r12,%r13 - adcq %r14,%r12 - adcq $0,%r13 - - movq %r8,%rdi - imulq 24(%rsp),%r8 - xorq %rbp,%rbp - - mulxq 24(%rsi),%rax,%r14 - movq %r8,%rdx - leaq 32(%rsi),%rsi - adcxq %rax,%r13 - adcxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%rdi - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 -.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 - movq 48(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - adcxq %rax,%r12 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r12,-16(%rbx) - - jmp .Lmulx4x_1st - -.align 32 -.Lmulx4x_1st: - adcxq %rbp,%r15 - mulxq 0(%rsi),%r10,%rax - adcxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 -.byte 0x67,0x67 - movq %r8,%rdx - adcxq %rax,%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - movq %r11,-32(%rbx) - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq 
%r13,-16(%rbx) - - decq %rdi - jnz .Lmulx4x_1st - - movq 0(%rsp),%rax - movq 8(%rsp),%rdi - adcq %rbp,%r15 - addq %r15,%r14 - sbbq %r15,%r15 - movq %r14,-8(%rbx) - jmp .Lmulx4x_outer - -.align 32 -.Lmulx4x_outer: - movq (%rdi),%rdx - leaq 8(%rdi),%rdi - subq %rax,%rsi - movq %r15,(%rbx) - leaq 64+32(%rsp),%rbx - subq %rax,%rcx - - mulxq 0(%rsi),%r8,%r11 - xorl %ebp,%ebp - movq %rdx,%r9 - mulxq 8(%rsi),%r14,%r12 - adoxq -32(%rbx),%r8 - adcxq %r14,%r11 - mulxq 16(%rsi),%r15,%r13 - adoxq -24(%rbx),%r11 - adcxq %r15,%r12 - adoxq -16(%rbx),%r12 - adcxq %rbp,%r13 - adoxq %rbp,%r13 - - movq %rdi,8(%rsp) - movq %r8,%r15 - imulq 24(%rsp),%r8 - xorl %ebp,%ebp - - mulxq 24(%rsi),%rax,%r14 - movq %r8,%rdx - adcxq %rax,%r13 - adoxq -8(%rbx),%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - adoxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - mulxq 16(%rcx),%rax,%r12 - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - leaq 32(%rcx),%rcx - adcxq %rax,%r12 - adoxq %rbp,%r15 - movq 48(%rsp),%rdi - movq %r12,-16(%rbx) - - jmp .Lmulx4x_inner - -.align 32 -.Lmulx4x_inner: - mulxq 0(%rsi),%r10,%rax - adcxq %rbp,%r15 - adoxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq 0(%rbx),%r10 - adoxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq 8(%rbx),%r11 - adoxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 - movq %r8,%rdx - adcxq 16(%rbx),%r12 - adoxq %rax,%r13 - adcxq 24(%rbx),%r13 - adoxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - adcxq %rbp,%r14 - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-32(%rbx) - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r13,-16(%rbx) 
- - decq %rdi - jnz .Lmulx4x_inner - - movq 0(%rsp),%rax - movq 8(%rsp),%rdi - adcq %rbp,%r15 - subq 0(%rbx),%rbp - adcq %r15,%r14 - sbbq %r15,%r15 - movq %r14,-8(%rbx) - - cmpq 16(%rsp),%rdi - jne .Lmulx4x_outer - - leaq 64(%rsp),%rbx - subq %rax,%rcx - negq %r15 - movq %rax,%rdx - shrq $3+2,%rax - movq 32(%rsp),%rdi - jmp .Lmulx4x_sub - -.align 32 -.Lmulx4x_sub: - movq 0(%rbx),%r11 - movq 8(%rbx),%r12 - movq 16(%rbx),%r13 - movq 24(%rbx),%r14 - leaq 32(%rbx),%rbx - sbbq 0(%rcx),%r11 - sbbq 8(%rcx),%r12 - sbbq 16(%rcx),%r13 - sbbq 24(%rcx),%r14 - leaq 32(%rcx),%rcx - movq %r11,0(%rdi) - movq %r12,8(%rdi) - movq %r13,16(%rdi) - movq %r14,24(%rdi) - leaq 32(%rdi),%rdi - decq %rax - jnz .Lmulx4x_sub - - sbbq $0,%r15 - leaq 64(%rsp),%rbx - subq %rdx,%rdi - -.byte 102,73,15,110,207 - pxor %xmm0,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - jmp .Lmulx4x_cond_copy - -.align 32 -.Lmulx4x_cond_copy: - movdqa 0(%rbx),%xmm2 - movdqa 16(%rbx),%xmm3 - leaq 32(%rbx),%rbx - movdqu 0(%rdi),%xmm4 - movdqu 16(%rdi),%xmm5 - leaq 32(%rdi),%rdi - movdqa %xmm0,-32(%rbx) - movdqa %xmm0,-16(%rbx) - pcmpeqd %xmm1,%xmm0 - pand %xmm1,%xmm2 - pand %xmm1,%xmm3 - pand %xmm0,%xmm4 - pand %xmm0,%xmm5 - pxor %xmm0,%xmm0 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqu %xmm4,-32(%rdi) - movdqu %xmm5,-16(%rdi) - subq $32,%rdx - jnz .Lmulx4x_cond_copy - - movq %rdx,(%rbx) - - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmulx4x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mulx4x_mont,.-bn_mulx4x_mont -.byte 
77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.align 16 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S b/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S deleted file mode 100644 index c86b3b0a59..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S +++ /dev/null @@ -1,3790 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P - -.globl bn_mul_mont_gather5 -.hidden bn_mul_mont_gather5 -.type bn_mul_mont_gather5,@function -.align 64 -bn_mul_mont_gather5: -.cfi_startproc - movl %r9d,%r9d - movq %rsp,%rax -.cfi_def_cfa_register %rax - testl $7,%r9d - jnz .Lmul_enter - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - jmp .Lmul4x_enter - -.align 16 -.Lmul_enter: - movd 8(%rsp),%xmm5 - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 - - negq %r9 - movq %rsp,%r11 - leaq -280(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - - - - - - - - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul_page_walk - jmp .Lmul_page_walk_done - -.Lmul_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja .Lmul_page_walk 
-.Lmul_page_walk_done: - - leaq .Linc(%rip),%r10 - movq %rax,8(%rsp,%r9,8) -.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 -.Lmul_body: - - leaq 128(%rdx),%r12 - movdqa 0(%r10),%xmm0 - movdqa 16(%r10),%xmm1 - leaq 24-112(%rsp,%r9,8),%r10 - andq $-16,%r10 - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 -.byte 0x67 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 -.byte 0x67 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - pand 64(%r12),%xmm0 - - pand 80(%r12),%xmm1 - pand 96(%r12),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%r12),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%r12),%xmm4 - movdqa -112(%r12),%xmm5 - movdqa -96(%r12),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%r12),%xmm3 - pand 128(%r10),%xmm5 - por 
%xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%r12),%xmm4 - movdqa -48(%r12),%xmm5 - movdqa -32(%r12),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%r12),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%r12),%xmm4 - movdqa 16(%r12),%xmm5 - movdqa 32(%r12),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%r12),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - por %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq (%r8),%r8 - movq (%rsi),%rax - - xorq %r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp .L1st_enter - -.align 16 -.L1st: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r13 - movq %r10,%r11 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -.L1st_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 1(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - cmpq %r9,%r15 - jne .L1st - - - addq %rax,%r13 - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-16(%rsp,%r9,8) - movq %rdx,%r13 - movq %r10,%r11 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - jmp .Louter -.align 16 -.Louter: - leaq 24+128(%rsp,%r9,8),%rdx - andq $-16,%rdx - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - movdqa -128(%r12),%xmm0 - movdqa -112(%r12),%xmm1 - movdqa -96(%r12),%xmm2 - movdqa -80(%r12),%xmm3 - pand -128(%rdx),%xmm0 - pand -112(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -80(%rdx),%xmm3 - por 
%xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r12),%xmm0 - movdqa -48(%r12),%xmm1 - movdqa -32(%r12),%xmm2 - movdqa -16(%r12),%xmm3 - pand -64(%rdx),%xmm0 - pand -48(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -16(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r12),%xmm0 - movdqa 16(%r12),%xmm1 - movdqa 32(%r12),%xmm2 - movdqa 48(%r12),%xmm3 - pand 0(%rdx),%xmm0 - pand 16(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 48(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r12),%xmm0 - movdqa 80(%r12),%xmm1 - movdqa 96(%r12),%xmm2 - movdqa 112(%r12),%xmm3 - pand 64(%rdx),%xmm0 - pand 80(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 112(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%r12),%r12 - - movq (%rsi),%rax -.byte 102,72,15,126,195 - - xorq %r15,%r15 - movq %r8,%rbp - movq (%rsp),%r10 - - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq 8(%rsp),%r10 - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp .Linner_enter - -.align 16 -.Linner: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -.Linner_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - leaq 1(%r15),%r15 - - mulq %rbp - cmpq %r9,%r15 - jne .Linner - - addq %rax,%r13 - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r9,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r9,8) - movq %rdx,%r13 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - cmpq %r9,%r14 - jb .Louter - - xorq %r14,%r14 - movq (%rsp),%rax - leaq 
(%rsp),%rsi - movq %r9,%r15 - jmp .Lsub -.align 16 -.Lsub: sbbq (%rcx,%r14,8),%rax - movq %rax,(%rdi,%r14,8) - movq 8(%rsi,%r14,8),%rax - leaq 1(%r14),%r14 - decq %r15 - jnz .Lsub - - sbbq $0,%rax - movq $-1,%rbx - xorq %rax,%rbx - xorq %r14,%r14 - movq %r9,%r15 - -.Lcopy: - movq (%rdi,%r14,8),%rcx - movq (%rsp,%r14,8),%rdx - andq %rbx,%rcx - andq %rax,%rdx - movq %r14,(%rsp,%r14,8) - orq %rcx,%rdx - movq %rdx,(%rdi,%r14,8) - leaq 1(%r14),%r14 - subq $1,%r15 - jnz .Lcopy - - movq 8(%rsp,%r9,8),%rsi -.cfi_def_cfa %rsi,8 - movq $1,%rax - - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmul_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mul_mont_gather5,.-bn_mul_mont_gather5 -.type bn_mul4x_mont_gather5,@function -.align 32 -bn_mul4x_mont_gather5: -.cfi_startproc -.byte 0x67 - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lmul4x_enter: - andl $0x80108,%r11d - cmpl $0x80108,%r11d - je .Lmulx4x_enter - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lmul4x_prologue: - -.byte 0x67 - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - - - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lmul4xsp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp .Lmul4xsp_done - -.align 32 -.Lmul4xsp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lmul4xsp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lmul4x_page_walk - jmp 
.Lmul4x_page_walk_done - -.Lmul4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lmul4x_page_walk -.Lmul4x_page_walk_done: - - negq %r9 - - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lmul4x_body: - - call mul4x_internal - - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq $1,%rax - - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmul4x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5 - -.type mul4x_internal,@function -.align 32 -mul4x_internal: -.cfi_startproc - shlq $5,%r9 - movd 8(%rax),%xmm5 - leaq .Linc(%rip),%rax - leaq 128(%rdx,%r9,1),%r13 - shrq $5,%r9 - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 88-112(%rsp,%r9,1),%r10 - leaq 128(%rdx),%r12 - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 -.byte 0x67,0x67 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 -.byte 0x67 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - 
pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 -.byte 0x67 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - pand 64(%r12),%xmm0 - - pand 80(%r12),%xmm1 - pand 96(%r12),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%r12),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%r12),%xmm4 - movdqa -112(%r12),%xmm5 - movdqa -96(%r12),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%r12),%xmm3 - pand 128(%r10),%xmm5 - por %xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%r12),%xmm4 - movdqa -48(%r12),%xmm5 - movdqa -32(%r12),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%r12),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%r12),%xmm4 - movdqa 16(%r12),%xmm5 - movdqa 32(%r12),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%r12),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - por %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq %r13,16+8(%rsp) - movq %rdi,56+8(%rsp) - - movq (%r8),%r8 - movq (%rsi),%rax - leaq (%rsi,%r9,1),%rsi - negq %r9 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - leaq 64+8(%rsp),%r14 - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi,%r9,1),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r9,1),%rax - adcq $0,%rdx - addq 
%r11,%rdi - leaq 32(%r9),%r15 - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdi,(%r14) - movq %rdx,%r13 - jmp .L1st4x - -.align 32 -.L1st4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq 0(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdi,(%r14) - movq %rdx,%r13 - - addq $32,%r15 - jnz .L1st4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%r13 - - leaq (%rcx,%r9,1),%rcx - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - movq %r13,-8(%r14) - - jmp .Louter4x - -.align 32 -.Louter4x: - leaq 16+128(%r14),%rdx - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - movdqa -128(%r12),%xmm0 - movdqa -112(%r12),%xmm1 - movdqa -96(%r12),%xmm2 - movdqa -80(%r12),%xmm3 - pand -128(%rdx),%xmm0 - pand -112(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -96(%rdx),%xmm2 - por 
%xmm1,%xmm5 - pand -80(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r12),%xmm0 - movdqa -48(%r12),%xmm1 - movdqa -32(%r12),%xmm2 - movdqa -16(%r12),%xmm3 - pand -64(%rdx),%xmm0 - pand -48(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -16(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r12),%xmm0 - movdqa 16(%r12),%xmm1 - movdqa 32(%r12),%xmm2 - movdqa 48(%r12),%xmm3 - pand 0(%rdx),%xmm0 - pand 16(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 48(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r12),%xmm0 - movdqa 80(%r12),%xmm1 - movdqa 96(%r12),%xmm2 - movdqa 112(%r12),%xmm3 - pand 64(%rdx),%xmm0 - pand 80(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 112(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq (%r14,%r9,1),%r10 - movq %r8,%rbp - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - movq %rdi,(%r14) - - leaq (%r14,%r9,1),%r14 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi,%r9,1),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%r9),%r15 - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdx,%r13 - jmp .Linner4x - -.align 32 -.Linner4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - adcq $0,%rdx - addq 16(%r14),%r10 - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %rdi,-32(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - addq -8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 
(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq 0(%rcx),%rax - adcq $0,%rdx - addq (%r14),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %r13,-8(%r14) - movq %rdx,%r13 - - addq $32,%r15 - jnz .Linner4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - adcq $0,%rdx - addq 16(%r14),%r10 - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %rdi,-32(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq %rbp,%rax - movq -8(%rcx),%rbp - adcq $0,%rdx - addq -8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%r13 - - movq %rdi,-16(%r14) - leaq (%rcx,%r9,1),%rcx - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - addq (%r14),%r13 - adcq $0,%rdi - movq %r13,-8(%r14) - - cmpq 16+8(%rsp),%r12 - jb .Louter4x - xorq %rax,%rax - subq %r13,%rbp - adcq %r15,%r15 - orq %r15,%rdi - subq %rdi,%rax - leaq (%r14,%r9,1),%rbx - movq (%rcx),%r12 - leaq (%rcx),%rbp - movq %r9,%rcx - sarq $3+2,%rcx - movq 56+8(%rsp),%rdi - decq %r12 - xorq %r10,%r10 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp .Lsqr4x_sub_entry -.cfi_endproc -.size mul4x_internal,.-mul4x_internal -.globl bn_power5 -.hidden bn_power5 -.type bn_power5,@function -.align 32 -bn_power5: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 
8(%r11),%r11d - andl $0x80108,%r11d - cmpl $0x80108,%r11d - je .Lpowerx5_enter - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lpower5_prologue: - - shll $3,%r9d - leal (%r9,%r9,2),%r10d - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lpwr_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp .Lpwr_sp_done - -.align 32 -.Lpwr_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lpwr_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lpwr_page_walk - jmp .Lpwr_page_walk_done - -.Lpwr_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lpwr_page_walk -.Lpwr_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lpower5_body: -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 102,73,15,110,218 -.byte 102,72,15,110,226 - - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - -.byte 102,72,15,126,209 -.byte 102,72,15,126,226 - movq %rsi,%rdi - movq 40(%rsp),%rax - leaq 32(%rsp),%r8 - - call mul4x_internal - - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq 
-8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpower5_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_power5,.-bn_power5 - -.globl bn_sqr8x_internal -.hidden bn_sqr8x_internal -.hidden bn_sqr8x_internal -.type bn_sqr8x_internal,@function -.align 32 -bn_sqr8x_internal: -__bn_sqr8x_internal: -.cfi_startproc - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - leaq 32(%r10),%rbp - leaq (%rsi,%r9,1),%rsi - - movq %r9,%rcx - - - movq -32(%rsi,%rbp,1),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi,%rbp,1),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq -16(%rsi,%rbp,1),%rbx - movq %rax,%r15 - - mulq %r14 - movq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - movq %r10,-24(%rdi,%rbp,1) - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - adcq $0,%rdx - movq %r11,-16(%rdi,%rbp,1) - movq %rdx,%r10 - - - movq -8(%rsi,%rbp,1),%rbx - mulq %r15 - movq %rax,%r12 - movq %rbx,%rax - movq %rdx,%r13 - - leaq (%rbp),%rcx - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - jmp .Lsqr4x_1st - -.align 32 -.Lsqr4x_1st: - movq (%rsi,%rcx,1),%rbx - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %rdx,%r12 - adcq $0,%r12 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 8(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - movq %r11,(%rdi,%rcx,1) - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq 16(%rsi,%rcx,1),%rbx - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %r10,8(%rdi,%rcx,1) - movq %rdx,%r12 - adcq $0,%r12 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 24(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - - mulq %r15 - addq %rax,%r12 - 
movq %rbx,%rax - movq %r11,16(%rdi,%rcx,1) - movq %rdx,%r13 - adcq $0,%r13 - leaq 32(%rcx),%rcx - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - - cmpq $0,%rcx - jne .Lsqr4x_1st - - mulq %r15 - addq %rax,%r13 - leaq 16(%rbp),%rbp - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - jmp .Lsqr4x_outer - -.align 32 -.Lsqr4x_outer: - movq -32(%rsi,%rbp,1),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi,%rbp,1),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq -16(%rsi,%rbp,1),%rbx - movq %rax,%r15 - - mulq %r14 - movq -24(%rdi,%rbp,1),%r10 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - movq %r10,-24(%rdi,%rbp,1) - movq %rdx,%r11 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - adcq $0,%rdx - addq -16(%rdi,%rbp,1),%r11 - movq %rdx,%r10 - adcq $0,%r10 - movq %r11,-16(%rdi,%rbp,1) - - xorq %r12,%r12 - - movq -8(%rsi,%rbp,1),%rbx - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - adcq $0,%rdx - addq -8(%rdi,%rbp,1),%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - addq %r12,%r10 - movq %rdx,%r11 - adcq $0,%r11 - movq %r10,-8(%rdi,%rbp,1) - - leaq (%rbp),%rcx - jmp .Lsqr4x_inner - -.align 32 -.Lsqr4x_inner: - movq (%rsi,%rcx,1),%rbx - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %rdx,%r12 - adcq $0,%r12 - addq (%rdi,%rcx,1),%r13 - adcq $0,%r12 - -.byte 0x67 - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 8(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - mulq %r15 - addq %rax,%r12 - movq %r11,(%rdi,%rcx,1) - movq %rbx,%rax - movq %rdx,%r13 - adcq $0,%r13 - addq 8(%rdi,%rcx,1),%r12 - leaq 16(%rcx),%rcx - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - addq %r12,%r10 - movq %rdx,%r11 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - - cmpq $0,%rcx - jne .Lsqr4x_inner - -.byte 0x67 - mulq %r15 - addq %rax,%r13 - 
adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - - addq $16,%rbp - jnz .Lsqr4x_outer - - - movq -32(%rsi),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq -16(%rsi),%rbx - movq %rax,%r15 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq %r10,-24(%rdi) - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - movq -8(%rsi),%rbx - adcq $0,%r10 - - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - movq %r11,-16(%rdi) - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi) - - mulq %r15 - addq %rax,%r13 - movq -16(%rsi),%rax - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - - mulq %rbx - addq $16,%rbp - xorq %r14,%r14 - subq %r9,%rbp - xorq %r15,%r15 - - addq %r12,%rax - adcq $0,%rdx - movq %rax,8(%rdi) - movq %rdx,16(%rdi) - movq %r15,24(%rdi) - - movq -16(%rsi,%rbp,1),%rax - leaq 48+8(%rsp),%rdi - xorq %r10,%r10 - movq 8(%rdi),%r11 - - leaq (%r14,%r10,2),%r12 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq 16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 24(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi,%rbp,1),%rax - movq %r12,(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 32(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 40(%rdi),%r11 - adcq %rax,%rbx - movq 0(%rsi,%rbp,1),%rax - movq %rbx,16(%rdi) - adcq %rdx,%r8 - leaq 16(%rbp),%rbp - movq %r8,24(%rdi) - sbbq %r15,%r15 - leaq 64(%rdi),%rdi - jmp .Lsqr4x_shift_n_add - -.align 32 -.Lsqr4x_shift_n_add: - leaq (%r14,%r10,2),%r12 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq 
-16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq -8(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi,%rbp,1),%rax - movq %r12,-32(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,-24(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 0(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 8(%rdi),%r11 - adcq %rax,%rbx - movq 0(%rsi,%rbp,1),%rax - movq %rbx,-16(%rdi) - adcq %rdx,%r8 - - leaq (%r14,%r10,2),%r12 - movq %r8,-8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq 16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 24(%rdi),%r11 - adcq %rax,%r12 - movq 8(%rsi,%rbp,1),%rax - movq %r12,0(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 32(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 40(%rdi),%r11 - adcq %rax,%rbx - movq 16(%rsi,%rbp,1),%rax - movq %rbx,16(%rdi) - adcq %rdx,%r8 - movq %r8,24(%rdi) - sbbq %r15,%r15 - leaq 64(%rdi),%rdi - addq $32,%rbp - jnz .Lsqr4x_shift_n_add - - leaq (%r14,%r10,2),%r12 -.byte 0x67 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq -16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq -8(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi),%rax - movq %r12,-32(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,-24(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - mulq %rax - negq %r15 - adcq %rax,%rbx - adcq %rdx,%r8 - movq %rbx,-16(%rdi) - movq %r8,-8(%rdi) -.byte 102,72,15,126,213 -__bn_sqr8x_reduction: - xorq %rax,%rax - leaq (%r9,%rbp,1),%rcx - leaq 48+8(%rsp,%r9,2),%rdx - movq %rcx,0+8(%rsp) - leaq 48+8(%rsp,%r9,1),%rdi - movq %rdx,8+8(%rsp) - negq %r9 - jmp .L8x_reduction_loop - -.align 32 -.L8x_reduction_loop: - leaq (%rdi,%r9,1),%rdi -.byte 0x66 - movq 
0(%rdi),%rbx - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%r12 - movq 40(%rdi),%r13 - movq 48(%rdi),%r14 - movq 56(%rdi),%r15 - movq %rax,(%rdx) - leaq 64(%rdi),%rdi - -.byte 0x67 - movq %rbx,%r8 - imulq 32+8(%rsp),%rbx - movq 0(%rbp),%rax - movl $8,%ecx - jmp .L8x_reduce - -.align 32 -.L8x_reduce: - mulq %rbx - movq 8(%rbp),%rax - negq %r8 - movq %rdx,%r8 - adcq $0,%r8 - - mulq %rbx - addq %rax,%r9 - movq 16(%rbp),%rax - adcq $0,%rdx - addq %r9,%r8 - movq %rbx,48-8+8(%rsp,%rcx,8) - movq %rdx,%r9 - adcq $0,%r9 - - mulq %rbx - addq %rax,%r10 - movq 24(%rbp),%rax - adcq $0,%rdx - addq %r10,%r9 - movq 32+8(%rsp),%rsi - movq %rdx,%r10 - adcq $0,%r10 - - mulq %rbx - addq %rax,%r11 - movq 32(%rbp),%rax - adcq $0,%rdx - imulq %r8,%rsi - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - - mulq %rbx - addq %rax,%r12 - movq 40(%rbp),%rax - adcq $0,%rdx - addq %r12,%r11 - movq %rdx,%r12 - adcq $0,%r12 - - mulq %rbx - addq %rax,%r13 - movq 48(%rbp),%rax - adcq $0,%rdx - addq %r13,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %rbx - addq %rax,%r14 - movq 56(%rbp),%rax - adcq $0,%rdx - addq %r14,%r13 - movq %rdx,%r14 - adcq $0,%r14 - - mulq %rbx - movq %rsi,%rbx - addq %rax,%r15 - movq 0(%rbp),%rax - adcq $0,%rdx - addq %r15,%r14 - movq %rdx,%r15 - adcq $0,%r15 - - decl %ecx - jnz .L8x_reduce - - leaq 64(%rbp),%rbp - xorq %rax,%rax - movq 8+8(%rsp),%rdx - cmpq 0+8(%rsp),%rbp - jae .L8x_no_tail - -.byte 0x66 - addq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - sbbq %rsi,%rsi - - movq 48+56+8(%rsp),%rbx - movl $8,%ecx - movq 0(%rbp),%rax - jmp .L8x_tail - -.align 32 -.L8x_tail: - mulq %rbx - addq %rax,%r8 - movq 8(%rbp),%rax - movq %r8,(%rdi) - movq %rdx,%r8 - adcq $0,%r8 - - mulq %rbx - addq %rax,%r9 - movq 16(%rbp),%rax - adcq $0,%rdx - addq %r9,%r8 - leaq 8(%rdi),%rdi - movq %rdx,%r9 - adcq $0,%r9 - - mulq %rbx - addq %rax,%r10 - movq 
24(%rbp),%rax - adcq $0,%rdx - addq %r10,%r9 - movq %rdx,%r10 - adcq $0,%r10 - - mulq %rbx - addq %rax,%r11 - movq 32(%rbp),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - - mulq %rbx - addq %rax,%r12 - movq 40(%rbp),%rax - adcq $0,%rdx - addq %r12,%r11 - movq %rdx,%r12 - adcq $0,%r12 - - mulq %rbx - addq %rax,%r13 - movq 48(%rbp),%rax - adcq $0,%rdx - addq %r13,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %rbx - addq %rax,%r14 - movq 56(%rbp),%rax - adcq $0,%rdx - addq %r14,%r13 - movq %rdx,%r14 - adcq $0,%r14 - - mulq %rbx - movq 48-16+8(%rsp,%rcx,8),%rbx - addq %rax,%r15 - adcq $0,%rdx - addq %r15,%r14 - movq 0(%rbp),%rax - movq %rdx,%r15 - adcq $0,%r15 - - decl %ecx - jnz .L8x_tail - - leaq 64(%rbp),%rbp - movq 8+8(%rsp),%rdx - cmpq 0+8(%rsp),%rbp - jae .L8x_tail_done - - movq 48+56+8(%rsp),%rbx - negq %rsi - movq 0(%rbp),%rax - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - sbbq %rsi,%rsi - - movl $8,%ecx - jmp .L8x_tail - -.align 32 -.L8x_tail_done: - xorq %rax,%rax - addq (%rdx),%r8 - adcq $0,%r9 - adcq $0,%r10 - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - adcq $0,%rax - - negq %rsi -.L8x_no_tail: - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - adcq $0,%rax - movq -8(%rbp),%rcx - xorq %rsi,%rsi - -.byte 102,72,15,126,213 - - movq %r8,0(%rdi) - movq %r9,8(%rdi) -.byte 102,73,15,126,217 - movq %r10,16(%rdi) - movq %r11,24(%rdi) - movq %r12,32(%rdi) - movq %r13,40(%rdi) - movq %r14,48(%rdi) - movq %r15,56(%rdi) - leaq 64(%rdi),%rdi - - cmpq %rdx,%rdi - jb .L8x_reduction_loop - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_sqr8x_internal,.-bn_sqr8x_internal -.type __bn_post4x_internal,@function -.align 32 -__bn_post4x_internal: -.cfi_startproc - movq 0(%rbp),%r12 - leaq 
(%rdi,%r9,1),%rbx - movq %r9,%rcx -.byte 102,72,15,126,207 - negq %rax -.byte 102,72,15,126,206 - sarq $3+2,%rcx - decq %r12 - xorq %r10,%r10 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp .Lsqr4x_sub_entry - -.align 16 -.Lsqr4x_sub: - movq 0(%rbp),%r12 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 -.Lsqr4x_sub_entry: - leaq 32(%rbp),%rbp - notq %r12 - notq %r13 - notq %r14 - notq %r15 - andq %rax,%r12 - andq %rax,%r13 - andq %rax,%r14 - andq %rax,%r15 - - negq %r10 - adcq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - adcq 16(%rbx),%r14 - adcq 24(%rbx),%r15 - movq %r12,0(%rdi) - leaq 32(%rbx),%rbx - movq %r13,8(%rdi) - sbbq %r10,%r10 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - leaq 32(%rdi),%rdi - - incq %rcx - jnz .Lsqr4x_sub - - movq %r9,%r10 - negq %r9 - .byte 0xf3,0xc3 -.cfi_endproc -.size __bn_post4x_internal,.-__bn_post4x_internal -.globl bn_from_montgomery -.hidden bn_from_montgomery -.type bn_from_montgomery,@function -.align 32 -bn_from_montgomery: -.cfi_startproc - testl $7,%r9d - jz bn_from_mont8x - xorl %eax,%eax - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_from_montgomery,.-bn_from_montgomery - -.type bn_from_mont8x,@function -.align 32 -bn_from_mont8x: -.cfi_startproc -.byte 0x67 - movq %rsp,%rax -.cfi_def_cfa_register %rax - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lfrom_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lfrom_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp .Lfrom_sp_done - -.align 32 -.Lfrom_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lfrom_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq 
%rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lfrom_page_walk - jmp .Lfrom_page_walk_done - -.Lfrom_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lfrom_page_walk -.Lfrom_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lfrom_body: - movq %r9,%r11 - leaq 48(%rsp),%rax - pxor %xmm0,%xmm0 - jmp .Lmul_by_1 - -.align 32 -.Lmul_by_1: - movdqu (%rsi),%xmm1 - movdqu 16(%rsi),%xmm2 - movdqu 32(%rsi),%xmm3 - movdqa %xmm0,(%rax,%r9,1) - movdqu 48(%rsi),%xmm4 - movdqa %xmm0,16(%rax,%r9,1) -.byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 - movdqa %xmm1,(%rax) - movdqa %xmm0,32(%rax,%r9,1) - movdqa %xmm2,16(%rax) - movdqa %xmm0,48(%rax,%r9,1) - movdqa %xmm3,32(%rax) - movdqa %xmm4,48(%rax) - leaq 64(%rax),%rax - subq $64,%r11 - jnz .Lmul_by_1 - -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 0x67 - movq %rcx,%rbp -.byte 102,73,15,110,218 - leaq OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - andl $0x80108,%r11d - cmpl $0x80108,%r11d - jne .Lfrom_mont_nox - - leaq (%rax,%r9,1),%rdi - call __bn_sqrx8x_reduction - call __bn_postx4x_internal - - pxor %xmm0,%xmm0 - leaq 48(%rsp),%rax - jmp .Lfrom_mont_zero - -.align 32 -.Lfrom_mont_nox: - call __bn_sqr8x_reduction - call __bn_post4x_internal - - pxor %xmm0,%xmm0 - leaq 48(%rsp),%rax - jmp .Lfrom_mont_zero - -.align 32 -.Lfrom_mont_zero: - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movdqa %xmm0,0(%rax) - movdqa %xmm0,16(%rax) - movdqa %xmm0,32(%rax) - movdqa %xmm0,48(%rax) - leaq 64(%rax),%rax - subq $32,%r9 - jnz .Lfrom_mont_zero - - movq $1,%rax - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp 
-.Lfrom_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_from_mont8x,.-bn_from_mont8x -.type bn_mulx4x_mont_gather5,@function -.align 32 -bn_mulx4x_mont_gather5: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lmulx4x_enter: - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lmulx4x_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lmulx4xsp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp .Lmulx4xsp_done - -.Lmulx4xsp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lmulx4xsp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lmulx4x_page_walk - jmp .Lmulx4x_page_walk_done - -.Lmulx4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lmulx4x_page_walk -.Lmulx4x_page_walk_done: - - - - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lmulx4x_body: - call mulx4x_internal - - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq $1,%rax - - movq -48(%rsi),%r15 -.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lmulx4x_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5 - -.type mulx4x_internal,@function -.align 32 -mulx4x_internal: -.cfi_startproc - movq %r9,8(%rsp) - movq %r9,%r10 - negq %r9 - shlq $5,%r9 - negq %r10 - 
leaq 128(%rdx,%r9,1),%r13 - shrq $5+5,%r9 - movd 8(%rax),%xmm5 - subq $1,%r9 - leaq .Linc(%rip),%rax - movq %r13,16+8(%rsp) - movq %r9,24+8(%rsp) - movq %rdi,56+8(%rsp) - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 88-112(%rsp,%r10,1),%r10 - leaq 128(%rdx),%rdi - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 -.byte 0x67 - movdqa %xmm1,%xmm2 -.byte 0x67 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 -.byte 0x67 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - - pand 64(%rdi),%xmm0 - pand 80(%rdi),%xmm1 - pand 96(%rdi),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%rdi),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%rdi),%xmm4 - movdqa -112(%rdi),%xmm5 - movdqa -96(%rdi),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%rdi),%xmm3 - pand 128(%r10),%xmm5 - 
por %xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%rdi),%xmm4 - movdqa -48(%rdi),%xmm5 - movdqa -32(%rdi),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%rdi),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%rdi),%xmm4 - movdqa 16(%rdi),%xmm5 - movdqa 32(%rdi),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%rdi),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - pxor %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%rdi),%rdi -.byte 102,72,15,126,194 - leaq 64+32+8(%rsp),%rbx - - movq %rdx,%r9 - mulxq 0(%rsi),%r8,%rax - mulxq 8(%rsi),%r11,%r12 - addq %rax,%r11 - mulxq 16(%rsi),%rax,%r13 - adcq %rax,%r12 - adcq $0,%r13 - mulxq 24(%rsi),%rax,%r14 - - movq %r8,%r15 - imulq 32+8(%rsp),%r8 - xorq %rbp,%rbp - movq %r8,%rdx - - movq %rdi,8+8(%rsp) - - leaq 32(%rsi),%rsi - adcxq %rax,%r13 - adcxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - mulxq 16(%rcx),%rax,%r12 - movq 24+8(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - adcxq %rax,%r12 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r12,-16(%rbx) - jmp .Lmulx4x_1st - -.align 32 -.Lmulx4x_1st: - adcxq %rbp,%r15 - mulxq 0(%rsi),%r10,%rax - adcxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 -.byte 0x67,0x67 - movq %r8,%rdx - adcxq %rax,%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq 
%r10,-40(%rbx) - adcxq %rax,%r12 - movq %r11,-32(%rbx) - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r13,-16(%rbx) - - decq %rdi - jnz .Lmulx4x_1st - - movq 8(%rsp),%rax - adcq %rbp,%r15 - leaq (%rsi,%rax,1),%rsi - addq %r15,%r14 - movq 8+8(%rsp),%rdi - adcq %rbp,%rbp - movq %r14,-8(%rbx) - jmp .Lmulx4x_outer - -.align 32 -.Lmulx4x_outer: - leaq 16-256(%rbx),%r10 - pxor %xmm4,%xmm4 -.byte 0x67,0x67 - pxor %xmm5,%xmm5 - movdqa -128(%rdi),%xmm0 - movdqa -112(%rdi),%xmm1 - movdqa -96(%rdi),%xmm2 - pand 256(%r10),%xmm0 - movdqa -80(%rdi),%xmm3 - pand 272(%r10),%xmm1 - por %xmm0,%xmm4 - pand 288(%r10),%xmm2 - por %xmm1,%xmm5 - pand 304(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%rdi),%xmm0 - movdqa -48(%rdi),%xmm1 - movdqa -32(%rdi),%xmm2 - pand 320(%r10),%xmm0 - movdqa -16(%rdi),%xmm3 - pand 336(%r10),%xmm1 - por %xmm0,%xmm4 - pand 352(%r10),%xmm2 - por %xmm1,%xmm5 - pand 368(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%rdi),%xmm0 - movdqa 16(%rdi),%xmm1 - movdqa 32(%rdi),%xmm2 - pand 384(%r10),%xmm0 - movdqa 48(%rdi),%xmm3 - pand 400(%r10),%xmm1 - por %xmm0,%xmm4 - pand 416(%r10),%xmm2 - por %xmm1,%xmm5 - pand 432(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%rdi),%xmm0 - movdqa 80(%rdi),%xmm1 - movdqa 96(%rdi),%xmm2 - pand 448(%r10),%xmm0 - movdqa 112(%rdi),%xmm3 - pand 464(%r10),%xmm1 - por %xmm0,%xmm4 - pand 480(%r10),%xmm2 - por %xmm1,%xmm5 - pand 496(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%rdi),%rdi -.byte 102,72,15,126,194 - - movq %rbp,(%rbx) - leaq 32(%rbx,%rax,1),%rbx - mulxq 0(%rsi),%r8,%r11 - xorq %rbp,%rbp - movq %rdx,%r9 - mulxq 8(%rsi),%r14,%r12 - adoxq -32(%rbx),%r8 - adcxq %r14,%r11 - mulxq 16(%rsi),%r15,%r13 - adoxq -24(%rbx),%r11 - adcxq %r15,%r12 - mulxq 24(%rsi),%rdx,%r14 - adoxq -16(%rbx),%r12 - adcxq %rdx,%r13 - leaq 
(%rcx,%rax,1),%rcx - leaq 32(%rsi),%rsi - adoxq -8(%rbx),%r13 - adcxq %rbp,%r14 - adoxq %rbp,%r14 - - movq %r8,%r15 - imulq 32+8(%rsp),%r8 - - movq %r8,%rdx - xorq %rbp,%rbp - movq %rdi,8+8(%rsp) - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - mulxq 16(%rcx),%rax,%r12 - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq 24+8(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r12 - movq %r11,-24(%rbx) - adoxq %rbp,%r15 - movq %r12,-16(%rbx) - leaq 32(%rcx),%rcx - jmp .Lmulx4x_inner - -.align 32 -.Lmulx4x_inner: - mulxq 0(%rsi),%r10,%rax - adcxq %rbp,%r15 - adoxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq 0(%rbx),%r10 - adoxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq 8(%rbx),%r11 - adoxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 - movq %r8,%rdx - adcxq 16(%rbx),%r12 - adoxq %rax,%r13 - adcxq 24(%rbx),%r13 - adoxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - adcxq %rbp,%r14 - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - adoxq %r15,%r13 - movq %r11,-32(%rbx) - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - leaq 32(%rcx),%rcx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - movq %r13,-16(%rbx) - - decq %rdi - jnz .Lmulx4x_inner - - movq 0+8(%rsp),%rax - adcq %rbp,%r15 - subq 0(%rbx),%rdi - movq 8+8(%rsp),%rdi - movq 16+8(%rsp),%r10 - adcq %r15,%r14 - leaq (%rsi,%rax,1),%rsi - adcq %rbp,%rbp - movq %r14,-8(%rbx) - - cmpq %r10,%rdi - jb .Lmulx4x_outer - - movq -8(%rcx),%r10 - movq %rbp,%r8 - movq (%rcx,%rax,1),%r12 - leaq (%rcx,%rax,1),%rbp - movq %rax,%rcx - leaq (%rbx,%rax,1),%rdi - xorl %eax,%eax - xorq %r15,%r15 - subq %r14,%r10 - adcq %r15,%r15 - orq %r15,%r8 - sarq $3+2,%rcx - subq %r8,%rax - movq 56+8(%rsp),%rdx - decq %r12 - movq 8(%rbp),%r13 - xorq %r8,%r8 - movq 16(%rbp),%r14 - 
movq 24(%rbp),%r15 - jmp .Lsqrx4x_sub_entry -.cfi_endproc -.size mulx4x_internal,.-mulx4x_internal -.type bn_powerx5,@function -.align 32 -bn_powerx5: -.cfi_startproc - movq %rsp,%rax -.cfi_def_cfa_register %rax -.Lpowerx5_enter: - pushq %rbx -.cfi_offset %rbx,-16 - pushq %rbp -.cfi_offset %rbp,-24 - pushq %r12 -.cfi_offset %r12,-32 - pushq %r13 -.cfi_offset %r13,-40 - pushq %r14 -.cfi_offset %r14,-48 - pushq %r15 -.cfi_offset %r15,-56 -.Lpowerx5_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb .Lpwrx_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp .Lpwrx_sp_done - -.align 32 -.Lpwrx_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -.Lpwrx_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lpwrx_page_walk - jmp .Lpwrx_page_walk_done - -.Lpwrx_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja .Lpwrx_page_walk -.Lpwrx_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - - - pxor %xmm0,%xmm0 -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 102,73,15,110,218 -.byte 102,72,15,110,226 - movq %r8,32(%rsp) - movq %rax,40(%rsp) -.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 -.Lpowerx5_body: - - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - - movq %r10,%r9 - movq %rsi,%rdi -.byte 102,72,15,126,209 -.byte 102,72,15,126,226 - movq 40(%rsp),%rax - - call mulx4x_internal - - movq 40(%rsp),%rsi -.cfi_def_cfa %rsi,8 - movq $1,%rax - - movq -48(%rsi),%r15 
-.cfi_restore %r15 - movq -40(%rsi),%r14 -.cfi_restore %r14 - movq -32(%rsi),%r13 -.cfi_restore %r13 - movq -24(%rsi),%r12 -.cfi_restore %r12 - movq -16(%rsi),%rbp -.cfi_restore %rbp - movq -8(%rsi),%rbx -.cfi_restore %rbx - leaq (%rsi),%rsp -.cfi_def_cfa_register %rsp -.Lpowerx5_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_powerx5,.-bn_powerx5 - -.globl bn_sqrx8x_internal -.hidden bn_sqrx8x_internal -.hidden bn_sqrx8x_internal -.type bn_sqrx8x_internal,@function -.align 32 -bn_sqrx8x_internal: -__bn_sqrx8x_internal: -.cfi_startproc - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - leaq 48+8(%rsp),%rdi - leaq (%rsi,%r9,1),%rbp - movq %r9,0+8(%rsp) - movq %rbp,8+8(%rsp) - jmp .Lsqr8x_zero_start - -.align 32 -.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 -.Lsqrx8x_zero: -.byte 0x3e - movdqa %xmm0,0(%rdi) - movdqa %xmm0,16(%rdi) - movdqa %xmm0,32(%rdi) - movdqa %xmm0,48(%rdi) -.Lsqr8x_zero_start: - movdqa %xmm0,64(%rdi) - movdqa %xmm0,80(%rdi) - movdqa %xmm0,96(%rdi) - movdqa %xmm0,112(%rdi) - leaq 128(%rdi),%rdi - subq $64,%r9 - jnz .Lsqrx8x_zero - - movq 0(%rsi),%rdx - - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - leaq 48+8(%rsp),%rdi - xorq %rbp,%rbp - jmp .Lsqrx8x_outer_loop - -.align 32 -.Lsqrx8x_outer_loop: - mulxq 8(%rsi),%r8,%rax - adcxq %r9,%r8 - adoxq %rax,%r10 - mulxq 16(%rsi),%r9,%rax - adcxq %r10,%r9 - adoxq %rax,%r11 -.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 - adcxq %r11,%r10 - adoxq %rax,%r12 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 - adcxq %r12,%r11 - adoxq %rax,%r13 - mulxq 40(%rsi),%r12,%rax - adcxq %r13,%r12 - adoxq %rax,%r14 - mulxq 48(%rsi),%r13,%rax - adcxq %r14,%r13 - adoxq %r15,%rax - mulxq 56(%rsi),%r14,%r15 - movq 8(%rsi),%rdx - adcxq %rax,%r14 - adoxq %rbp,%r15 - adcq 64(%rdi),%r15 - movq %r8,8(%rdi) - movq %r9,16(%rdi) - sbbq %rcx,%rcx - xorq %rbp,%rbp - - - mulxq 16(%rsi),%r8,%rbx - mulxq 
24(%rsi),%r9,%rax - adcxq %r10,%r8 - adoxq %rbx,%r9 - mulxq 32(%rsi),%r10,%rbx - adcxq %r11,%r9 - adoxq %rax,%r10 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 - adcxq %r12,%r10 - adoxq %rbx,%r11 -.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 - adcxq %r13,%r11 - adoxq %r14,%r12 -.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 - movq 16(%rsi),%rdx - adcxq %rax,%r12 - adoxq %rbx,%r13 - adcxq %r15,%r13 - adoxq %rbp,%r14 - adcxq %rbp,%r14 - - movq %r8,24(%rdi) - movq %r9,32(%rdi) - - mulxq 24(%rsi),%r8,%rbx - mulxq 32(%rsi),%r9,%rax - adcxq %r10,%r8 - adoxq %rbx,%r9 - mulxq 40(%rsi),%r10,%rbx - adcxq %r11,%r9 - adoxq %rax,%r10 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 - adcxq %r12,%r10 - adoxq %r13,%r11 -.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 -.byte 0x3e - movq 24(%rsi),%rdx - adcxq %rbx,%r11 - adoxq %rax,%r12 - adcxq %r14,%r12 - movq %r8,40(%rdi) - movq %r9,48(%rdi) - mulxq 32(%rsi),%r8,%rax - adoxq %rbp,%r13 - adcxq %rbp,%r13 - - mulxq 40(%rsi),%r9,%rbx - adcxq %r10,%r8 - adoxq %rax,%r9 - mulxq 48(%rsi),%r10,%rax - adcxq %r11,%r9 - adoxq %r12,%r10 - mulxq 56(%rsi),%r11,%r12 - movq 32(%rsi),%rdx - movq 40(%rsi),%r14 - adcxq %rbx,%r10 - adoxq %rax,%r11 - movq 48(%rsi),%r15 - adcxq %r13,%r11 - adoxq %rbp,%r12 - adcxq %rbp,%r12 - - movq %r8,56(%rdi) - movq %r9,64(%rdi) - - mulxq %r14,%r9,%rax - movq 56(%rsi),%r8 - adcxq %r10,%r9 - mulxq %r15,%r10,%rbx - adoxq %rax,%r10 - adcxq %r11,%r10 - mulxq %r8,%r11,%rax - movq %r14,%rdx - adoxq %rbx,%r11 - adcxq %r12,%r11 - - adcxq %rbp,%rax - - mulxq %r15,%r14,%rbx - mulxq %r8,%r12,%r13 - movq %r15,%rdx - leaq 64(%rsi),%rsi - adcxq %r14,%r11 - adoxq %rbx,%r12 - adcxq %rax,%r12 - adoxq %rbp,%r13 - -.byte 0x67,0x67 - mulxq %r8,%r8,%r14 - adcxq %r8,%r13 - adcxq %rbp,%r14 - - cmpq 8+8(%rsp),%rsi - je .Lsqrx8x_outer_break - - negq %rcx - movq $-8,%rcx - movq %rbp,%r15 - movq 64(%rdi),%r8 - adcxq 72(%rdi),%r9 - adcxq 80(%rdi),%r10 - adcxq 88(%rdi),%r11 - adcq 96(%rdi),%r12 - adcq 104(%rdi),%r13 - adcq 
112(%rdi),%r14 - adcq 120(%rdi),%r15 - leaq (%rsi),%rbp - leaq 128(%rdi),%rdi - sbbq %rax,%rax - - movq -64(%rsi),%rdx - movq %rax,16+8(%rsp) - movq %rdi,24+8(%rsp) - - - xorl %eax,%eax - jmp .Lsqrx8x_loop - -.align 32 -.Lsqrx8x_loop: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rax,%rbx - adoxq %r9,%r8 - - mulxq 8(%rbp),%rax,%r9 - adcxq %rax,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rax,%r10 - adcxq %rax,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcxq %rax,%r11 - adoxq %r13,%r12 - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - movq %rbx,(%rdi,%rcx,8) - movl $0,%ebx - adcxq %rax,%r13 - adoxq %r15,%r14 - -.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 - movq 8(%rsi,%rcx,8),%rdx - adcxq %rax,%r14 - adoxq %rbx,%r15 - adcxq %rbx,%r15 - -.byte 0x67 - incq %rcx - jnz .Lsqrx8x_loop - - leaq 64(%rbp),%rbp - movq $-8,%rcx - cmpq 8+8(%rsp),%rbp - je .Lsqrx8x_break - - subq 16+8(%rsp),%rbx -.byte 0x66 - movq -64(%rsi),%rdx - adcxq 0(%rdi),%r8 - adcxq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi -.byte 0x67 - sbbq %rax,%rax - xorl %ebx,%ebx - movq %rax,16+8(%rsp) - jmp .Lsqrx8x_loop - -.align 32 -.Lsqrx8x_break: - xorq %rbp,%rbp - subq 16+8(%rsp),%rbx - adcxq %rbp,%r8 - movq 24+8(%rsp),%rcx - adcxq %rbp,%r9 - movq 0(%rsi),%rdx - adcq $0,%r10 - movq %r8,0(%rdi) - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - cmpq %rcx,%rdi - je .Lsqrx8x_outer_loop - - movq %r9,8(%rdi) - movq 8(%rcx),%r9 - movq %r10,16(%rdi) - movq 16(%rcx),%r10 - movq %r11,24(%rdi) - movq 24(%rcx),%r11 - movq %r12,32(%rdi) - movq 32(%rcx),%r12 - movq %r13,40(%rdi) - movq 40(%rcx),%r13 - movq %r14,48(%rdi) - movq 48(%rcx),%r14 - movq %r15,56(%rdi) - movq 56(%rcx),%r15 - movq %rcx,%rdi - jmp .Lsqrx8x_outer_loop - 
-.align 32 -.Lsqrx8x_outer_break: - movq %r9,72(%rdi) -.byte 102,72,15,126,217 - movq %r10,80(%rdi) - movq %r11,88(%rdi) - movq %r12,96(%rdi) - movq %r13,104(%rdi) - movq %r14,112(%rdi) - leaq 48+8(%rsp),%rdi - movq (%rsi,%rcx,1),%rdx - - movq 8(%rdi),%r11 - xorq %r10,%r10 - movq 0+8(%rsp),%r9 - adoxq %r11,%r11 - movq 16(%rdi),%r12 - movq 24(%rdi),%r13 - - -.align 32 -.Lsqrx4x_shift_n_add: - mulxq %rdx,%rax,%rbx - adoxq %r12,%r12 - adcxq %r10,%rax -.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 -.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 - adoxq %r13,%r13 - adcxq %r11,%rbx - movq 40(%rdi),%r11 - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r10,%r10 - adcxq %r12,%rax - movq 16(%rsi,%rcx,1),%rdx - movq 48(%rdi),%r12 - adoxq %r11,%r11 - adcxq %r13,%rbx - movq 56(%rdi),%r13 - movq %rax,16(%rdi) - movq %rbx,24(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r12,%r12 - adcxq %r10,%rax - movq 24(%rsi,%rcx,1),%rdx - leaq 32(%rcx),%rcx - movq 64(%rdi),%r10 - adoxq %r13,%r13 - adcxq %r11,%rbx - movq 72(%rdi),%r11 - movq %rax,32(%rdi) - movq %rbx,40(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r10,%r10 - adcxq %r12,%rax - jrcxz .Lsqrx4x_shift_n_add_break -.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 - adoxq %r11,%r11 - adcxq %r13,%rbx - movq 80(%rdi),%r12 - movq 88(%rdi),%r13 - movq %rax,48(%rdi) - movq %rbx,56(%rdi) - leaq 64(%rdi),%rdi - nop - jmp .Lsqrx4x_shift_n_add - -.align 32 -.Lsqrx4x_shift_n_add_break: - adcxq %r13,%rbx - movq %rax,48(%rdi) - movq %rbx,56(%rdi) - leaq 64(%rdi),%rdi -.byte 102,72,15,126,213 -__bn_sqrx8x_reduction: - xorl %eax,%eax - movq 32+8(%rsp),%rbx - movq 48+8(%rsp),%rdx - leaq -64(%rbp,%r9,1),%rcx - - movq %rcx,0+8(%rsp) - movq %rdi,8+8(%rsp) - - leaq 48+8(%rsp),%rdi - jmp .Lsqrx8x_reduction_loop - -.align 32 -.Lsqrx8x_reduction_loop: - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%r12 - movq %rdx,%r8 - imulq %rbx,%rdx - movq 40(%rdi),%r13 - movq 48(%rdi),%r14 - movq 56(%rdi),%r15 - movq %rax,24+8(%rsp) 
- - leaq 64(%rdi),%rdi - xorq %rsi,%rsi - movq $-8,%rcx - jmp .Lsqrx8x_reduce - -.align 32 -.Lsqrx8x_reduce: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rbx,%rax - adoxq %r9,%r8 - - mulxq 8(%rbp),%rbx,%r9 - adcxq %rbx,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rbx,%r10 - adcxq %rbx,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rbx,%r11 - adcxq %rbx,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 - movq %rdx,%rax - movq %r8,%rdx - adcxq %rbx,%r11 - adoxq %r13,%r12 - - mulxq 32+8(%rsp),%rbx,%rdx - movq %rax,%rdx - movq %rax,64+48+8(%rsp,%rcx,8) - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - adcxq %rax,%r13 - adoxq %r15,%r14 - - mulxq 56(%rbp),%rax,%r15 - movq %rbx,%rdx - adcxq %rax,%r14 - adoxq %rsi,%r15 - adcxq %rsi,%r15 - -.byte 0x67,0x67,0x67 - incq %rcx - jnz .Lsqrx8x_reduce - - movq %rsi,%rax - cmpq 0+8(%rsp),%rbp - jae .Lsqrx8x_no_tail - - movq 48+8(%rsp),%rdx - addq 0(%rdi),%r8 - leaq 64(%rbp),%rbp - movq $-8,%rcx - adcxq 8(%rdi),%r9 - adcxq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi - sbbq %rax,%rax - - xorq %rsi,%rsi - movq %rax,16+8(%rsp) - jmp .Lsqrx8x_tail - -.align 32 -.Lsqrx8x_tail: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rax,%rbx - adoxq %r9,%r8 - - mulxq 8(%rbp),%rax,%r9 - adcxq %rax,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rax,%r10 - adcxq %rax,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcxq %rax,%r11 - adoxq %r13,%r12 - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - adcxq %rax,%r13 - adoxq %r15,%r14 - - mulxq 56(%rbp),%rax,%r15 - movq 72+48+8(%rsp,%rcx,8),%rdx - adcxq %rax,%r14 - adoxq %rsi,%r15 - movq %rbx,(%rdi,%rcx,8) - movq %r8,%rbx - adcxq %rsi,%r15 - - incq %rcx - jnz .Lsqrx8x_tail - - cmpq 0+8(%rsp),%rbp - jae 
.Lsqrx8x_tail_done - - subq 16+8(%rsp),%rsi - movq 48+8(%rsp),%rdx - leaq 64(%rbp),%rbp - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi - sbbq %rax,%rax - subq $8,%rcx - - xorq %rsi,%rsi - movq %rax,16+8(%rsp) - jmp .Lsqrx8x_tail - -.align 32 -.Lsqrx8x_tail_done: - xorq %rax,%rax - addq 24+8(%rsp),%r8 - adcq $0,%r9 - adcq $0,%r10 - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - adcq $0,%rax - - subq 16+8(%rsp),%rsi -.Lsqrx8x_no_tail: - adcq 0(%rdi),%r8 -.byte 102,72,15,126,217 - adcq 8(%rdi),%r9 - movq 56(%rbp),%rsi -.byte 102,72,15,126,213 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - adcq $0,%rax - - movq 32+8(%rsp),%rbx - movq 64(%rdi,%rcx,1),%rdx - - movq %r8,0(%rdi) - leaq 64(%rdi),%r8 - movq %r9,8(%rdi) - movq %r10,16(%rdi) - movq %r11,24(%rdi) - movq %r12,32(%rdi) - movq %r13,40(%rdi) - movq %r14,48(%rdi) - movq %r15,56(%rdi) - - leaq 64(%rdi,%rcx,1),%rdi - cmpq 8+8(%rsp),%r8 - jb .Lsqrx8x_reduction_loop - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_sqrx8x_internal,.-bn_sqrx8x_internal -.align 32 -.type __bn_postx4x_internal,@function -__bn_postx4x_internal: -.cfi_startproc - movq 0(%rbp),%r12 - movq %rcx,%r10 - movq %rcx,%r9 - negq %rax - sarq $3+2,%rcx - -.byte 102,72,15,126,202 -.byte 102,72,15,126,206 - decq %r12 - movq 8(%rbp),%r13 - xorq %r8,%r8 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp .Lsqrx4x_sub_entry - -.align 16 -.Lsqrx4x_sub: - movq 0(%rbp),%r12 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 -.Lsqrx4x_sub_entry: - andnq %rax,%r12,%r12 - leaq 32(%rbp),%rbp - andnq %rax,%r13,%r13 - andnq %rax,%r14,%r14 - andnq %rax,%r15,%r15 - - negq %r8 - adcq 0(%rdi),%r12 - adcq 8(%rdi),%r13 - adcq 16(%rdi),%r14 - adcq 24(%rdi),%r15 - movq %r12,0(%rdx) - leaq 32(%rdi),%rdi - movq %r13,8(%rdx) - sbbq 
%r8,%r8 - movq %r14,16(%rdx) - movq %r15,24(%rdx) - leaq 32(%rdx),%rdx - - incq %rcx - jnz .Lsqrx4x_sub - - negq %r9 - - .byte 0xf3,0xc3 -.cfi_endproc -.size __bn_postx4x_internal,.-__bn_postx4x_internal -.globl bn_scatter5 -.hidden bn_scatter5 -.type bn_scatter5,@function -.align 16 -bn_scatter5: -.cfi_startproc - cmpl $0,%esi - jz .Lscatter_epilogue - leaq (%rdx,%rcx,8),%rdx -.Lscatter: - movq (%rdi),%rax - leaq 8(%rdi),%rdi - movq %rax,(%rdx) - leaq 256(%rdx),%rdx - subl $1,%esi - jnz .Lscatter -.Lscatter_epilogue: - .byte 0xf3,0xc3 -.cfi_endproc -.size bn_scatter5,.-bn_scatter5 - -.globl bn_gather5 -.hidden bn_gather5 -.type bn_gather5,@function -.align 32 -bn_gather5: -.cfi_startproc -.LSEH_begin_bn_gather5: - -.byte 0x4c,0x8d,0x14,0x24 -.cfi_def_cfa_register %r10 -.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 - leaq .Linc(%rip),%rax - andq $-16,%rsp - - movd %ecx,%xmm5 - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 128(%rdx),%r11 - leaq 128(%rsp),%rax - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,-128(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,-112(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,-96(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,-80(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,-64(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,-48(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,-32(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,-16(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,0(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,16(%rax) - movdqa %xmm4,%xmm1 
- - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,32(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,48(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,64(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,80(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,96(%rax) - movdqa %xmm4,%xmm2 - movdqa %xmm3,112(%rax) - jmp .Lgather - -.align 32 -.Lgather: - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - movdqa -128(%r11),%xmm0 - movdqa -112(%r11),%xmm1 - movdqa -96(%r11),%xmm2 - pand -128(%rax),%xmm0 - movdqa -80(%r11),%xmm3 - pand -112(%rax),%xmm1 - por %xmm0,%xmm4 - pand -96(%rax),%xmm2 - por %xmm1,%xmm5 - pand -80(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r11),%xmm0 - movdqa -48(%r11),%xmm1 - movdqa -32(%r11),%xmm2 - pand -64(%rax),%xmm0 - movdqa -16(%r11),%xmm3 - pand -48(%rax),%xmm1 - por %xmm0,%xmm4 - pand -32(%rax),%xmm2 - por %xmm1,%xmm5 - pand -16(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r11),%xmm0 - movdqa 16(%r11),%xmm1 - movdqa 32(%r11),%xmm2 - pand 0(%rax),%xmm0 - movdqa 48(%r11),%xmm3 - pand 16(%rax),%xmm1 - por %xmm0,%xmm4 - pand 32(%rax),%xmm2 - por %xmm1,%xmm5 - pand 48(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r11),%xmm0 - movdqa 80(%r11),%xmm1 - movdqa 96(%r11),%xmm2 - pand 64(%rax),%xmm0 - movdqa 112(%r11),%xmm3 - pand 80(%rax),%xmm1 - por %xmm0,%xmm4 - pand 96(%rax),%xmm2 - por %xmm1,%xmm5 - pand 112(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - leaq 256(%r11),%r11 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - movq %xmm0,(%rdi) - leaq 8(%rdi),%rdi - subl $1,%esi - jnz .Lgather - - leaq (%r10),%rsp -.cfi_def_cfa_register %rsp - .byte 0xf3,0xc3 -.LSEH_end_bn_gather5: -.cfi_endproc -.size bn_gather5,.-bn_gather5 -.align 64 -.Linc: -.long 0,0, 1,1 -.long 2,2, 2,2 -.byte 
77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/test/trampoline-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/test/trampoline-x86_64.S deleted file mode 100644 index 9f7c0d817c..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/test/trampoline-x86_64.S +++ /dev/null @@ -1,518 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - - - - -.type abi_test_trampoline, @function -.globl abi_test_trampoline -.hidden abi_test_trampoline -.align 16 -abi_test_trampoline: -.Labi_test_trampoline_seh_begin: -.cfi_startproc - - - - - - - - - - subq $120,%rsp -.cfi_adjust_cfa_offset 120 -.Labi_test_trampoline_seh_prolog_alloc: - movq %r8,48(%rsp) - movq %rbx,64(%rsp) -.cfi_offset rbx, -64 -.Labi_test_trampoline_seh_prolog_rbx: - movq %rbp,72(%rsp) -.cfi_offset rbp, -56 -.Labi_test_trampoline_seh_prolog_rbp: - movq %r12,80(%rsp) -.cfi_offset r12, -48 -.Labi_test_trampoline_seh_prolog_r12: - movq %r13,88(%rsp) -.cfi_offset r13, -40 -.Labi_test_trampoline_seh_prolog_r13: - movq %r14,96(%rsp) -.cfi_offset r14, -32 -.Labi_test_trampoline_seh_prolog_r14: - movq %r15,104(%rsp) -.cfi_offset r15, -24 -.Labi_test_trampoline_seh_prolog_r15: -.Labi_test_trampoline_seh_prolog_end: - movq 0(%rsi),%rbx - movq 8(%rsi),%rbp - movq 16(%rsi),%r12 - movq 
24(%rsi),%r13 - movq 32(%rsi),%r14 - movq 40(%rsi),%r15 - - movq %rdi,32(%rsp) - movq %rsi,40(%rsp) - - - - - movq %rdx,%r10 - movq %rcx,%r11 - decq %r11 - js .Largs_done - movq (%r10),%rdi - addq $8,%r10 - decq %r11 - js .Largs_done - movq (%r10),%rsi - addq $8,%r10 - decq %r11 - js .Largs_done - movq (%r10),%rdx - addq $8,%r10 - decq %r11 - js .Largs_done - movq (%r10),%rcx - addq $8,%r10 - decq %r11 - js .Largs_done - movq (%r10),%r8 - addq $8,%r10 - decq %r11 - js .Largs_done - movq (%r10),%r9 - addq $8,%r10 - leaq 0(%rsp),%rax -.Largs_loop: - decq %r11 - js .Largs_done - - - - - - - movq %r11,56(%rsp) - movq (%r10),%r11 - movq %r11,(%rax) - movq 56(%rsp),%r11 - - addq $8,%r10 - addq $8,%rax - jmp .Largs_loop - -.Largs_done: - movq 32(%rsp),%rax - movq 48(%rsp),%r10 - testq %r10,%r10 - jz .Lno_unwind - - - pushfq - orq $0x100,0(%rsp) - popfq - - - - nop -.globl abi_test_unwind_start -.hidden abi_test_unwind_start -abi_test_unwind_start: - - call *%rax -.globl abi_test_unwind_return -.hidden abi_test_unwind_return -abi_test_unwind_return: - - - - - pushfq - andq $-0x101,0(%rsp) - popfq -.globl abi_test_unwind_stop -.hidden abi_test_unwind_stop -abi_test_unwind_stop: - - jmp .Lcall_done - -.Lno_unwind: - call *%rax - -.Lcall_done: - - movq 40(%rsp),%rsi - movq %rbx,0(%rsi) - movq %rbp,8(%rsi) - movq %r12,16(%rsi) - movq %r13,24(%rsi) - movq %r14,32(%rsi) - movq %r15,40(%rsi) - movq 64(%rsp),%rbx -.cfi_restore rbx - movq 72(%rsp),%rbp -.cfi_restore rbp - movq 80(%rsp),%r12 -.cfi_restore r12 - movq 88(%rsp),%r13 -.cfi_restore r13 - movq 96(%rsp),%r14 -.cfi_restore r14 - movq 104(%rsp),%r15 -.cfi_restore r15 - addq $120,%rsp -.cfi_adjust_cfa_offset -120 - - - .byte 0xf3,0xc3 -.cfi_endproc -.Labi_test_trampoline_seh_end: -.size abi_test_trampoline,.-abi_test_trampoline -.type abi_test_clobber_rax, @function -.globl abi_test_clobber_rax -.hidden abi_test_clobber_rax -.align 16 -abi_test_clobber_rax: - xorq %rax,%rax - .byte 0xf3,0xc3 -.size 
abi_test_clobber_rax,.-abi_test_clobber_rax -.type abi_test_clobber_rbx, @function -.globl abi_test_clobber_rbx -.hidden abi_test_clobber_rbx -.align 16 -abi_test_clobber_rbx: - xorq %rbx,%rbx - .byte 0xf3,0xc3 -.size abi_test_clobber_rbx,.-abi_test_clobber_rbx -.type abi_test_clobber_rcx, @function -.globl abi_test_clobber_rcx -.hidden abi_test_clobber_rcx -.align 16 -abi_test_clobber_rcx: - xorq %rcx,%rcx - .byte 0xf3,0xc3 -.size abi_test_clobber_rcx,.-abi_test_clobber_rcx -.type abi_test_clobber_rdx, @function -.globl abi_test_clobber_rdx -.hidden abi_test_clobber_rdx -.align 16 -abi_test_clobber_rdx: - xorq %rdx,%rdx - .byte 0xf3,0xc3 -.size abi_test_clobber_rdx,.-abi_test_clobber_rdx -.type abi_test_clobber_rdi, @function -.globl abi_test_clobber_rdi -.hidden abi_test_clobber_rdi -.align 16 -abi_test_clobber_rdi: - xorq %rdi,%rdi - .byte 0xf3,0xc3 -.size abi_test_clobber_rdi,.-abi_test_clobber_rdi -.type abi_test_clobber_rsi, @function -.globl abi_test_clobber_rsi -.hidden abi_test_clobber_rsi -.align 16 -abi_test_clobber_rsi: - xorq %rsi,%rsi - .byte 0xf3,0xc3 -.size abi_test_clobber_rsi,.-abi_test_clobber_rsi -.type abi_test_clobber_rbp, @function -.globl abi_test_clobber_rbp -.hidden abi_test_clobber_rbp -.align 16 -abi_test_clobber_rbp: - xorq %rbp,%rbp - .byte 0xf3,0xc3 -.size abi_test_clobber_rbp,.-abi_test_clobber_rbp -.type abi_test_clobber_r8, @function -.globl abi_test_clobber_r8 -.hidden abi_test_clobber_r8 -.align 16 -abi_test_clobber_r8: - xorq %r8,%r8 - .byte 0xf3,0xc3 -.size abi_test_clobber_r8,.-abi_test_clobber_r8 -.type abi_test_clobber_r9, @function -.globl abi_test_clobber_r9 -.hidden abi_test_clobber_r9 -.align 16 -abi_test_clobber_r9: - xorq %r9,%r9 - .byte 0xf3,0xc3 -.size abi_test_clobber_r9,.-abi_test_clobber_r9 -.type abi_test_clobber_r10, @function -.globl abi_test_clobber_r10 -.hidden abi_test_clobber_r10 -.align 16 -abi_test_clobber_r10: - xorq %r10,%r10 - .byte 0xf3,0xc3 -.size abi_test_clobber_r10,.-abi_test_clobber_r10 -.type 
abi_test_clobber_r11, @function -.globl abi_test_clobber_r11 -.hidden abi_test_clobber_r11 -.align 16 -abi_test_clobber_r11: - xorq %r11,%r11 - .byte 0xf3,0xc3 -.size abi_test_clobber_r11,.-abi_test_clobber_r11 -.type abi_test_clobber_r12, @function -.globl abi_test_clobber_r12 -.hidden abi_test_clobber_r12 -.align 16 -abi_test_clobber_r12: - xorq %r12,%r12 - .byte 0xf3,0xc3 -.size abi_test_clobber_r12,.-abi_test_clobber_r12 -.type abi_test_clobber_r13, @function -.globl abi_test_clobber_r13 -.hidden abi_test_clobber_r13 -.align 16 -abi_test_clobber_r13: - xorq %r13,%r13 - .byte 0xf3,0xc3 -.size abi_test_clobber_r13,.-abi_test_clobber_r13 -.type abi_test_clobber_r14, @function -.globl abi_test_clobber_r14 -.hidden abi_test_clobber_r14 -.align 16 -abi_test_clobber_r14: - xorq %r14,%r14 - .byte 0xf3,0xc3 -.size abi_test_clobber_r14,.-abi_test_clobber_r14 -.type abi_test_clobber_r15, @function -.globl abi_test_clobber_r15 -.hidden abi_test_clobber_r15 -.align 16 -abi_test_clobber_r15: - xorq %r15,%r15 - .byte 0xf3,0xc3 -.size abi_test_clobber_r15,.-abi_test_clobber_r15 -.type abi_test_clobber_xmm0, @function -.globl abi_test_clobber_xmm0 -.hidden abi_test_clobber_xmm0 -.align 16 -abi_test_clobber_xmm0: - pxor %xmm0,%xmm0 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm0,.-abi_test_clobber_xmm0 -.type abi_test_clobber_xmm1, @function -.globl abi_test_clobber_xmm1 -.hidden abi_test_clobber_xmm1 -.align 16 -abi_test_clobber_xmm1: - pxor %xmm1,%xmm1 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm1,.-abi_test_clobber_xmm1 -.type abi_test_clobber_xmm2, @function -.globl abi_test_clobber_xmm2 -.hidden abi_test_clobber_xmm2 -.align 16 -abi_test_clobber_xmm2: - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm2,.-abi_test_clobber_xmm2 -.type abi_test_clobber_xmm3, @function -.globl abi_test_clobber_xmm3 -.hidden abi_test_clobber_xmm3 -.align 16 -abi_test_clobber_xmm3: - pxor %xmm3,%xmm3 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm3,.-abi_test_clobber_xmm3 -.type 
abi_test_clobber_xmm4, @function -.globl abi_test_clobber_xmm4 -.hidden abi_test_clobber_xmm4 -.align 16 -abi_test_clobber_xmm4: - pxor %xmm4,%xmm4 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm4,.-abi_test_clobber_xmm4 -.type abi_test_clobber_xmm5, @function -.globl abi_test_clobber_xmm5 -.hidden abi_test_clobber_xmm5 -.align 16 -abi_test_clobber_xmm5: - pxor %xmm5,%xmm5 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm5,.-abi_test_clobber_xmm5 -.type abi_test_clobber_xmm6, @function -.globl abi_test_clobber_xmm6 -.hidden abi_test_clobber_xmm6 -.align 16 -abi_test_clobber_xmm6: - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm6,.-abi_test_clobber_xmm6 -.type abi_test_clobber_xmm7, @function -.globl abi_test_clobber_xmm7 -.hidden abi_test_clobber_xmm7 -.align 16 -abi_test_clobber_xmm7: - pxor %xmm7,%xmm7 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm7,.-abi_test_clobber_xmm7 -.type abi_test_clobber_xmm8, @function -.globl abi_test_clobber_xmm8 -.hidden abi_test_clobber_xmm8 -.align 16 -abi_test_clobber_xmm8: - pxor %xmm8,%xmm8 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm8,.-abi_test_clobber_xmm8 -.type abi_test_clobber_xmm9, @function -.globl abi_test_clobber_xmm9 -.hidden abi_test_clobber_xmm9 -.align 16 -abi_test_clobber_xmm9: - pxor %xmm9,%xmm9 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm9,.-abi_test_clobber_xmm9 -.type abi_test_clobber_xmm10, @function -.globl abi_test_clobber_xmm10 -.hidden abi_test_clobber_xmm10 -.align 16 -abi_test_clobber_xmm10: - pxor %xmm10,%xmm10 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm10,.-abi_test_clobber_xmm10 -.type abi_test_clobber_xmm11, @function -.globl abi_test_clobber_xmm11 -.hidden abi_test_clobber_xmm11 -.align 16 -abi_test_clobber_xmm11: - pxor %xmm11,%xmm11 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm11,.-abi_test_clobber_xmm11 -.type abi_test_clobber_xmm12, @function -.globl abi_test_clobber_xmm12 -.hidden abi_test_clobber_xmm12 -.align 16 -abi_test_clobber_xmm12: - pxor %xmm12,%xmm12 - .byte 0xf3,0xc3 
-.size abi_test_clobber_xmm12,.-abi_test_clobber_xmm12 -.type abi_test_clobber_xmm13, @function -.globl abi_test_clobber_xmm13 -.hidden abi_test_clobber_xmm13 -.align 16 -abi_test_clobber_xmm13: - pxor %xmm13,%xmm13 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm13,.-abi_test_clobber_xmm13 -.type abi_test_clobber_xmm14, @function -.globl abi_test_clobber_xmm14 -.hidden abi_test_clobber_xmm14 -.align 16 -abi_test_clobber_xmm14: - pxor %xmm14,%xmm14 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm14,.-abi_test_clobber_xmm14 -.type abi_test_clobber_xmm15, @function -.globl abi_test_clobber_xmm15 -.hidden abi_test_clobber_xmm15 -.align 16 -abi_test_clobber_xmm15: - pxor %xmm15,%xmm15 - .byte 0xf3,0xc3 -.size abi_test_clobber_xmm15,.-abi_test_clobber_xmm15 - - - -.type abi_test_bad_unwind_wrong_register, @function -.globl abi_test_bad_unwind_wrong_register -.hidden abi_test_bad_unwind_wrong_register -.align 16 -abi_test_bad_unwind_wrong_register: -.cfi_startproc -.Labi_test_bad_unwind_wrong_register_seh_begin: - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r13,-16 -.Labi_test_bad_unwind_wrong_register_seh_push_r13: - - - - nop - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r12 - .byte 0xf3,0xc3 -.Labi_test_bad_unwind_wrong_register_seh_end: -.cfi_endproc -.size abi_test_bad_unwind_wrong_register,.-abi_test_bad_unwind_wrong_register - - - - -.type abi_test_bad_unwind_temporary, @function -.globl abi_test_bad_unwind_temporary -.hidden abi_test_bad_unwind_temporary -.align 16 -abi_test_bad_unwind_temporary: -.cfi_startproc -.Labi_test_bad_unwind_temporary_seh_begin: - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset %r12,-16 -.Labi_test_bad_unwind_temporary_seh_push_r12: - - movq %r12,%rax - incq %rax - movq %rax,(%rsp) - - - - movq %r12,(%rsp) - - - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r12 - .byte 0xf3,0xc3 -.Labi_test_bad_unwind_temporary_seh_end: -.cfi_endproc -.size abi_test_bad_unwind_temporary,.-abi_test_bad_unwind_temporary - - - - -.type 
abi_test_set_direction_flag, @function -.globl abi_test_get_and_clear_direction_flag -.hidden abi_test_get_and_clear_direction_flag -abi_test_get_and_clear_direction_flag: - pushfq - popq %rax - andq $0x400,%rax - shrq $10,%rax - cld - .byte 0xf3,0xc3 -.size abi_test_get_and_clear_direction_flag,.-abi_test_get_and_clear_direction_flag - - - -.type abi_test_set_direction_flag, @function -.globl abi_test_set_direction_flag -.hidden abi_test_set_direction_flag -abi_test_set_direction_flag: - std - .byte 0xf3,0xc3 -.size abi_test_set_direction_flag,.-abi_test_set_direction_flag -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/linux-x86_64/crypto/third_party/sike/asm/fp-x86_64.S b/packager/third_party/boringssl/linux-x86_64/crypto/third_party/sike/asm/fp-x86_64.S deleted file mode 100644 index 07f708aa72..0000000000 --- a/packager/third_party/boringssl/linux-x86_64/crypto/third_party/sike/asm/fp-x86_64.S +++ /dev/null @@ -1,1871 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.Lp434x2: -.quad 0xFFFFFFFFFFFFFFFE -.quad 0xFFFFFFFFFFFFFFFF -.quad 0xFB82ECF5C5FFFFFF -.quad 0xF78CB8F062B15D47 -.quad 0xD9F8BFAD038A40AC -.quad 0x0004683E4E2EE688 - - -.Lp434p1: -.quad 0xFDC1767AE3000000 -.quad 0x7BC65C783158AEA3 -.quad 0x6CFC5FD681C52056 -.quad 0x0002341F27177344 - -.extern OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.hidden OPENSSL_ia32cap_P -.globl sike_fpadd -.hidden sike_fpadd -.type sike_fpadd,@function -sike_fpadd: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12, -16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13, -24 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14, -32 - - xorq %rax,%rax - - movq 0(%rdi),%r8 - addq 0(%rsi),%r8 - movq 8(%rdi),%r9 - adcq 8(%rsi),%r9 - movq 16(%rdi),%r10 - adcq 16(%rsi),%r10 - movq 24(%rdi),%r11 - adcq 24(%rsi),%r11 - movq 32(%rdi),%r12 - adcq 32(%rsi),%r12 - movq 40(%rdi),%r13 - adcq 40(%rsi),%r13 - movq 48(%rdi),%r14 - adcq 48(%rsi),%r14 - - movq .Lp434x2(%rip),%rcx - subq %rcx,%r8 - movq 8+.Lp434x2(%rip),%rcx - sbbq %rcx,%r9 - sbbq %rcx,%r10 - movq 16+.Lp434x2(%rip),%rcx - sbbq %rcx,%r11 - movq 24+.Lp434x2(%rip),%rcx - sbbq %rcx,%r12 - movq 32+.Lp434x2(%rip),%rcx - sbbq %rcx,%r13 - movq 40+.Lp434x2(%rip),%rcx - sbbq %rcx,%r14 - - sbbq $0,%rax - - movq .Lp434x2(%rip),%rdi - andq %rax,%rdi - movq 8+.Lp434x2(%rip),%rsi - andq %rax,%rsi - movq 16+.Lp434x2(%rip),%rcx - andq %rax,%rcx - - addq %rdi,%r8 - movq %r8,0(%rdx) - adcq %rsi,%r9 - movq %r9,8(%rdx) - adcq %rsi,%r10 - movq %r10,16(%rdx) - adcq %rcx,%r11 - movq %r11,24(%rdx) - - setc %cl - movq 24+.Lp434x2(%rip),%r8 - andq %rax,%r8 - movq 32+.Lp434x2(%rip),%r9 - andq %rax,%r9 - movq 40+.Lp434x2(%rip),%r10 - andq %rax,%r10 - btq $0,%rcx - - adcq %r8,%r12 - movq 
%r12,32(%rdx) - adcq %r9,%r13 - movq %r13,40(%rdx) - adcq %r10,%r14 - movq %r14,48(%rdx) - - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -.globl sike_cswap_asm -.hidden sike_cswap_asm -.type sike_cswap_asm,@function -sike_cswap_asm: - - - movq %rdx,%xmm3 - - - - - - pshufd $68,%xmm3,%xmm3 - - movdqu 0(%rdi),%xmm0 - movdqu 0(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,0(%rdi) - movdqu %xmm1,0(%rsi) - - movdqu 16(%rdi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,16(%rdi) - movdqu %xmm1,16(%rsi) - - movdqu 32(%rdi),%xmm0 - movdqu 32(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,32(%rdi) - movdqu %xmm1,32(%rsi) - - movdqu 48(%rdi),%xmm0 - movdqu 48(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,48(%rdi) - movdqu %xmm1,48(%rsi) - - movdqu 64(%rdi),%xmm0 - movdqu 64(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,64(%rdi) - movdqu %xmm1,64(%rsi) - - movdqu 80(%rdi),%xmm0 - movdqu 80(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,80(%rdi) - movdqu %xmm1,80(%rsi) - - movdqu 96(%rdi),%xmm0 - movdqu 96(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,96(%rdi) - movdqu %xmm1,96(%rsi) - - movdqu 112(%rdi),%xmm0 - movdqu 112(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,112(%rdi) - movdqu %xmm1,112(%rsi) - - movdqu 128(%rdi),%xmm0 - movdqu 
128(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,128(%rdi) - movdqu %xmm1,128(%rsi) - - movdqu 144(%rdi),%xmm0 - movdqu 144(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,144(%rdi) - movdqu %xmm1,144(%rsi) - - movdqu 160(%rdi),%xmm0 - movdqu 160(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,160(%rdi) - movdqu %xmm1,160(%rsi) - - movdqu 176(%rdi),%xmm0 - movdqu 176(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,176(%rdi) - movdqu %xmm1,176(%rsi) - - movdqu 192(%rdi),%xmm0 - movdqu 192(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,192(%rdi) - movdqu %xmm1,192(%rsi) - - movdqu 208(%rdi),%xmm0 - movdqu 208(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,208(%rdi) - movdqu %xmm1,208(%rsi) - - .byte 0xf3,0xc3 -.globl sike_fpsub -.hidden sike_fpsub -.type sike_fpsub,@function -sike_fpsub: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12, -16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13, -24 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14, -32 - - xorq %rax,%rax - - movq 0(%rdi),%r8 - subq 0(%rsi),%r8 - movq 8(%rdi),%r9 - sbbq 8(%rsi),%r9 - movq 16(%rdi),%r10 - sbbq 16(%rsi),%r10 - movq 24(%rdi),%r11 - sbbq 24(%rsi),%r11 - movq 32(%rdi),%r12 - sbbq 32(%rsi),%r12 - movq 40(%rdi),%r13 - sbbq 40(%rsi),%r13 - movq 48(%rdi),%r14 - sbbq 48(%rsi),%r14 - - sbbq $0x0,%rax - - movq .Lp434x2(%rip),%rdi - andq %rax,%rdi - movq 8+.Lp434x2(%rip),%rsi - andq %rax,%rsi - movq 16+.Lp434x2(%rip),%rcx - andq %rax,%rcx - - addq %rdi,%r8 - movq %r8,0(%rdx) - adcq %rsi,%r9 - movq %r9,8(%rdx) - 
adcq %rsi,%r10 - movq %r10,16(%rdx) - adcq %rcx,%r11 - movq %r11,24(%rdx) - - setc %cl - movq 24+.Lp434x2(%rip),%r8 - andq %rax,%r8 - movq 32+.Lp434x2(%rip),%r9 - andq %rax,%r9 - movq 40+.Lp434x2(%rip),%r10 - andq %rax,%r10 - btq $0x0,%rcx - - adcq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - movq %r12,32(%rdx) - movq %r13,40(%rdx) - movq %r14,48(%rdx) - - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -.globl sike_mpadd_asm -.hidden sike_mpadd_asm -.type sike_mpadd_asm,@function -sike_mpadd_asm: -.cfi_startproc - movq 0(%rdi),%r8; - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%rcx - addq 0(%rsi),%r8 - adcq 8(%rsi),%r9 - adcq 16(%rsi),%r10 - adcq 24(%rsi),%r11 - adcq 32(%rsi),%rcx - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %rcx,32(%rdx) - - movq 40(%rdi),%r8 - movq 48(%rdi),%r9 - adcq 40(%rsi),%r8 - adcq 48(%rsi),%r9 - movq %r8,40(%rdx) - movq %r9,48(%rdx) - .byte 0xf3,0xc3 -.cfi_endproc -.globl sike_mpsubx2_asm -.hidden sike_mpsubx2_asm -.type sike_mpsubx2_asm,@function -sike_mpsubx2_asm: -.cfi_startproc - xorq %rax,%rax - - movq 0(%rdi),%r8 - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%rcx - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - sbbq 24(%rsi),%r11 - sbbq 32(%rsi),%rcx - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %rcx,32(%rdx) - - movq 40(%rdi),%r8 - movq 48(%rdi),%r9 - movq 56(%rdi),%r10 - movq 64(%rdi),%r11 - movq 72(%rdi),%rcx - sbbq 40(%rsi),%r8 - sbbq 48(%rsi),%r9 - sbbq 56(%rsi),%r10 - sbbq 64(%rsi),%r11 - sbbq 72(%rsi),%rcx - movq %r8,40(%rdx) - movq %r9,48(%rdx) - movq %r10,56(%rdx) - movq %r11,64(%rdx) - movq %rcx,72(%rdx) - - movq 80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - sbbq 80(%rsi),%r8 - sbbq 88(%rsi),%r9 - sbbq 96(%rsi),%r10 - sbbq 104(%rsi),%r11 - 
sbbq $0x0,%rax - movq %r8,80(%rdx) - movq %r9,88(%rdx) - movq %r10,96(%rdx) - movq %r11,104(%rdx) - .byte 0xf3,0xc3 -.cfi_endproc -.globl sike_mpdblsubx2_asm -.hidden sike_mpdblsubx2_asm -.type sike_mpdblsubx2_asm,@function -sike_mpdblsubx2_asm: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12, -16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13, -24 - - xorq %rax,%rax - - - movq 0(%rdx),%r8 - movq 8(%rdx),%r9 - movq 16(%rdx),%r10 - movq 24(%rdx),%r11 - movq 32(%rdx),%r12 - movq 40(%rdx),%r13 - movq 48(%rdx),%rcx - subq 0(%rdi),%r8 - sbbq 8(%rdi),%r9 - sbbq 16(%rdi),%r10 - sbbq 24(%rdi),%r11 - sbbq 32(%rdi),%r12 - sbbq 40(%rdi),%r13 - sbbq 48(%rdi),%rcx - adcq $0x0,%rax - - - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - sbbq 24(%rsi),%r11 - sbbq 32(%rsi),%r12 - sbbq 40(%rsi),%r13 - sbbq 48(%rsi),%rcx - adcq $0x0,%rax - - - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %r12,32(%rdx) - movq %r13,40(%rdx) - movq %rcx,48(%rdx) - - - movq 56(%rdx),%r8 - movq 64(%rdx),%r9 - movq 72(%rdx),%r10 - movq 80(%rdx),%r11 - movq 88(%rdx),%r12 - movq 96(%rdx),%r13 - movq 104(%rdx),%rcx - - subq %rax,%r8 - sbbq 56(%rdi),%r8 - sbbq 64(%rdi),%r9 - sbbq 72(%rdi),%r10 - sbbq 80(%rdi),%r11 - sbbq 88(%rdi),%r12 - sbbq 96(%rdi),%r13 - sbbq 104(%rdi),%rcx - - - subq 56(%rsi),%r8 - sbbq 64(%rsi),%r9 - sbbq 72(%rsi),%r10 - sbbq 80(%rsi),%r11 - sbbq 88(%rsi),%r12 - sbbq 96(%rsi),%r13 - sbbq 104(%rsi),%rcx - - - movq %r8,56(%rdx) - movq %r9,64(%rdx) - movq %r10,72(%rdx) - movq %r11,80(%rdx) - movq %r12,88(%rdx) - movq %r13,96(%rdx) - movq %rcx,104(%rdx) - - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc - -.Lrdc_bdw: -.cfi_startproc - -.cfi_adjust_cfa_offset 32 -.cfi_offset r12, -16 -.cfi_offset r13, -24 -.cfi_offset r14, -32 -.cfi_offset r15, -40 - - xorq %rax,%rax - movq 0+0(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r8,%r9 - mulxq 
8+.Lp434p1(%rip),%r12,%r10 - mulxq 16+.Lp434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+.Lp434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 0+8(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+.Lp434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 24(%rdi),%r8 - adcq 32(%rdi),%r9 - adcq 40(%rdi),%r10 - adcq 48(%rdi),%r11 - adcq 56(%rdi),%r12 - adcq 64(%rdi),%r13 - adcq 72(%rdi),%rcx - movq %r8,24(%rdi) - movq %r9,32(%rdi) - movq %r10,40(%rdi) - movq %r11,48(%rdi) - movq %r12,56(%rdi) - movq %r13,64(%rdi) - movq %rcx,72(%rdi) - movq 80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - adcq $0x0,%r8 - adcq $0x0,%r9 - adcq $0x0,%r10 - adcq $0x0,%r11 - movq %r8,80(%rdi) - movq %r9,88(%rdi) - movq %r10,96(%rdi) - movq %r11,104(%rdi) - - xorq %rax,%rax - movq 16+0(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r8,%r9 - mulxq 8+.Lp434p1(%rip),%r12,%r10 - mulxq 16+.Lp434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+.Lp434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 16+8(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+.Lp434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 40(%rdi),%r8 - adcq 48(%rdi),%r9 - adcq 56(%rdi),%r10 - adcq 64(%rdi),%r11 - adcq 72(%rdi),%r12 - adcq 80(%rdi),%r13 - adcq 88(%rdi),%rcx - movq %r8,40(%rdi) - movq %r9,48(%rdi) - movq %r10,56(%rdi) - movq %r11,64(%rdi) - movq %r12,72(%rdi) - movq %r13,80(%rdi) - movq %rcx,88(%rdi) - movq 
96(%rdi),%r8 - movq 104(%rdi),%r9 - adcq $0x0,%r8 - adcq $0x0,%r9 - movq %r8,96(%rdi) - movq %r9,104(%rdi) - - xorq %rax,%rax - movq 32+0(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r8,%r9 - mulxq 8+.Lp434p1(%rip),%r12,%r10 - mulxq 16+.Lp434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+.Lp434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 32+8(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+.Lp434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+.Lp434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 56(%rdi),%r8 - adcq 64(%rdi),%r9 - adcq 72(%rdi),%r10 - adcq 80(%rdi),%r11 - adcq 88(%rdi),%r12 - adcq 96(%rdi),%r13 - adcq 104(%rdi),%rcx - movq %r8,0(%rsi) - movq %r9,8(%rsi) - movq %r10,72(%rdi) - movq %r11,80(%rdi) - movq %r12,88(%rdi) - movq %r13,96(%rdi) - movq %rcx,104(%rdi) - - xorq %rax,%rax - movq 48(%rdi),%rdx - mulxq 0+.Lp434p1(%rip),%r8,%r9 - mulxq 8+.Lp434p1(%rip),%r12,%r10 - mulxq 16+.Lp434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+.Lp434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - addq 72(%rdi),%r8 - adcq 80(%rdi),%r9 - adcq 88(%rdi),%r10 - adcq 96(%rdi),%r11 - adcq 104(%rdi),%r12 - movq %r8,16(%rsi) - movq %r9,24(%rsi) - movq %r10,32(%rsi) - movq %r11,40(%rsi) - movq %r12,48(%rsi) - - - popq %r15 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r15 - popq %r14 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r14 - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r13 - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r12 - .byte 0xf3,0xc3 -.cfi_endproc -.globl sike_fprdc -.hidden sike_fprdc -.type sike_fprdc,@function -sike_fprdc: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12, -16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13, -24 - pushq %r14 
-.cfi_adjust_cfa_offset 8 -.cfi_offset r14, -32 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset r15, -40 - - - - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lrdc_bdw - - - - - movq 0+0(%rdi),%r14 - movq 0+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 0+8(%rdi),%rcx - movq 0+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+.Lp434p1(%rip),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 24+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 24(%rdi),%r8 - adcq 32(%rdi),%r9 - adcq 40(%rdi),%r10 - adcq 48(%rdi),%r11 - adcq 56(%rdi),%r12 - adcq 64(%rdi),%r13 - adcq 72(%rdi),%rcx - movq %r8,24(%rdi) - movq %r9,32(%rdi) - movq %r10,40(%rdi) - movq %r11,48(%rdi) - movq %r12,56(%rdi) - movq %r13,64(%rdi) - movq %rcx,72(%rdi) - movq 80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - adcq $0x0,%r8 - adcq $0x0,%r9 - adcq $0x0,%r10 - adcq $0x0,%r11 - movq %r8,80(%rdi) - movq %r9,88(%rdi) - movq %r10,96(%rdi) - movq %r11,104(%rdi) - - - movq 16+0(%rdi),%r14 - movq 0+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 16+8(%rdi),%rcx - movq 0+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+.Lp434p1(%rip),%rax - mulq %r14 - 
addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 24+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 40(%rdi),%r8 - adcq 48(%rdi),%r9 - adcq 56(%rdi),%r10 - adcq 64(%rdi),%r11 - adcq 72(%rdi),%r12 - adcq 80(%rdi),%r13 - adcq 88(%rdi),%rcx - movq %r8,40(%rdi) - movq %r9,48(%rdi) - movq %r10,56(%rdi) - movq %r11,64(%rdi) - movq %r12,72(%rdi) - movq %r13,80(%rdi) - movq %rcx,88(%rdi) - movq 96(%rdi),%r8 - movq 104(%rdi),%r9 - adcq $0x0,%r8 - adcq $0x0,%r9 - movq %r8,96(%rdi) - movq %r9,104(%rdi) - - - movq 32+0(%rdi),%r14 - movq 0+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 32+8(%rdi),%rcx - movq 0+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+.Lp434p1(%rip),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 24+.Lp434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+.Lp434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 56(%rdi),%r8 - adcq 64(%rdi),%r9 - adcq 72(%rdi),%r10 - adcq 80(%rdi),%r11 - adcq 88(%rdi),%r12 - adcq 96(%rdi),%r13 - adcq 104(%rdi),%rcx - movq %r8,0(%rsi) - movq %r9,8(%rsi) - movq %r10,72(%rdi) - movq %r11,80(%rdi) - movq %r12,88(%rdi) - movq %r13,96(%rdi) - movq 
%rcx,104(%rdi) - - movq 48(%rdi),%r13 - - xorq %r10,%r10 - movq 0+.Lp434p1(%rip),%rax - mulq %r13 - movq %rax,%r8 - movq %rdx,%r9 - - xorq %r11,%r11 - movq 8+.Lp434p1(%rip),%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - - xorq %r12,%r12 - movq 16+.Lp434p1(%rip),%rax - mulq %r13 - addq %rax,%r10 - adcq %rdx,%r11 - - movq 24+.Lp434p1(%rip),%rax - mulq %r13 - addq %rax,%r11 - adcq %rdx,%r12 - - addq 72(%rdi),%r8 - adcq 80(%rdi),%r9 - adcq 88(%rdi),%r10 - adcq 96(%rdi),%r11 - adcq 104(%rdi),%r12 - movq %r8,16(%rsi) - movq %r9,24(%rsi) - movq %r10,32(%rsi) - movq %r11,40(%rsi) - movq %r12,48(%rsi) - - - popq %r15 -.cfi_adjust_cfa_offset -8 - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 -.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -.Lmul_bdw: -.cfi_startproc - -.cfi_adjust_cfa_offset 32 -.cfi_offset r12, -16 -.cfi_offset r13, -24 -.cfi_offset r14, -32 -.cfi_offset r15, -40 - - - movq %rdx,%rcx - xorq %rax,%rax - - - movq 0(%rdi),%r8 - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - - pushq %rbx -.cfi_adjust_cfa_offset 8 -.cfi_offset rbx, -48 - pushq %rbp -.cfi_offset rbp, -56 -.cfi_adjust_cfa_offset 8 - subq $96,%rsp -.cfi_adjust_cfa_offset 96 - - addq 32(%rdi),%r8 - adcq 40(%rdi),%r9 - adcq 48(%rdi),%r10 - adcq $0x0,%r11 - sbbq $0x0,%rax - movq %r8,0(%rsp) - movq %r9,8(%rsp) - movq %r10,16(%rsp) - movq %r11,24(%rsp) - - - xorq %rbx,%rbx - movq 0(%rsi),%r12 - movq 8(%rsi),%r13 - movq 16(%rsi),%r14 - movq 24(%rsi),%r15 - addq 32(%rsi),%r12 - adcq 40(%rsi),%r13 - adcq 48(%rsi),%r14 - adcq $0x0,%r15 - sbbq $0x0,%rbx - movq %r12,32(%rsp) - movq %r13,40(%rsp) - movq %r14,48(%rsp) - movq %r15,56(%rsp) - - - andq %rax,%r12 - andq %rax,%r13 - andq %rax,%r14 - andq %rax,%r15 - - - andq %rbx,%r8 - andq %rbx,%r9 - andq %rbx,%r10 - andq %rbx,%r11 - - - addq %r12,%r8 - adcq %r13,%r9 - adcq %r14,%r10 - adcq %r15,%r11 - movq %r8,64(%rsp) - movq %r9,72(%rsp) - movq %r10,80(%rsp) - movq %r11,88(%rsp) - - - movq 
0+0(%rsp),%rdx - mulxq 32+0(%rsp),%r9,%r8 - movq %r9,0+0(%rsp) - mulxq 32+8(%rsp),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 32+16(%rsp),%r11,%r10 - adoxq %r11,%r9 - mulxq 32+24(%rsp),%r12,%r11 - adoxq %r12,%r10 - - movq 0+8(%rsp),%rdx - mulxq 32+0(%rsp),%r12,%r13 - adoxq %rax,%r11 - xorq %rax,%rax - mulxq 32+8(%rsp),%r15,%r14 - adoxq %r8,%r12 - movq %r12,0+8(%rsp) - adcxq %r15,%r13 - mulxq 32+16(%rsp),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r9,%r13 - mulxq 32+24(%rsp),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r10,%r14 - - movq 0+16(%rsp),%rdx - mulxq 32+0(%rsp),%r8,%r9 - adoxq %r11,%r15 - adoxq %rax,%rbx - xorq %rax,%rax - mulxq 32+8(%rsp),%r11,%r10 - adoxq %r13,%r8 - movq %r8,0+16(%rsp) - adcxq %r11,%r9 - mulxq 32+16(%rsp),%r12,%r11 - adcxq %r12,%r10 - adoxq %r14,%r9 - mulxq 32+24(%rsp),%rbp,%r12 - adcxq %rbp,%r11 - adcxq %rax,%r12 - - adoxq %r15,%r10 - adoxq %rbx,%r11 - adoxq %rax,%r12 - - movq 0+24(%rsp),%rdx - mulxq 32+0(%rsp),%r8,%r13 - xorq %rax,%rax - mulxq 32+8(%rsp),%r15,%r14 - adcxq %r15,%r13 - adoxq %r8,%r9 - mulxq 32+16(%rsp),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r13,%r10 - mulxq 32+24(%rsp),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r14,%r11 - adoxq %r15,%r12 - adoxq %rax,%rbx - movq %r9,0+24(%rsp) - movq %r10,0+32(%rsp) - movq %r11,0+40(%rsp) - movq %r12,0+48(%rsp) - movq %rbx,0+56(%rsp) - - - - movq 0+0(%rdi),%rdx - mulxq 0+0(%rsi),%r9,%r8 - movq %r9,0+0(%rcx) - mulxq 0+8(%rsi),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 0+16(%rsi),%r11,%r10 - adoxq %r11,%r9 - mulxq 0+24(%rsi),%r12,%r11 - adoxq %r12,%r10 - - movq 0+8(%rdi),%rdx - mulxq 0+0(%rsi),%r12,%r13 - adoxq %rax,%r11 - xorq %rax,%rax - mulxq 0+8(%rsi),%r15,%r14 - adoxq %r8,%r12 - movq %r12,0+8(%rcx) - adcxq %r15,%r13 - mulxq 0+16(%rsi),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r9,%r13 - mulxq 0+24(%rsi),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r10,%r14 - - movq 0+16(%rdi),%rdx - mulxq 0+0(%rsi),%r8,%r9 - adoxq %r11,%r15 - adoxq %rax,%rbx - xorq 
%rax,%rax - mulxq 0+8(%rsi),%r11,%r10 - adoxq %r13,%r8 - movq %r8,0+16(%rcx) - adcxq %r11,%r9 - mulxq 0+16(%rsi),%r12,%r11 - adcxq %r12,%r10 - adoxq %r14,%r9 - mulxq 0+24(%rsi),%rbp,%r12 - adcxq %rbp,%r11 - adcxq %rax,%r12 - - adoxq %r15,%r10 - adoxq %rbx,%r11 - adoxq %rax,%r12 - - movq 0+24(%rdi),%rdx - mulxq 0+0(%rsi),%r8,%r13 - xorq %rax,%rax - mulxq 0+8(%rsi),%r15,%r14 - adcxq %r15,%r13 - adoxq %r8,%r9 - mulxq 0+16(%rsi),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r13,%r10 - mulxq 0+24(%rsi),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r14,%r11 - adoxq %r15,%r12 - adoxq %rax,%rbx - movq %r9,0+24(%rcx) - movq %r10,0+32(%rcx) - movq %r11,0+40(%rcx) - movq %r12,0+48(%rcx) - movq %rbx,0+56(%rcx) - - - - movq 32+0(%rdi),%rdx - mulxq 32+0(%rsi),%r9,%r8 - movq %r9,64+0(%rcx) - mulxq 32+8(%rsi),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 32+16(%rsi),%r11,%r10 - adoxq %r11,%r9 - - movq 32+8(%rdi),%rdx - mulxq 32+0(%rsi),%r12,%r11 - adoxq %rax,%r10 - xorq %rax,%rax - - mulxq 32+8(%rsi),%r14,%r13 - adoxq %r8,%r12 - movq %r12,64+8(%rcx) - adcxq %r14,%r11 - - mulxq 32+16(%rsi),%r8,%r14 - adoxq %r9,%r11 - adcxq %r8,%r13 - adcxq %rax,%r14 - adoxq %r10,%r13 - - movq 32+16(%rdi),%rdx - mulxq 32+0(%rsi),%r8,%r9 - adoxq %rax,%r14 - xorq %rax,%rax - - mulxq 32+8(%rsi),%r10,%r12 - adoxq %r11,%r8 - movq %r8,64+16(%rcx) - adcxq %r13,%r9 - - mulxq 32+16(%rsi),%r11,%r8 - adcxq %r14,%r12 - adcxq %rax,%r8 - adoxq %r10,%r9 - adoxq %r12,%r11 - adoxq %rax,%r8 - movq %r9,64+24(%rcx) - movq %r11,64+32(%rcx) - movq %r8,64+40(%rcx) - - - - - movq 64(%rsp),%r8 - movq 72(%rsp),%r9 - movq 80(%rsp),%r10 - movq 88(%rsp),%r11 - - movq 32(%rsp),%rax - addq %rax,%r8 - movq 40(%rsp),%rax - adcq %rax,%r9 - movq 48(%rsp),%rax - adcq %rax,%r10 - movq 56(%rsp),%rax - adcq %rax,%r11 - - - movq 0(%rsp),%r12 - movq 8(%rsp),%r13 - movq 16(%rsp),%r14 - movq 24(%rsp),%r15 - subq 0(%rcx),%r12 - sbbq 8(%rcx),%r13 - sbbq 16(%rcx),%r14 - sbbq 24(%rcx),%r15 - sbbq 32(%rcx),%r8 - sbbq 40(%rcx),%r9 - sbbq 
48(%rcx),%r10 - sbbq 56(%rcx),%r11 - - - subq 64(%rcx),%r12 - sbbq 72(%rcx),%r13 - sbbq 80(%rcx),%r14 - sbbq 88(%rcx),%r15 - sbbq 96(%rcx),%r8 - sbbq 104(%rcx),%r9 - sbbq $0x0,%r10 - sbbq $0x0,%r11 - - addq 32(%rcx),%r12 - movq %r12,32(%rcx) - adcq 40(%rcx),%r13 - movq %r13,40(%rcx) - adcq 48(%rcx),%r14 - movq %r14,48(%rcx) - adcq 56(%rcx),%r15 - movq %r15,56(%rcx) - adcq 64(%rcx),%r8 - movq %r8,64(%rcx) - adcq 72(%rcx),%r9 - movq %r9,72(%rcx) - adcq 80(%rcx),%r10 - movq %r10,80(%rcx) - adcq 88(%rcx),%r11 - movq %r11,88(%rcx) - movq 96(%rcx),%r12 - adcq $0x0,%r12 - movq %r12,96(%rcx) - movq 104(%rcx),%r13 - adcq $0x0,%r13 - movq %r13,104(%rcx) - - addq $96,%rsp -.cfi_adjust_cfa_offset -96 - popq %rbp -.cfi_adjust_cfa_offset -8 -.cfi_same_value rbp - popq %rbx -.cfi_adjust_cfa_offset -8 -.cfi_same_value rbx - - - popq %r15 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r15 - popq %r14 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r14 - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r13 - popq %r12 -.cfi_adjust_cfa_offset -8 -.cfi_same_value r12 - .byte 0xf3,0xc3 -.cfi_endproc - -.globl sike_mpmul -.hidden sike_mpmul -.type sike_mpmul,@function -sike_mpmul: -.cfi_startproc - pushq %r12 -.cfi_adjust_cfa_offset 8 -.cfi_offset r12, -16 - pushq %r13 -.cfi_adjust_cfa_offset 8 -.cfi_offset r13, -24 - pushq %r14 -.cfi_adjust_cfa_offset 8 -.cfi_offset r14, -32 - pushq %r15 -.cfi_adjust_cfa_offset 8 -.cfi_offset r15, -40 - - - - leaq OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je .Lmul_bdw - - - - movq %rdx,%rcx - - subq $112,%rsp -.cfi_adjust_cfa_offset 112 - - - xorq %rax,%rax - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - xorq %r11,%r11 - addq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - - sbbq $0,%rax - movq %rax,64(%rsp) - - movq %r8,0(%rcx) - movq %r9,8(%rcx) - movq %r10,16(%rcx) - movq %r11,24(%rcx) - - - xorq %rdx,%rdx - movq 32(%rsi),%r12 - movq 40(%rsi),%r13 - movq 
48(%rsi),%r14 - xorq %r15,%r15 - addq 0(%rsi),%r12 - adcq 8(%rsi),%r13 - adcq 16(%rsi),%r14 - adcq 24(%rsi),%r15 - sbbq $0x0,%rdx - - movq %rdx,72(%rsp) - - - movq (%rcx),%rax - mulq %r12 - movq %rax,(%rsp) - movq %rdx,%r8 - - xorq %r9,%r9 - movq (%rcx),%rax - mulq %r13 - addq %rax,%r8 - adcq %rdx,%r9 - - xorq %r10,%r10 - movq 8(%rcx),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,8(%rsp) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq (%rcx),%rax - mulq %r14 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 16(%rcx),%rax - mulq %r12 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 8(%rcx),%rax - mulq %r13 - addq %rax,%r9 - movq %r9,16(%rsp) - adcq %rdx,%r10 - adcq $0x0,%r8 - - xorq %r9,%r9 - movq (%rcx),%rax - mulq %r15 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 24(%rcx),%rax - mulq %r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 8(%rcx),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 16(%rcx),%rax - mulq %r13 - addq %rax,%r10 - movq %r10,24(%rsp) - adcq %rdx,%r8 - adcq $0x0,%r9 - - xorq %r10,%r10 - movq 8(%rcx),%rax - mulq %r15 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 24(%rcx),%rax - mulq %r13 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 16(%rcx),%rax - mulq %r14 - addq %rax,%r8 - movq %r8,32(%rsp) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r11,%r11 - movq 16(%rcx),%rax - mulq %r15 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - movq 24(%rcx),%rax - mulq %r14 - addq %rax,%r9 - movq %r9,40(%rsp) - adcq %rdx,%r10 - adcq $0x0,%r11 - - movq 24(%rcx),%rax - mulq %r15 - addq %rax,%r10 - movq %r10,48(%rsp) - adcq %rdx,%r11 - movq %r11,56(%rsp) - - - movq 64(%rsp),%rax - andq %rax,%r12 - andq %rax,%r13 - andq %rax,%r14 - andq %rax,%r15 - - - movq 72(%rsp),%rax - movq 0(%rcx),%r8 - andq %rax,%r8 - movq 8(%rcx),%r9 - andq %rax,%r9 - movq 16(%rcx),%r10 - andq %rax,%r10 - movq 24(%rcx),%r11 - andq %rax,%r11 - - - addq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - 
adcq %r11,%r15 - - - movq 32(%rsp),%rax - addq %rax,%r12 - movq 40(%rsp),%rax - adcq %rax,%r13 - movq 48(%rsp),%rax - adcq %rax,%r14 - movq 56(%rsp),%rax - adcq %rax,%r15 - movq %r12,80(%rsp) - movq %r13,88(%rsp) - movq %r14,96(%rsp) - movq %r15,104(%rsp) - - - movq (%rdi),%r11 - movq (%rsi),%rax - mulq %r11 - xorq %r9,%r9 - movq %rax,(%rcx) - movq %rdx,%r8 - - movq 16(%rdi),%r14 - movq 8(%rsi),%rax - mulq %r11 - xorq %r10,%r10 - addq %rax,%r8 - adcq %rdx,%r9 - - movq 8(%rdi),%r12 - movq (%rsi),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,8(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 16(%rsi),%rax - mulq %r11 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq (%rsi),%r13 - movq %r14,%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 8(%rsi),%rax - mulq %r12 - addq %rax,%r9 - movq %r9,16(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - xorq %r9,%r9 - movq 24(%rsi),%rax - mulq %r11 - movq 24(%rdi),%r15 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq %r15,%rax - mulq %r13 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 16(%rsi),%rax - mulq %r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 8(%rsi),%rax - mulq %r14 - addq %rax,%r10 - movq %r10,24(%rcx) - adcq %rdx,%r8 - adcq $0x0,%r9 - - xorq %r10,%r10 - movq 24(%rsi),%rax - mulq %r12 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 8(%rsi),%rax - mulq %r15 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 16(%rsi),%rax - mulq %r14 - addq %rax,%r8 - movq %r8,32(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 24(%rsi),%rax - mulq %r14 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 16(%rsi),%rax - mulq %r15 - addq %rax,%r9 - movq %r9,40(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 24(%rsi),%rax - mulq %r15 - addq %rax,%r10 - movq %r10,48(%rcx) - adcq %rdx,%r8 - movq %r8,56(%rcx) - - - - movq 32(%rdi),%r11 - movq 32(%rsi),%rax - mulq %r11 - xorq %r9,%r9 - movq %rax,64(%rcx) - movq %rdx,%r8 - - movq 
48(%rdi),%r14 - movq 40(%rsi),%rax - mulq %r11 - xorq %r10,%r10 - addq %rax,%r8 - adcq %rdx,%r9 - - movq 40(%rdi),%r12 - movq 32(%rsi),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,72(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 48(%rsi),%rax - mulq %r11 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 32(%rsi),%r13 - movq %r14,%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 40(%rsi),%rax - mulq %r12 - addq %rax,%r9 - movq %r9,80(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 48(%rsi),%rax - mulq %r12 - xorq %r12,%r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r12 - - movq 40(%rsi),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r12 - movq %r10,88(%rcx) - - movq 48(%rsi),%rax - mulq %r14 - addq %rax,%r8 - adcq $0x0,%r12 - movq %r8,96(%rcx) - - addq %r12,%rdx - - - movq 0(%rsp),%r8 - subq 0(%rcx),%r8 - movq 8(%rsp),%r9 - sbbq 8(%rcx),%r9 - movq 16(%rsp),%r10 - sbbq 16(%rcx),%r10 - movq 24(%rsp),%r11 - sbbq 24(%rcx),%r11 - movq 80(%rsp),%r12 - sbbq 32(%rcx),%r12 - movq 88(%rsp),%r13 - sbbq 40(%rcx),%r13 - movq 96(%rsp),%r14 - sbbq 48(%rcx),%r14 - movq 104(%rsp),%r15 - sbbq 56(%rcx),%r15 - - - movq 64(%rcx),%rax - subq %rax,%r8 - movq 72(%rcx),%rax - sbbq %rax,%r9 - movq 80(%rcx),%rax - sbbq %rax,%r10 - movq 88(%rcx),%rax - sbbq %rax,%r11 - movq 96(%rcx),%rax - sbbq %rax,%r12 - sbbq %rdx,%r13 - sbbq $0x0,%r14 - sbbq $0x0,%r15 - - - addq 32(%rcx),%r8 - movq %r8,32(%rcx) - adcq 40(%rcx),%r9 - movq %r9,40(%rcx) - adcq 48(%rcx),%r10 - movq %r10,48(%rcx) - adcq 56(%rcx),%r11 - movq %r11,56(%rcx) - adcq 64(%rcx),%r12 - movq %r12,64(%rcx) - adcq 72(%rcx),%r13 - movq %r13,72(%rcx) - adcq 80(%rcx),%r14 - movq %r14,80(%rcx) - adcq 88(%rcx),%r15 - movq %r15,88(%rcx) - movq 96(%rcx),%r12 - adcq $0x0,%r12 - movq %r12,96(%rcx) - adcq $0x0,%rdx - movq %rdx,104(%rcx) - - addq $112,%rsp -.cfi_adjust_cfa_offset -112 - - - popq %r15 -.cfi_adjust_cfa_offset -8 - popq %r14 -.cfi_adjust_cfa_offset -8 - popq %r13 
-.cfi_adjust_cfa_offset -8 - popq %r12 -.cfi_adjust_cfa_offset -8 - .byte 0xf3,0xc3 -.cfi_endproc -#endif -.section .note.GNU-stack,"",@progbits diff --git a/packager/third_party/boringssl/mac-x86/crypto/chacha/chacha-x86.S b/packager/third_party/boringssl/mac-x86/crypto/chacha/chacha-x86.S deleted file mode 100644 index bc324888b6..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/chacha/chacha-x86.S +++ /dev/null @@ -1,974 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _ChaCha20_ctr32 -.private_extern _ChaCha20_ctr32 -.align 4 -_ChaCha20_ctr32: -L_ChaCha20_ctr32_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - xorl %eax,%eax - cmpl 28(%esp),%eax - je L000no_data - call Lpic_point -Lpic_point: - popl %eax - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lpic_point(%eax),%ebp - testl $16777216,(%ebp) - jz L001x86 - testl $512,4(%ebp) - jz L001x86 - jmp Lssse3_shortcut -L001x86: - movl 32(%esp),%esi - movl 36(%esp),%edi - subl $132,%esp - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,80(%esp) - movl %ebx,84(%esp) - movl %ecx,88(%esp) - movl %edx,92(%esp) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - movl %eax,96(%esp) - movl %ebx,100(%esp) - movl %ecx,104(%esp) - movl %edx,108(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - subl $1,%eax - movl %eax,112(%esp) - movl %ebx,116(%esp) - movl %ecx,120(%esp) - movl %edx,124(%esp) - jmp L002entry -.align 4,0x90 -L003outer_loop: - movl %ebx,156(%esp) - movl %eax,152(%esp) - movl %ecx,160(%esp) -L002entry: - movl $1634760805,%eax - movl $857760878,4(%esp) - movl $2036477234,8(%esp) - movl $1797285236,12(%esp) - movl 84(%esp),%ebx - movl 88(%esp),%ebp - movl 104(%esp),%ecx - movl 108(%esp),%esi - movl 116(%esp),%edx - movl 
120(%esp),%edi - movl %ebx,20(%esp) - movl %ebp,24(%esp) - movl %ecx,40(%esp) - movl %esi,44(%esp) - movl %edx,52(%esp) - movl %edi,56(%esp) - movl 92(%esp),%ebx - movl 124(%esp),%edi - movl 112(%esp),%edx - movl 80(%esp),%ebp - movl 96(%esp),%ecx - movl 100(%esp),%esi - addl $1,%edx - movl %ebx,28(%esp) - movl %edi,60(%esp) - movl %edx,112(%esp) - movl $10,%ebx - jmp L004loop -.align 4,0x90 -L004loop: - addl %ebp,%eax - movl %ebx,128(%esp) - movl %ebp,%ebx - xorl %eax,%edx - roll $16,%edx - addl %edx,%ecx - xorl %ecx,%ebx - movl 52(%esp),%edi - roll $12,%ebx - movl 20(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,(%esp) - roll $8,%edx - movl 4(%esp),%eax - addl %edx,%ecx - movl %edx,48(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - movl %ecx,32(%esp) - roll $16,%edi - movl %ebx,16(%esp) - addl %edi,%esi - movl 40(%esp),%ecx - xorl %esi,%ebp - movl 56(%esp),%edx - roll $12,%ebp - movl 24(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,4(%esp) - roll $8,%edi - movl 8(%esp),%eax - addl %edi,%esi - movl %edi,52(%esp) - xorl %esi,%ebp - addl %ebx,%eax - roll $7,%ebp - xorl %eax,%edx - movl %esi,36(%esp) - roll $16,%edx - movl %ebp,20(%esp) - addl %edx,%ecx - movl 44(%esp),%esi - xorl %ecx,%ebx - movl 60(%esp),%edi - roll $12,%ebx - movl 28(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,8(%esp) - roll $8,%edx - movl 12(%esp),%eax - addl %edx,%ecx - movl %edx,56(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - roll $16,%edi - movl %ebx,24(%esp) - addl %edi,%esi - xorl %esi,%ebp - roll $12,%ebp - movl 20(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,12(%esp) - roll $8,%edi - movl (%esp),%eax - addl %edi,%esi - movl %edi,%edx - xorl %esi,%ebp - addl %ebx,%eax - roll $7,%ebp - xorl %eax,%edx - roll $16,%edx - movl %ebp,28(%esp) - addl %edx,%ecx - xorl %ecx,%ebx - movl 48(%esp),%edi - roll $12,%ebx - movl 24(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,(%esp) - roll 
$8,%edx - movl 4(%esp),%eax - addl %edx,%ecx - movl %edx,60(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - movl %ecx,40(%esp) - roll $16,%edi - movl %ebx,20(%esp) - addl %edi,%esi - movl 32(%esp),%ecx - xorl %esi,%ebp - movl 52(%esp),%edx - roll $12,%ebp - movl 28(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,4(%esp) - roll $8,%edi - movl 8(%esp),%eax - addl %edi,%esi - movl %edi,48(%esp) - xorl %esi,%ebp - addl %ebx,%eax - roll $7,%ebp - xorl %eax,%edx - movl %esi,44(%esp) - roll $16,%edx - movl %ebp,24(%esp) - addl %edx,%ecx - movl 36(%esp),%esi - xorl %ecx,%ebx - movl 56(%esp),%edi - roll $12,%ebx - movl 16(%esp),%ebp - addl %ebx,%eax - xorl %eax,%edx - movl %eax,8(%esp) - roll $8,%edx - movl 12(%esp),%eax - addl %edx,%ecx - movl %edx,52(%esp) - xorl %ecx,%ebx - addl %ebp,%eax - roll $7,%ebx - xorl %eax,%edi - roll $16,%edi - movl %ebx,28(%esp) - addl %edi,%esi - xorl %esi,%ebp - movl 48(%esp),%edx - roll $12,%ebp - movl 128(%esp),%ebx - addl %ebp,%eax - xorl %eax,%edi - movl %eax,12(%esp) - roll $8,%edi - movl (%esp),%eax - addl %edi,%esi - movl %edi,56(%esp) - xorl %esi,%ebp - roll $7,%ebp - decl %ebx - jnz L004loop - movl 160(%esp),%ebx - addl $1634760805,%eax - addl 80(%esp),%ebp - addl 96(%esp),%ecx - addl 100(%esp),%esi - cmpl $64,%ebx - jb L005tail - movl 156(%esp),%ebx - addl 112(%esp),%edx - addl 120(%esp),%edi - xorl (%ebx),%eax - xorl 16(%ebx),%ebp - movl %eax,(%esp) - movl 152(%esp),%eax - xorl 32(%ebx),%ecx - xorl 36(%ebx),%esi - xorl 48(%ebx),%edx - xorl 56(%ebx),%edi - movl %ebp,16(%eax) - movl %ecx,32(%eax) - movl %esi,36(%eax) - movl %edx,48(%eax) - movl %edi,56(%eax) - movl 4(%esp),%ebp - movl 8(%esp),%ecx - movl 12(%esp),%esi - movl 20(%esp),%edx - movl 24(%esp),%edi - addl $857760878,%ebp - addl $2036477234,%ecx - addl $1797285236,%esi - addl 84(%esp),%edx - addl 88(%esp),%edi - xorl 4(%ebx),%ebp - xorl 8(%ebx),%ecx - xorl 12(%ebx),%esi - xorl 20(%ebx),%edx - xorl 24(%ebx),%edi - movl %ebp,4(%eax) - movl 
%ecx,8(%eax) - movl %esi,12(%eax) - movl %edx,20(%eax) - movl %edi,24(%eax) - movl 28(%esp),%ebp - movl 40(%esp),%ecx - movl 44(%esp),%esi - movl 52(%esp),%edx - movl 60(%esp),%edi - addl 92(%esp),%ebp - addl 104(%esp),%ecx - addl 108(%esp),%esi - addl 116(%esp),%edx - addl 124(%esp),%edi - xorl 28(%ebx),%ebp - xorl 40(%ebx),%ecx - xorl 44(%ebx),%esi - xorl 52(%ebx),%edx - xorl 60(%ebx),%edi - leal 64(%ebx),%ebx - movl %ebp,28(%eax) - movl (%esp),%ebp - movl %ecx,40(%eax) - movl 160(%esp),%ecx - movl %esi,44(%eax) - movl %edx,52(%eax) - movl %edi,60(%eax) - movl %ebp,(%eax) - leal 64(%eax),%eax - subl $64,%ecx - jnz L003outer_loop - jmp L006done -L005tail: - addl 112(%esp),%edx - addl 120(%esp),%edi - movl %eax,(%esp) - movl %ebp,16(%esp) - movl %ecx,32(%esp) - movl %esi,36(%esp) - movl %edx,48(%esp) - movl %edi,56(%esp) - movl 4(%esp),%ebp - movl 8(%esp),%ecx - movl 12(%esp),%esi - movl 20(%esp),%edx - movl 24(%esp),%edi - addl $857760878,%ebp - addl $2036477234,%ecx - addl $1797285236,%esi - addl 84(%esp),%edx - addl 88(%esp),%edi - movl %ebp,4(%esp) - movl %ecx,8(%esp) - movl %esi,12(%esp) - movl %edx,20(%esp) - movl %edi,24(%esp) - movl 28(%esp),%ebp - movl 40(%esp),%ecx - movl 44(%esp),%esi - movl 52(%esp),%edx - movl 60(%esp),%edi - addl 92(%esp),%ebp - addl 104(%esp),%ecx - addl 108(%esp),%esi - addl 116(%esp),%edx - addl 124(%esp),%edi - movl %ebp,28(%esp) - movl 156(%esp),%ebp - movl %ecx,40(%esp) - movl 152(%esp),%ecx - movl %esi,44(%esp) - xorl %esi,%esi - movl %edx,52(%esp) - movl %edi,60(%esp) - xorl %eax,%eax - xorl %edx,%edx -L007tail_loop: - movb (%esi,%ebp,1),%al - movb (%esp,%esi,1),%dl - leal 1(%esi),%esi - xorb %dl,%al - movb %al,-1(%ecx,%esi,1) - decl %ebx - jnz L007tail_loop -L006done: - addl $132,%esp -L000no_data: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _ChaCha20_ssse3 -.private_extern _ChaCha20_ssse3 -.align 4 -_ChaCha20_ssse3: -L_ChaCha20_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi 
-Lssse3_shortcut: - movl 20(%esp),%edi - movl 24(%esp),%esi - movl 28(%esp),%ecx - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl %esp,%ebp - subl $524,%esp - andl $-64,%esp - movl %ebp,512(%esp) - leal Lssse3_data-Lpic_point(%eax),%eax - movdqu (%ebx),%xmm3 - cmpl $256,%ecx - jb L0081x - movl %edx,516(%esp) - movl %ebx,520(%esp) - subl $256,%ecx - leal 384(%esp),%ebp - movdqu (%edx),%xmm7 - pshufd $0,%xmm3,%xmm0 - pshufd $85,%xmm3,%xmm1 - pshufd $170,%xmm3,%xmm2 - pshufd $255,%xmm3,%xmm3 - paddd 48(%eax),%xmm0 - pshufd $0,%xmm7,%xmm4 - pshufd $85,%xmm7,%xmm5 - psubd 64(%eax),%xmm0 - pshufd $170,%xmm7,%xmm6 - pshufd $255,%xmm7,%xmm7 - movdqa %xmm0,64(%ebp) - movdqa %xmm1,80(%ebp) - movdqa %xmm2,96(%ebp) - movdqa %xmm3,112(%ebp) - movdqu 16(%edx),%xmm3 - movdqa %xmm4,-64(%ebp) - movdqa %xmm5,-48(%ebp) - movdqa %xmm6,-32(%ebp) - movdqa %xmm7,-16(%ebp) - movdqa 32(%eax),%xmm7 - leal 128(%esp),%ebx - pshufd $0,%xmm3,%xmm0 - pshufd $85,%xmm3,%xmm1 - pshufd $170,%xmm3,%xmm2 - pshufd $255,%xmm3,%xmm3 - pshufd $0,%xmm7,%xmm4 - pshufd $85,%xmm7,%xmm5 - pshufd $170,%xmm7,%xmm6 - pshufd $255,%xmm7,%xmm7 - movdqa %xmm0,(%ebp) - movdqa %xmm1,16(%ebp) - movdqa %xmm2,32(%ebp) - movdqa %xmm3,48(%ebp) - movdqa %xmm4,-128(%ebp) - movdqa %xmm5,-112(%ebp) - movdqa %xmm6,-96(%ebp) - movdqa %xmm7,-80(%ebp) - leal 128(%esi),%esi - leal 128(%edi),%edi - jmp L009outer_loop -.align 4,0x90 -L009outer_loop: - movdqa -112(%ebp),%xmm1 - movdqa -96(%ebp),%xmm2 - movdqa -80(%ebp),%xmm3 - movdqa -48(%ebp),%xmm5 - movdqa -32(%ebp),%xmm6 - movdqa -16(%ebp),%xmm7 - movdqa %xmm1,-112(%ebx) - movdqa %xmm2,-96(%ebx) - movdqa %xmm3,-80(%ebx) - movdqa %xmm5,-48(%ebx) - movdqa %xmm6,-32(%ebx) - movdqa %xmm7,-16(%ebx) - movdqa 32(%ebp),%xmm2 - movdqa 48(%ebp),%xmm3 - movdqa 64(%ebp),%xmm4 - movdqa 80(%ebp),%xmm5 - movdqa 96(%ebp),%xmm6 - movdqa 112(%ebp),%xmm7 - paddd 64(%eax),%xmm4 - movdqa %xmm2,32(%ebx) - movdqa %xmm3,48(%ebx) - movdqa %xmm4,64(%ebx) - movdqa %xmm5,80(%ebx) - movdqa %xmm6,96(%ebx) - 
movdqa %xmm7,112(%ebx) - movdqa %xmm4,64(%ebp) - movdqa -128(%ebp),%xmm0 - movdqa %xmm4,%xmm6 - movdqa -64(%ebp),%xmm3 - movdqa (%ebp),%xmm4 - movdqa 16(%ebp),%xmm5 - movl $10,%edx - nop -.align 4,0x90 -L010loop: - paddd %xmm3,%xmm0 - movdqa %xmm3,%xmm2 - pxor %xmm0,%xmm6 - pshufb (%eax),%xmm6 - paddd %xmm6,%xmm4 - pxor %xmm4,%xmm2 - movdqa -48(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -112(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 80(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-128(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,64(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - movdqa %xmm4,(%ebx) - pshufb (%eax),%xmm7 - movdqa %xmm2,-64(%ebx) - paddd %xmm7,%xmm5 - movdqa 32(%ebx),%xmm4 - pxor %xmm5,%xmm3 - movdqa -32(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -96(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 96(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-112(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,80(%ebx) - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - movdqa %xmm5,16(%ebx) - pshufb (%eax),%xmm6 - movdqa %xmm3,-48(%ebx) - paddd %xmm6,%xmm4 - movdqa 48(%ebx),%xmm5 - pxor %xmm4,%xmm2 - movdqa -16(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -80(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 112(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-96(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,96(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - pshufb (%eax),%xmm7 - movdqa %xmm2,-32(%ebx) - paddd %xmm7,%xmm5 - pxor %xmm5,%xmm3 - movdqa -48(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - 
psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -128(%ebx),%xmm0 - paddd %xmm3,%xmm1 - pxor %xmm1,%xmm7 - movdqa %xmm1,-80(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,%xmm6 - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - pshufb (%eax),%xmm6 - movdqa %xmm3,-16(%ebx) - paddd %xmm6,%xmm4 - pxor %xmm4,%xmm2 - movdqa -32(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -112(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 64(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-128(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,112(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - movdqa %xmm4,32(%ebx) - pshufb (%eax),%xmm7 - movdqa %xmm2,-48(%ebx) - paddd %xmm7,%xmm5 - movdqa (%ebx),%xmm4 - pxor %xmm5,%xmm3 - movdqa -16(%ebx),%xmm2 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -96(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 80(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-112(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,64(%ebx) - pxor %xmm5,%xmm3 - paddd %xmm2,%xmm0 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - pxor %xmm0,%xmm6 - por %xmm1,%xmm3 - movdqa %xmm5,48(%ebx) - pshufb (%eax),%xmm6 - movdqa %xmm3,-32(%ebx) - paddd %xmm6,%xmm4 - movdqa 16(%ebx),%xmm5 - pxor %xmm4,%xmm2 - movdqa -64(%ebx),%xmm3 - movdqa %xmm2,%xmm1 - pslld $12,%xmm2 - psrld $20,%xmm1 - por %xmm1,%xmm2 - movdqa -80(%ebx),%xmm1 - paddd %xmm2,%xmm0 - movdqa 96(%ebx),%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm0,-96(%ebx) - pshufb 16(%eax),%xmm6 - paddd %xmm6,%xmm4 - movdqa %xmm6,80(%ebx) - pxor %xmm4,%xmm2 - paddd %xmm3,%xmm1 - movdqa %xmm2,%xmm0 - pslld $7,%xmm2 - psrld $25,%xmm0 - pxor %xmm1,%xmm7 - por %xmm0,%xmm2 - pshufb (%eax),%xmm7 - movdqa %xmm2,-16(%ebx) - paddd %xmm7,%xmm5 - pxor 
%xmm5,%xmm3 - movdqa %xmm3,%xmm0 - pslld $12,%xmm3 - psrld $20,%xmm0 - por %xmm0,%xmm3 - movdqa -128(%ebx),%xmm0 - paddd %xmm3,%xmm1 - movdqa 64(%ebx),%xmm6 - pxor %xmm1,%xmm7 - movdqa %xmm1,-80(%ebx) - pshufb 16(%eax),%xmm7 - paddd %xmm7,%xmm5 - movdqa %xmm7,96(%ebx) - pxor %xmm5,%xmm3 - movdqa %xmm3,%xmm1 - pslld $7,%xmm3 - psrld $25,%xmm1 - por %xmm1,%xmm3 - decl %edx - jnz L010loop - movdqa %xmm3,-64(%ebx) - movdqa %xmm4,(%ebx) - movdqa %xmm5,16(%ebx) - movdqa %xmm6,64(%ebx) - movdqa %xmm7,96(%ebx) - movdqa -112(%ebx),%xmm1 - movdqa -96(%ebx),%xmm2 - movdqa -80(%ebx),%xmm3 - paddd -128(%ebp),%xmm0 - paddd -112(%ebp),%xmm1 - paddd -96(%ebp),%xmm2 - paddd -80(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa -64(%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa -48(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa -32(%ebx),%xmm2 - pxor %xmm3,%xmm7 - movdqa -16(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd -64(%ebp),%xmm0 - paddd -48(%ebp),%xmm1 - paddd -32(%ebp),%xmm2 - paddd -16(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa (%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa 16(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa 32(%ebx),%xmm2 - 
pxor %xmm3,%xmm7 - movdqa 48(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd (%ebp),%xmm0 - paddd 16(%ebp),%xmm1 - paddd 32(%ebp),%xmm2 - paddd 48(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 16(%esi),%esi - pxor %xmm0,%xmm4 - movdqa 64(%ebx),%xmm0 - pxor %xmm1,%xmm5 - movdqa 80(%ebx),%xmm1 - pxor %xmm2,%xmm6 - movdqa 96(%ebx),%xmm2 - pxor %xmm3,%xmm7 - movdqa 112(%ebx),%xmm3 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 16(%edi),%edi - paddd 64(%ebp),%xmm0 - paddd 80(%ebp),%xmm1 - paddd 96(%ebp),%xmm2 - paddd 112(%ebp),%xmm3 - movdqa %xmm0,%xmm6 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm6 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm6,%xmm3 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - movdqu -128(%esi),%xmm4 - movdqu -64(%esi),%xmm5 - movdqu (%esi),%xmm2 - movdqu 64(%esi),%xmm7 - leal 208(%esi),%esi - pxor %xmm0,%xmm4 - pxor %xmm1,%xmm5 - pxor %xmm2,%xmm6 - pxor %xmm3,%xmm7 - movdqu %xmm4,-128(%edi) - movdqu %xmm5,-64(%edi) - movdqu %xmm6,(%edi) - movdqu %xmm7,64(%edi) - leal 208(%edi),%edi - subl $256,%ecx - jnc L009outer_loop - addl $256,%ecx - jz L011done - movl 520(%esp),%ebx - leal -128(%esi),%esi - movl 516(%esp),%edx - leal -128(%edi),%edi - movd 64(%ebp),%xmm2 - movdqu (%ebx),%xmm3 - paddd 96(%eax),%xmm2 - pand 112(%eax),%xmm3 - por %xmm2,%xmm3 -L0081x: - movdqa 32(%eax),%xmm0 - movdqu (%edx),%xmm1 - movdqu 16(%edx),%xmm2 - movdqa (%eax),%xmm6 - 
movdqa 16(%eax),%xmm7 - movl %ebp,48(%esp) - movdqa %xmm0,(%esp) - movdqa %xmm1,16(%esp) - movdqa %xmm2,32(%esp) - movdqa %xmm3,48(%esp) - movl $10,%edx - jmp L012loop1x -.align 4,0x90 -L013outer1x: - movdqa 80(%eax),%xmm3 - movdqa (%esp),%xmm0 - movdqa 16(%esp),%xmm1 - movdqa 32(%esp),%xmm2 - paddd 48(%esp),%xmm3 - movl $10,%edx - movdqa %xmm3,48(%esp) - jmp L012loop1x -.align 4,0x90 -L012loop1x: - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $57,%xmm1,%xmm1 - pshufd $147,%xmm3,%xmm3 - nop - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $147,%xmm1,%xmm1 - pshufd $57,%xmm3,%xmm3 - decl %edx - jnz L012loop1x - paddd (%esp),%xmm0 - paddd 16(%esp),%xmm1 - paddd 32(%esp),%xmm2 - paddd 48(%esp),%xmm3 - cmpl $64,%ecx - jb L014tail - movdqu (%esi),%xmm4 - movdqu 16(%esi),%xmm5 - pxor %xmm4,%xmm0 - movdqu 32(%esi),%xmm4 - pxor %xmm5,%xmm1 - movdqu 48(%esi),%xmm5 - pxor %xmm4,%xmm2 - pxor %xmm5,%xmm3 - leal 64(%esi),%esi - movdqu %xmm0,(%edi) - movdqu %xmm1,16(%edi) - movdqu %xmm2,32(%edi) - movdqu %xmm3,48(%edi) - leal 64(%edi),%edi - subl $64,%ecx - jnz L013outer1x - jmp L011done -L014tail: - movdqa %xmm0,(%esp) - movdqa %xmm1,16(%esp) - movdqa %xmm2,32(%esp) - movdqa %xmm3,48(%esp) - xorl %eax,%eax - xorl %edx,%edx - xorl %ebp,%ebp -L015tail_loop: - movb (%esp,%ebp,1),%al - movb (%esi,%ebp,1),%dl - leal 
1(%ebp),%ebp - xorb %dl,%al - movb %al,-1(%edi,%ebp,1) - decl %ecx - jnz L015tail_loop -L011done: - movl 512(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 6,0x90 -Lssse3_data: -.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 -.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 -.long 1634760805,857760878,2036477234,1797285236 -.long 0,1,2,3 -.long 4,4,4,4 -.long 1,0,0,0 -.long 4,0,0,0 -.long 0,-1,-1,-1 -.align 6,0x90 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 -.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 -.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 -.byte 114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aes-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aes-586.S deleted file mode 100644 index 3634f64d11..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aes-586.S +++ /dev/null @@ -1,3226 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.private_extern __x86_AES_encrypt_compact -.align 4 -__x86_AES_encrypt_compact: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl -128(%ebp),%edi - movl -96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi -.align 4,0x90 -L000loop: - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movzbl -128(%ebp,%esi,1),%esi - movzbl %ch,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ah,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $8,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movzbl -128(%ebp,%ecx,1),%ecx 
- shll $24,%ecx - xorl %ecx,%edx - movl %esi,%ecx - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %ecx,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %ecx,%edi - xorl %esi,%ecx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%ecx - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%ecx - andl %edx,%ebp - leal (%edx,%edx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %edx,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %edx,%edi - xorl %esi,%edx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%edx - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%edx - andl %eax,%ebp - leal (%eax,%eax,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %eax,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %eax,%edi - xorl %esi,%eax - rorl $24,%edi - xorl %ebp,%esi - roll $24,%eax - xorl %edi,%esi - movl $2155905152,%ebp - xorl %esi,%eax - andl %ebx,%ebp - leal (%ebx,%ebx,1),%edi - movl %ebp,%esi - shrl $7,%ebp - andl $4278124286,%edi - subl %ebp,%esi - movl %ebx,%ebp - andl $454761243,%esi - rorl $16,%ebp - xorl %edi,%esi - movl %ebx,%edi - xorl %esi,%ebx - rorl $24,%edi - xorl %ebp,%esi - roll $24,%ebx - xorl %edi,%esi - xorl %esi,%ebx - movl 20(%esp),%edi - movl 28(%esp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb L000loop - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - 
movzbl -128(%ebp,%esi,1),%esi - movzbl %ch,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ah,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $8,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movzbl -128(%ebp,%ecx,1),%ecx - shll $24,%ecx - xorl %ecx,%edx - movl %esi,%ecx - xorl 16(%edi),%eax - xorl 20(%edi),%ebx - xorl 24(%edi),%ecx - xorl 28(%edi),%edx - ret -.private_extern __sse_AES_encrypt_compact -.align 4 -__sse_AES_encrypt_compact: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl $454761243,%eax - movl %eax,8(%esp) - movl %eax,12(%esp) - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx -.align 4,0x90 -L001loop: - pshufw $8,%mm0,%mm1 - pshufw $13,%mm4,%mm5 - movd %mm1,%eax - movd %mm5,%ebx - movl %edi,20(%esp) - movzbl %al,%esi - movzbl %ah,%edx - pshufw $13,%mm0,%mm2 - movzbl -128(%ebp,%esi,1),%ecx - movzbl %bl,%edi - movzbl -128(%ebp,%edx,1),%edx - shrl $16,%eax - shll $8,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $16,%esi - pshufw 
$8,%mm4,%mm6 - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $24,%esi - shrl $16,%ebx - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $8,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - movd %mm2,%eax - movd %ecx,%mm0 - movzbl -128(%ebp,%edi,1),%ecx - movzbl %ah,%edi - shll $16,%ecx - movd %mm6,%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - shll $8,%esi - shrl $16,%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shrl $16,%eax - movd %ecx,%mm1 - movzbl -128(%ebp,%edi,1),%ecx - movzbl %ah,%edi - shll $16,%ecx - andl $255,%eax - orl %esi,%ecx - punpckldq %mm1,%mm0 - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - andl $255,%ebx - movzbl -128(%ebp,%eax,1),%eax - orl %esi,%ecx - shll $16,%eax - movzbl -128(%ebp,%edi,1),%esi - orl %eax,%edx - shll $8,%esi - movzbl -128(%ebp,%ebx,1),%ebx - orl %esi,%ecx - orl %ebx,%edx - movl 20(%esp),%edi - movd %ecx,%mm4 - movd %edx,%mm5 - punpckldq %mm5,%mm4 - addl $16,%edi - cmpl 24(%esp),%edi - ja L002out - movq 8(%esp),%mm2 - pxor %mm3,%mm3 - pxor %mm7,%mm7 - movq %mm0,%mm1 - movq %mm4,%mm5 - pcmpgtb %mm0,%mm3 - pcmpgtb %mm4,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - pshufw $177,%mm0,%mm2 - pshufw $177,%mm4,%mm6 - paddb %mm0,%mm0 - paddb %mm4,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pshufw $177,%mm2,%mm3 - pshufw $177,%mm6,%mm7 - pxor %mm0,%mm1 - pxor %mm4,%mm5 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq %mm3,%mm2 - movq %mm7,%mm6 - pslld $8,%mm3 - pslld $8,%mm7 - psrld $24,%mm2 - psrld $24,%mm6 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq %mm1,%mm3 - movq %mm5,%mm7 - movq (%edi),%mm2 - movq 8(%edi),%mm6 - psrld $8,%mm1 - psrld $8,%mm5 - movl -128(%ebp),%eax - pslld $24,%mm3 - pslld $24,%mm7 - movl 
-64(%ebp),%ebx - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movl (%ebp),%ecx - pxor %mm3,%mm0 - pxor %mm7,%mm4 - movl 64(%ebp),%edx - pxor %mm2,%mm0 - pxor %mm6,%mm4 - jmp L001loop -.align 4,0x90 -L002out: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - ret -.private_extern __x86_AES_encrypt -.align 4 -__x86_AES_encrypt: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) -.align 4,0x90 -L003loop: - movl %eax,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %bh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movl (%ebp,%esi,8),%esi - movzbl %ch,%edi - xorl 3(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %eax,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movl (%ebp,%esi,8),%esi - movzbl %dh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movzbl %bh,%edi - xorl 1(%ebp,%edi,8),%esi - movl 20(%esp),%edi - movl (%ebp,%edx,8),%edx - movzbl %ah,%eax - xorl 3(%ebp,%eax,8),%edx - movl 4(%esp),%eax - andl $255,%ebx - xorl 2(%ebp,%ebx,8),%edx - movl 8(%esp),%ebx - xorl 1(%ebp,%ecx,8),%edx - movl %esi,%ecx - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb L003loop - movl %eax,%esi - andl $255,%esi - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %bh,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movl %edx,%edi - shrl 
$24,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - shrl $16,%ebx - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %ch,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $24,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - shrl $24,%ecx - movl 2(%ebp,%esi,8),%esi - andl $255,%esi - movzbl %dh,%edi - movl (%ebp,%edi,8),%edi - andl $65280,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edx - andl $255,%edi - movl (%ebp,%edi,8),%edi - andl $16711680,%edi - xorl %edi,%esi - movzbl %bh,%edi - movl 2(%ebp,%edi,8),%edi - andl $4278190080,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movl 2(%ebp,%edx,8),%edx - andl $255,%edx - movzbl %ah,%eax - movl (%ebp,%eax,8),%eax - andl $65280,%eax - xorl %eax,%edx - movl 4(%esp),%eax - andl $255,%ebx - movl (%ebp,%ebx,8),%ebx - andl $16711680,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - movl 2(%ebp,%ecx,8),%ecx - andl $4278190080,%ecx - xorl %ecx,%edx - movl %esi,%ecx - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - ret -.align 6,0x90 -LAES_Te: -.long 2774754246,2774754246 -.long 2222750968,2222750968 -.long 2574743534,2574743534 -.long 2373680118,2373680118 -.long 234025727,234025727 -.long 3177933782,3177933782 -.long 2976870366,2976870366 -.long 1422247313,1422247313 -.long 1345335392,1345335392 -.long 50397442,50397442 -.long 2842126286,2842126286 -.long 2099981142,2099981142 -.long 436141799,436141799 -.long 1658312629,1658312629 -.long 3870010189,3870010189 -.long 2591454956,2591454956 -.long 1170918031,1170918031 -.long 2642575903,2642575903 -.long 1086966153,1086966153 -.long 2273148410,2273148410 -.long 368769775,368769775 
-.long 3948501426,3948501426 -.long 3376891790,3376891790 -.long 200339707,200339707 -.long 3970805057,3970805057 -.long 1742001331,1742001331 -.long 4255294047,4255294047 -.long 3937382213,3937382213 -.long 3214711843,3214711843 -.long 4154762323,4154762323 -.long 2524082916,2524082916 -.long 1539358875,1539358875 -.long 3266819957,3266819957 -.long 486407649,486407649 -.long 2928907069,2928907069 -.long 1780885068,1780885068 -.long 1513502316,1513502316 -.long 1094664062,1094664062 -.long 49805301,49805301 -.long 1338821763,1338821763 -.long 1546925160,1546925160 -.long 4104496465,4104496465 -.long 887481809,887481809 -.long 150073849,150073849 -.long 2473685474,2473685474 -.long 1943591083,1943591083 -.long 1395732834,1395732834 -.long 1058346282,1058346282 -.long 201589768,201589768 -.long 1388824469,1388824469 -.long 1696801606,1696801606 -.long 1589887901,1589887901 -.long 672667696,672667696 -.long 2711000631,2711000631 -.long 251987210,251987210 -.long 3046808111,3046808111 -.long 151455502,151455502 -.long 907153956,907153956 -.long 2608889883,2608889883 -.long 1038279391,1038279391 -.long 652995533,652995533 -.long 1764173646,1764173646 -.long 3451040383,3451040383 -.long 2675275242,2675275242 -.long 453576978,453576978 -.long 2659418909,2659418909 -.long 1949051992,1949051992 -.long 773462580,773462580 -.long 756751158,756751158 -.long 2993581788,2993581788 -.long 3998898868,3998898868 -.long 4221608027,4221608027 -.long 4132590244,4132590244 -.long 1295727478,1295727478 -.long 1641469623,1641469623 -.long 3467883389,3467883389 -.long 2066295122,2066295122 -.long 1055122397,1055122397 -.long 1898917726,1898917726 -.long 2542044179,2542044179 -.long 4115878822,4115878822 -.long 1758581177,1758581177 -.long 0,0 -.long 753790401,753790401 -.long 1612718144,1612718144 -.long 536673507,536673507 -.long 3367088505,3367088505 -.long 3982187446,3982187446 -.long 3194645204,3194645204 -.long 1187761037,1187761037 -.long 3653156455,3653156455 -.long 
1262041458,1262041458 -.long 3729410708,3729410708 -.long 3561770136,3561770136 -.long 3898103984,3898103984 -.long 1255133061,1255133061 -.long 1808847035,1808847035 -.long 720367557,720367557 -.long 3853167183,3853167183 -.long 385612781,385612781 -.long 3309519750,3309519750 -.long 3612167578,3612167578 -.long 1429418854,1429418854 -.long 2491778321,2491778321 -.long 3477423498,3477423498 -.long 284817897,284817897 -.long 100794884,100794884 -.long 2172616702,2172616702 -.long 4031795360,4031795360 -.long 1144798328,1144798328 -.long 3131023141,3131023141 -.long 3819481163,3819481163 -.long 4082192802,4082192802 -.long 4272137053,4272137053 -.long 3225436288,3225436288 -.long 2324664069,2324664069 -.long 2912064063,2912064063 -.long 3164445985,3164445985 -.long 1211644016,1211644016 -.long 83228145,83228145 -.long 3753688163,3753688163 -.long 3249976951,3249976951 -.long 1977277103,1977277103 -.long 1663115586,1663115586 -.long 806359072,806359072 -.long 452984805,452984805 -.long 250868733,250868733 -.long 1842533055,1842533055 -.long 1288555905,1288555905 -.long 336333848,336333848 -.long 890442534,890442534 -.long 804056259,804056259 -.long 3781124030,3781124030 -.long 2727843637,2727843637 -.long 3427026056,3427026056 -.long 957814574,957814574 -.long 1472513171,1472513171 -.long 4071073621,4071073621 -.long 2189328124,2189328124 -.long 1195195770,1195195770 -.long 2892260552,2892260552 -.long 3881655738,3881655738 -.long 723065138,723065138 -.long 2507371494,2507371494 -.long 2690670784,2690670784 -.long 2558624025,2558624025 -.long 3511635870,3511635870 -.long 2145180835,2145180835 -.long 1713513028,1713513028 -.long 2116692564,2116692564 -.long 2878378043,2878378043 -.long 2206763019,2206763019 -.long 3393603212,3393603212 -.long 703524551,703524551 -.long 3552098411,3552098411 -.long 1007948840,1007948840 -.long 2044649127,2044649127 -.long 3797835452,3797835452 -.long 487262998,487262998 -.long 1994120109,1994120109 -.long 1004593371,1004593371 -.long 
1446130276,1446130276 -.long 1312438900,1312438900 -.long 503974420,503974420 -.long 3679013266,3679013266 -.long 168166924,168166924 -.long 1814307912,1814307912 -.long 3831258296,3831258296 -.long 1573044895,1573044895 -.long 1859376061,1859376061 -.long 4021070915,4021070915 -.long 2791465668,2791465668 -.long 2828112185,2828112185 -.long 2761266481,2761266481 -.long 937747667,937747667 -.long 2339994098,2339994098 -.long 854058965,854058965 -.long 1137232011,1137232011 -.long 1496790894,1496790894 -.long 3077402074,3077402074 -.long 2358086913,2358086913 -.long 1691735473,1691735473 -.long 3528347292,3528347292 -.long 3769215305,3769215305 -.long 3027004632,3027004632 -.long 4199962284,4199962284 -.long 133494003,133494003 -.long 636152527,636152527 -.long 2942657994,2942657994 -.long 2390391540,2390391540 -.long 3920539207,3920539207 -.long 403179536,403179536 -.long 3585784431,3585784431 -.long 2289596656,2289596656 -.long 1864705354,1864705354 -.long 1915629148,1915629148 -.long 605822008,605822008 -.long 4054230615,4054230615 -.long 3350508659,3350508659 -.long 1371981463,1371981463 -.long 602466507,602466507 -.long 2094914977,2094914977 -.long 2624877800,2624877800 -.long 555687742,555687742 -.long 3712699286,3712699286 -.long 3703422305,3703422305 -.long 2257292045,2257292045 -.long 2240449039,2240449039 -.long 2423288032,2423288032 -.long 1111375484,1111375484 -.long 3300242801,3300242801 -.long 2858837708,2858837708 -.long 3628615824,3628615824 -.long 84083462,84083462 -.long 32962295,32962295 -.long 302911004,302911004 -.long 2741068226,2741068226 -.long 1597322602,1597322602 -.long 4183250862,4183250862 -.long 3501832553,3501832553 -.long 2441512471,2441512471 -.long 1489093017,1489093017 -.long 656219450,656219450 -.long 3114180135,3114180135 -.long 954327513,954327513 -.long 335083755,335083755 -.long 3013122091,3013122091 -.long 856756514,856756514 -.long 3144247762,3144247762 -.long 1893325225,1893325225 -.long 2307821063,2307821063 -.long 
2811532339,2811532339 -.long 3063651117,3063651117 -.long 572399164,572399164 -.long 2458355477,2458355477 -.long 552200649,552200649 -.long 1238290055,1238290055 -.long 4283782570,4283782570 -.long 2015897680,2015897680 -.long 2061492133,2061492133 -.long 2408352771,2408352771 -.long 4171342169,4171342169 -.long 2156497161,2156497161 -.long 386731290,386731290 -.long 3669999461,3669999461 -.long 837215959,837215959 -.long 3326231172,3326231172 -.long 3093850320,3093850320 -.long 3275833730,3275833730 -.long 2962856233,2962856233 -.long 1999449434,1999449434 -.long 286199582,286199582 -.long 3417354363,3417354363 -.long 4233385128,4233385128 -.long 3602627437,3602627437 -.long 974525996,974525996 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 
183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 
225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.byte 99,124,119,123,242,107,111,197 -.byte 48,1,103,43,254,215,171,118 -.byte 202,130,201,125,250,89,71,240 -.byte 173,212,162,175,156,164,114,192 -.byte 183,253,147,38,54,63,247,204 -.byte 52,165,229,241,113,216,49,21 -.byte 4,199,35,195,24,150,5,154 -.byte 7,18,128,226,235,39,178,117 -.byte 9,131,44,26,27,110,90,160 -.byte 82,59,214,179,41,227,47,132 -.byte 83,209,0,237,32,252,177,91 -.byte 106,203,190,57,74,76,88,207 -.byte 208,239,170,251,67,77,51,133 -.byte 69,249,2,127,80,60,159,168 -.byte 81,163,64,143,146,157,56,245 -.byte 188,182,218,33,16,255,243,210 -.byte 205,12,19,236,95,151,68,23 -.byte 196,167,126,61,100,93,25,115 -.byte 96,129,79,220,34,42,144,136 -.byte 70,238,184,20,222,94,11,219 -.byte 224,50,58,10,73,6,36,92 -.byte 194,211,172,98,145,149,228,121 -.byte 231,200,55,109,141,213,78,169 -.byte 108,86,244,234,101,122,174,8 -.byte 186,120,37,46,28,166,180,198 -.byte 232,221,116,31,75,189,139,138 -.byte 112,62,181,102,72,3,246,14 -.byte 97,53,87,185,134,193,29,158 -.byte 225,248,152,17,105,217,142,148 -.byte 155,30,135,233,206,85,40,223 -.byte 140,161,137,13,191,230,66,104 -.byte 65,153,45,15,176,84,187,22 -.long 1,2,4,8 -.long 16,32,64,128 -.long 27,54,0,0 -.long 0,0,0,0 -.globl _aes_nohw_encrypt -.private_extern _aes_nohw_encrypt -.align 4 -_aes_nohw_encrypt: -L_aes_nohw_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 28(%esp),%edi - movl %esp,%eax - subl $36,%esp - andl $-64,%esp - leal -127(%edi),%ebx - subl %esp,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esp - addl $4,%esp - movl %eax,28(%esp) - call L004pic_point -L004pic_point: - popl %ebp - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L004pic_point(%ebp),%eax - leal LAES_Te-L004pic_point(%ebp),%ebp - leal 764(%esp),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - btl $25,(%eax) - jnc L005x86 - 
movq (%esi),%mm0 - movq 8(%esi),%mm4 - call __sse_AES_encrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 4,0x90 -L005x86: - movl %ebp,24(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - call __x86_AES_encrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.private_extern __x86_AES_decrypt_compact -.align 4 -__x86_AES_decrypt_compact: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl -128(%ebp),%edi - movl -96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi -.align 4,0x90 -L006loop: - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl 
-128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl -128(%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - shrl $24,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%eax - subl %edi,%esi - andl $4278124286,%eax - andl $454761243,%esi - xorl %esi,%eax - movl $2155905152,%edi - andl %eax,%edi - movl %edi,%esi - shrl $7,%edi - leal (%eax,%eax,1),%ebx - subl %edi,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %ecx,%eax - xorl %esi,%ebx - movl $2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %ecx,%ebx - roll $8,%ecx - xorl %esi,%ebp - xorl %eax,%ecx - xorl %ebp,%eax - xorl %ebx,%ecx - xorl %ebp,%ebx - roll $24,%eax - xorl %ebp,%ecx - roll $16,%ebx - xorl %eax,%ecx - roll $8,%ebp - xorl %ebx,%ecx - movl 4(%esp),%eax - xorl %ebp,%ecx - movl %ecx,12(%esp) - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebx - subl %edi,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %esi,%ebx - movl $2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %edx,%ebx - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %edx,%ecx - roll $8,%edx - xorl %esi,%ebp - xorl %ebx,%edx - xorl %ebp,%ebx - xorl %ecx,%edx - xorl %ebp,%ecx - roll $24,%ebx - xorl %ebp,%edx - roll $16,%ecx 
- xorl %ebx,%edx - roll $8,%ebp - xorl %ecx,%edx - movl 8(%esp),%ebx - xorl %ebp,%edx - movl %edx,16(%esp) - movl $2155905152,%edi - andl %eax,%edi - movl %edi,%esi - shrl $7,%edi - leal (%eax,%eax,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%edx - subl %edi,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %eax,%ecx - xorl %esi,%edx - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %eax,%edx - roll $8,%eax - xorl %esi,%ebp - xorl %ecx,%eax - xorl %ebp,%ecx - xorl %edx,%eax - xorl %ebp,%edx - roll $24,%ecx - xorl %ebp,%eax - roll $16,%edx - xorl %ecx,%eax - roll $8,%ebp - xorl %edx,%eax - xorl %ebp,%eax - movl $2155905152,%edi - andl %ebx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ebx,%ebx,1),%ecx - subl %edi,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%edi - andl %ecx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%ecx,%ecx,1),%edx - subl %edi,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %ebx,%ecx - xorl %esi,%edx - movl $2155905152,%edi - andl %edx,%edi - movl %edi,%esi - shrl $7,%edi - leal (%edx,%edx,1),%ebp - subl %edi,%esi - andl $4278124286,%ebp - andl $454761243,%esi - xorl %ebx,%edx - roll $8,%ebx - xorl %esi,%ebp - xorl %ecx,%ebx - xorl %ebp,%ecx - xorl %edx,%ebx - xorl %ebp,%edx - roll $24,%ecx - xorl %ebp,%ebx - roll $16,%edx - xorl %ecx,%ebx - roll $8,%ebp - xorl %edx,%ebx - movl 12(%esp),%ecx - xorl %ebp,%ebx - movl 16(%esp),%edx - movl 20(%esp),%edi - movl 28(%esp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb L006loop - movl %eax,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl 
-128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl -128(%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl -128(%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movzbl -128(%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl -128(%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl -128(%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - shrl $24,%eax - movzbl -128(%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl 4(%esp),%eax - xorl 16(%edi),%eax - xorl 20(%edi),%ebx - xorl 24(%edi),%ecx - xorl 28(%edi),%edx - ret -.private_extern __sse_AES_decrypt_compact -.align 4 -__sse_AES_decrypt_compact: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) - movl $454761243,%eax - movl %eax,8(%esp) - movl %eax,12(%esp) - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx -.align 4,0x90 -L007loop: - pshufw 
$12,%mm0,%mm1 - pshufw $9,%mm4,%mm5 - movd %mm1,%eax - movd %mm5,%ebx - movl %edi,20(%esp) - movzbl %al,%esi - movzbl %ah,%edx - pshufw $6,%mm0,%mm2 - movzbl -128(%ebp,%esi,1),%ecx - movzbl %bl,%edi - movzbl -128(%ebp,%edx,1),%edx - shrl $16,%eax - shll $8,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $16,%esi - pshufw $3,%mm4,%mm6 - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $24,%esi - shrl $16,%ebx - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shll $24,%esi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $8,%esi - movd %mm2,%eax - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - shll $16,%esi - movd %mm6,%ebx - movd %ecx,%mm0 - movzbl -128(%ebp,%edi,1),%ecx - movzbl %al,%edi - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bl,%edi - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %ah,%edi - shll $16,%esi - shrl $16,%eax - orl %esi,%edx - movzbl -128(%ebp,%edi,1),%esi - movzbl %bh,%edi - shrl $16,%ebx - shll $8,%esi - movd %edx,%mm1 - movzbl -128(%ebp,%edi,1),%edx - movzbl %bh,%edi - shll $24,%edx - andl $255,%ebx - orl %esi,%edx - punpckldq %mm1,%mm0 - movzbl -128(%ebp,%edi,1),%esi - movzbl %al,%edi - shll $8,%esi - movzbl %ah,%eax - movzbl -128(%ebp,%ebx,1),%ebx - orl %esi,%ecx - movzbl -128(%ebp,%edi,1),%esi - orl %ebx,%edx - shll $16,%esi - movzbl -128(%ebp,%eax,1),%eax - orl %esi,%edx - shll $24,%eax - orl %eax,%ecx - movl 20(%esp),%edi - movd %edx,%mm4 - movd %ecx,%mm5 - punpckldq %mm5,%mm4 - addl $16,%edi - cmpl 24(%esp),%edi - ja L008out - movq %mm0,%mm3 - movq %mm4,%mm7 - pshufw $228,%mm0,%mm2 - pshufw $228,%mm4,%mm6 - movq %mm0,%mm1 - movq %mm4,%mm5 - pshufw $177,%mm0,%mm0 - pshufw $177,%mm4,%mm4 - pslld $8,%mm2 - pslld $8,%mm6 - psrld $8,%mm3 - psrld $8,%mm7 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pslld $16,%mm2 - pslld $16,%mm6 - psrld $16,%mm3 - psrld $16,%mm7 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pxor 
%mm3,%mm0 - pxor %mm7,%mm4 - movq 8(%esp),%mm3 - pxor %mm2,%mm2 - pxor %mm6,%mm6 - pcmpgtb %mm1,%mm2 - pcmpgtb %mm5,%mm6 - pand %mm3,%mm2 - pand %mm3,%mm6 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm2,%mm1 - pxor %mm6,%mm5 - movq %mm1,%mm3 - movq %mm5,%mm7 - movq %mm1,%mm2 - movq %mm5,%mm6 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pslld $24,%mm3 - pslld $24,%mm7 - psrld $8,%mm2 - psrld $8,%mm6 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - movq 8(%esp),%mm2 - pxor %mm3,%mm3 - pxor %mm7,%mm7 - pcmpgtb %mm1,%mm3 - pcmpgtb %mm5,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm3,%mm1 - pxor %mm7,%mm5 - pshufw $177,%mm1,%mm3 - pshufw $177,%mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm3,%mm3 - pxor %mm7,%mm7 - pcmpgtb %mm1,%mm3 - pcmpgtb %mm5,%mm7 - pand %mm2,%mm3 - pand %mm2,%mm7 - paddb %mm1,%mm1 - paddb %mm5,%mm5 - pxor %mm3,%mm1 - pxor %mm7,%mm5 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movq %mm1,%mm3 - movq %mm5,%mm7 - pshufw $177,%mm1,%mm2 - pshufw $177,%mm5,%mm6 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - pslld $8,%mm1 - pslld $8,%mm5 - psrld $8,%mm3 - psrld $8,%mm7 - movq (%edi),%mm2 - movq 8(%edi),%mm6 - pxor %mm1,%mm0 - pxor %mm5,%mm4 - pxor %mm3,%mm0 - pxor %mm7,%mm4 - movl -128(%ebp),%eax - pslld $16,%mm1 - pslld $16,%mm5 - movl -64(%ebp),%ebx - psrld $16,%mm3 - psrld $16,%mm7 - movl (%ebp),%ecx - pxor %mm1,%mm0 - pxor %mm5,%mm4 - movl 64(%ebp),%edx - pxor %mm3,%mm0 - pxor %mm7,%mm4 - pxor %mm2,%mm0 - pxor %mm6,%mm4 - jmp L007loop -.align 4,0x90 -L008out: - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - ret -.private_extern __x86_AES_decrypt -.align 4 -__x86_AES_decrypt: - movl %edi,20(%esp) - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,24(%esp) -.align 4,0x90 -L009loop: - movl %eax,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %dh,%edi - xorl 
3(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %ebx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %ah,%edi - xorl 3(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %ecx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movl (%ebp,%esi,8),%esi - movzbl %bh,%edi - xorl 3(%ebp,%edi,8),%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - xorl 2(%ebp,%edi,8),%esi - movl %edx,%edi - shrl $24,%edi - xorl 1(%ebp,%edi,8),%esi - movl 20(%esp),%edi - andl $255,%edx - movl (%ebp,%edx,8),%edx - movzbl %ch,%ecx - xorl 3(%ebp,%ecx,8),%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - xorl 2(%ebp,%ebx,8),%edx - movl 8(%esp),%ebx - shrl $24,%eax - xorl 1(%ebp,%eax,8),%edx - movl 4(%esp),%eax - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - cmpl 24(%esp),%edi - movl %edi,20(%esp) - jb L009loop - leal 2176(%ebp),%ebp - movl -128(%ebp),%edi - movl -96(%ebp),%esi - movl -64(%ebp),%edi - movl -32(%ebp),%esi - movl (%ebp),%edi - movl 32(%ebp),%esi - movl 64(%ebp),%edi - movl 96(%ebp),%esi - leal -128(%ebp),%ebp - movl %eax,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %dh,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ebx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl %esi,4(%esp) - movl %ebx,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %ah,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %ecx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - 
shll $24,%edi - xorl %edi,%esi - movl %esi,8(%esp) - movl %ecx,%esi - andl $255,%esi - movzbl (%ebp,%esi,1),%esi - movzbl %bh,%edi - movzbl (%ebp,%edi,1),%edi - shll $8,%edi - xorl %edi,%esi - movl %eax,%edi - shrl $16,%edi - andl $255,%edi - movzbl (%ebp,%edi,1),%edi - shll $16,%edi - xorl %edi,%esi - movl %edx,%edi - shrl $24,%edi - movzbl (%ebp,%edi,1),%edi - shll $24,%edi - xorl %edi,%esi - movl 20(%esp),%edi - andl $255,%edx - movzbl (%ebp,%edx,1),%edx - movzbl %ch,%ecx - movzbl (%ebp,%ecx,1),%ecx - shll $8,%ecx - xorl %ecx,%edx - movl %esi,%ecx - shrl $16,%ebx - andl $255,%ebx - movzbl (%ebp,%ebx,1),%ebx - shll $16,%ebx - xorl %ebx,%edx - movl 8(%esp),%ebx - shrl $24,%eax - movzbl (%ebp,%eax,1),%eax - shll $24,%eax - xorl %eax,%edx - movl 4(%esp),%eax - leal -2048(%ebp),%ebp - addl $16,%edi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - ret -.align 6,0x90 -LAES_Td: -.long 1353184337,1353184337 -.long 1399144830,1399144830 -.long 3282310938,3282310938 -.long 2522752826,2522752826 -.long 3412831035,3412831035 -.long 4047871263,4047871263 -.long 2874735276,2874735276 -.long 2466505547,2466505547 -.long 1442459680,1442459680 -.long 4134368941,4134368941 -.long 2440481928,2440481928 -.long 625738485,625738485 -.long 4242007375,4242007375 -.long 3620416197,3620416197 -.long 2151953702,2151953702 -.long 2409849525,2409849525 -.long 1230680542,1230680542 -.long 1729870373,1729870373 -.long 2551114309,2551114309 -.long 3787521629,3787521629 -.long 41234371,41234371 -.long 317738113,317738113 -.long 2744600205,2744600205 -.long 3338261355,3338261355 -.long 3881799427,3881799427 -.long 2510066197,2510066197 -.long 3950669247,3950669247 -.long 3663286933,3663286933 -.long 763608788,763608788 -.long 3542185048,3542185048 -.long 694804553,694804553 -.long 1154009486,1154009486 -.long 1787413109,1787413109 -.long 2021232372,2021232372 -.long 1799248025,1799248025 -.long 3715217703,3715217703 -.long 3058688446,3058688446 -.long 
397248752,397248752 -.long 1722556617,1722556617 -.long 3023752829,3023752829 -.long 407560035,407560035 -.long 2184256229,2184256229 -.long 1613975959,1613975959 -.long 1165972322,1165972322 -.long 3765920945,3765920945 -.long 2226023355,2226023355 -.long 480281086,480281086 -.long 2485848313,2485848313 -.long 1483229296,1483229296 -.long 436028815,436028815 -.long 2272059028,2272059028 -.long 3086515026,3086515026 -.long 601060267,601060267 -.long 3791801202,3791801202 -.long 1468997603,1468997603 -.long 715871590,715871590 -.long 120122290,120122290 -.long 63092015,63092015 -.long 2591802758,2591802758 -.long 2768779219,2768779219 -.long 4068943920,4068943920 -.long 2997206819,2997206819 -.long 3127509762,3127509762 -.long 1552029421,1552029421 -.long 723308426,723308426 -.long 2461301159,2461301159 -.long 4042393587,4042393587 -.long 2715969870,2715969870 -.long 3455375973,3455375973 -.long 3586000134,3586000134 -.long 526529745,526529745 -.long 2331944644,2331944644 -.long 2639474228,2639474228 -.long 2689987490,2689987490 -.long 853641733,853641733 -.long 1978398372,1978398372 -.long 971801355,971801355 -.long 2867814464,2867814464 -.long 111112542,111112542 -.long 1360031421,1360031421 -.long 4186579262,4186579262 -.long 1023860118,1023860118 -.long 2919579357,2919579357 -.long 1186850381,1186850381 -.long 3045938321,3045938321 -.long 90031217,90031217 -.long 1876166148,1876166148 -.long 4279586912,4279586912 -.long 620468249,620468249 -.long 2548678102,2548678102 -.long 3426959497,3426959497 -.long 2006899047,2006899047 -.long 3175278768,3175278768 -.long 2290845959,2290845959 -.long 945494503,945494503 -.long 3689859193,3689859193 -.long 1191869601,1191869601 -.long 3910091388,3910091388 -.long 3374220536,3374220536 -.long 0,0 -.long 2206629897,2206629897 -.long 1223502642,1223502642 -.long 2893025566,2893025566 -.long 1316117100,1316117100 -.long 4227796733,4227796733 -.long 1446544655,1446544655 -.long 517320253,517320253 -.long 658058550,658058550 
-.long 1691946762,1691946762 -.long 564550760,564550760 -.long 3511966619,3511966619 -.long 976107044,976107044 -.long 2976320012,2976320012 -.long 266819475,266819475 -.long 3533106868,3533106868 -.long 2660342555,2660342555 -.long 1338359936,1338359936 -.long 2720062561,2720062561 -.long 1766553434,1766553434 -.long 370807324,370807324 -.long 179999714,179999714 -.long 3844776128,3844776128 -.long 1138762300,1138762300 -.long 488053522,488053522 -.long 185403662,185403662 -.long 2915535858,2915535858 -.long 3114841645,3114841645 -.long 3366526484,3366526484 -.long 2233069911,2233069911 -.long 1275557295,1275557295 -.long 3151862254,3151862254 -.long 4250959779,4250959779 -.long 2670068215,2670068215 -.long 3170202204,3170202204 -.long 3309004356,3309004356 -.long 880737115,880737115 -.long 1982415755,1982415755 -.long 3703972811,3703972811 -.long 1761406390,1761406390 -.long 1676797112,1676797112 -.long 3403428311,3403428311 -.long 277177154,277177154 -.long 1076008723,1076008723 -.long 538035844,538035844 -.long 2099530373,2099530373 -.long 4164795346,4164795346 -.long 288553390,288553390 -.long 1839278535,1839278535 -.long 1261411869,1261411869 -.long 4080055004,4080055004 -.long 3964831245,3964831245 -.long 3504587127,3504587127 -.long 1813426987,1813426987 -.long 2579067049,2579067049 -.long 4199060497,4199060497 -.long 577038663,577038663 -.long 3297574056,3297574056 -.long 440397984,440397984 -.long 3626794326,3626794326 -.long 4019204898,4019204898 -.long 3343796615,3343796615 -.long 3251714265,3251714265 -.long 4272081548,4272081548 -.long 906744984,906744984 -.long 3481400742,3481400742 -.long 685669029,685669029 -.long 646887386,646887386 -.long 2764025151,2764025151 -.long 3835509292,3835509292 -.long 227702864,227702864 -.long 2613862250,2613862250 -.long 1648787028,1648787028 -.long 3256061430,3256061430 -.long 3904428176,3904428176 -.long 1593260334,1593260334 -.long 4121936770,4121936770 -.long 3196083615,3196083615 -.long 2090061929,2090061929 
-.long 2838353263,2838353263 -.long 3004310991,3004310991 -.long 999926984,999926984 -.long 2809993232,2809993232 -.long 1852021992,1852021992 -.long 2075868123,2075868123 -.long 158869197,158869197 -.long 4095236462,4095236462 -.long 28809964,28809964 -.long 2828685187,2828685187 -.long 1701746150,1701746150 -.long 2129067946,2129067946 -.long 147831841,147831841 -.long 3873969647,3873969647 -.long 3650873274,3650873274 -.long 3459673930,3459673930 -.long 3557400554,3557400554 -.long 3598495785,3598495785 -.long 2947720241,2947720241 -.long 824393514,824393514 -.long 815048134,815048134 -.long 3227951669,3227951669 -.long 935087732,935087732 -.long 2798289660,2798289660 -.long 2966458592,2966458592 -.long 366520115,366520115 -.long 1251476721,1251476721 -.long 4158319681,4158319681 -.long 240176511,240176511 -.long 804688151,804688151 -.long 2379631990,2379631990 -.long 1303441219,1303441219 -.long 1414376140,1414376140 -.long 3741619940,3741619940 -.long 3820343710,3820343710 -.long 461924940,461924940 -.long 3089050817,3089050817 -.long 2136040774,2136040774 -.long 82468509,82468509 -.long 1563790337,1563790337 -.long 1937016826,1937016826 -.long 776014843,776014843 -.long 1511876531,1511876531 -.long 1389550482,1389550482 -.long 861278441,861278441 -.long 323475053,323475053 -.long 2355222426,2355222426 -.long 2047648055,2047648055 -.long 2383738969,2383738969 -.long 2302415851,2302415851 -.long 3995576782,3995576782 -.long 902390199,902390199 -.long 3991215329,3991215329 -.long 1018251130,1018251130 -.long 1507840668,1507840668 -.long 1064563285,1064563285 -.long 2043548696,2043548696 -.long 3208103795,3208103795 -.long 3939366739,3939366739 -.long 1537932639,1537932639 -.long 342834655,342834655 -.long 2262516856,2262516856 -.long 2180231114,2180231114 -.long 1053059257,1053059257 -.long 741614648,741614648 -.long 1598071746,1598071746 -.long 1925389590,1925389590 -.long 203809468,203809468 -.long 2336832552,2336832552 -.long 1100287487,1100287487 -.long 
1895934009,1895934009 -.long 3736275976,3736275976 -.long 2632234200,2632234200 -.long 2428589668,2428589668 -.long 1636092795,1636092795 -.long 1890988757,1890988757 -.long 1952214088,1952214088 -.long 1113045200,1113045200 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 
151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.byte 82,9,106,213,48,54,165,56 -.byte 191,64,163,158,129,243,215,251 -.byte 124,227,57,130,155,47,255,135 -.byte 52,142,67,68,196,222,233,203 -.byte 84,123,148,50,166,194,35,61 -.byte 238,76,149,11,66,250,195,78 -.byte 8,46,161,102,40,217,36,178 -.byte 118,91,162,73,109,139,209,37 -.byte 114,248,246,100,134,104,152,22 -.byte 
212,164,92,204,93,101,182,146 -.byte 108,112,72,80,253,237,185,218 -.byte 94,21,70,87,167,141,157,132 -.byte 144,216,171,0,140,188,211,10 -.byte 247,228,88,5,184,179,69,6 -.byte 208,44,30,143,202,63,15,2 -.byte 193,175,189,3,1,19,138,107 -.byte 58,145,17,65,79,103,220,234 -.byte 151,242,207,206,240,180,230,115 -.byte 150,172,116,34,231,173,53,133 -.byte 226,249,55,232,28,117,223,110 -.byte 71,241,26,113,29,41,197,137 -.byte 111,183,98,14,170,24,190,27 -.byte 252,86,62,75,198,210,121,32 -.byte 154,219,192,254,120,205,90,244 -.byte 31,221,168,51,136,7,199,49 -.byte 177,18,16,89,39,128,236,95 -.byte 96,81,127,169,25,181,74,13 -.byte 45,229,122,159,147,201,156,239 -.byte 160,224,59,77,174,42,245,176 -.byte 200,235,187,60,131,83,153,97 -.byte 23,43,4,126,186,119,214,38 -.byte 225,105,20,99,85,33,12,125 -.globl _aes_nohw_decrypt -.private_extern _aes_nohw_decrypt -.align 4 -_aes_nohw_decrypt: -L_aes_nohw_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 28(%esp),%edi - movl %esp,%eax - subl $36,%esp - andl $-64,%esp - leal -127(%edi),%ebx - subl %esp,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esp - addl $4,%esp - movl %eax,28(%esp) - call L010pic_point -L010pic_point: - popl %ebp - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L010pic_point(%ebp),%eax - leal LAES_Td-L010pic_point(%ebp),%ebp - leal 764(%esp),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - btl $25,(%eax) - jnc L011x86 - movq (%esi),%mm0 - movq 8(%esi),%mm4 - call __sse_AES_decrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 4,0x90 -L011x86: - movl %ebp,24(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - call __x86_AES_decrypt_compact - movl 28(%esp),%esp - movl 24(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - popl %edi - popl %esi - popl %ebx - 
popl %ebp - ret -.globl _aes_nohw_cbc_encrypt -.private_extern _aes_nohw_cbc_encrypt -.align 4 -_aes_nohw_cbc_encrypt: -L_aes_nohw_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 28(%esp),%ecx - cmpl $0,%ecx - je L012drop_out - call L013pic_point -L013pic_point: - popl %ebp - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L013pic_point(%ebp),%eax - cmpl $0,40(%esp) - leal LAES_Te-L013pic_point(%ebp),%ebp - jne L014picked_te - leal LAES_Td-LAES_Te(%ebp),%ebp -L014picked_te: - pushfl - cld - cmpl $512,%ecx - jb L015slow_way - testl $15,%ecx - jnz L015slow_way - btl $28,(%eax) - jc L015slow_way - leal -324(%esp),%esi - andl $-64,%esi - movl %ebp,%eax - leal 2304(%ebp),%ebx - movl %esi,%edx - andl $4095,%eax - andl $4095,%ebx - andl $4095,%edx - cmpl %ebx,%edx - jb L016tbl_break_out - subl %ebx,%edx - subl %edx,%esi - jmp L017tbl_ok -.align 2,0x90 -L016tbl_break_out: - subl %eax,%edx - andl $4095,%edx - addl $384,%edx - subl %edx,%esi -.align 2,0x90 -L017tbl_ok: - leal 24(%esp),%edx - xchgl %esi,%esp - addl $4,%esp - movl %ebp,24(%esp) - movl %esi,28(%esp) - movl (%edx),%eax - movl 4(%edx),%ebx - movl 12(%edx),%edi - movl 16(%edx),%esi - movl 20(%edx),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edi,44(%esp) - movl %esi,48(%esp) - movl $0,316(%esp) - movl %edi,%ebx - movl $61,%ecx - subl %ebp,%ebx - movl %edi,%esi - andl $4095,%ebx - leal 76(%esp),%edi - cmpl $2304,%ebx - jb L018do_copy - cmpl $3852,%ebx - jb L019skip_copy -.align 2,0x90 -L018do_copy: - movl %edi,44(%esp) -.long 2784229001 -L019skip_copy: - movl $16,%edi -.align 2,0x90 -L020prefetch_tbl: - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%esi - leal 128(%ebp),%ebp - subl $1,%edi - jnz L020prefetch_tbl - subl $2048,%ebp - movl 32(%esp),%esi - movl 48(%esp),%edi - cmpl $0,%edx - je L021fast_decrypt - movl (%edi),%eax - movl 4(%edi),%ebx -.align 4,0x90 -L022fast_enc_loop: - movl 8(%edi),%ecx - movl 12(%edi),%edx - xorl 
(%esi),%eax - xorl 4(%esi),%ebx - xorl 8(%esi),%ecx - xorl 12(%esi),%edx - movl 44(%esp),%edi - call __x86_AES_encrypt - movl 32(%esp),%esi - movl 36(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - leal 16(%esi),%esi - movl 40(%esp),%ecx - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz L022fast_enc_loop - movl 48(%esp),%esi - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - cmpl $0,316(%esp) - movl 44(%esp),%edi - je L023skip_ezero - movl $60,%ecx - xorl %eax,%eax -.align 2,0x90 -.long 2884892297 -L023skip_ezero: - movl 28(%esp),%esp - popfl -L012drop_out: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L021fast_decrypt: - cmpl 36(%esp),%esi - je L024fast_dec_in_place - movl %edi,52(%esp) -.align 2,0x90 -.align 4,0x90 -L025fast_dec_loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl 44(%esp),%edi - call __x86_AES_decrypt - movl 52(%esp),%edi - movl 40(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl 36(%esp),%edi - movl 32(%esp),%esi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - movl %esi,52(%esp) - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edi - movl %edi,36(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz L025fast_dec_loop - movl 52(%esp),%edi - movl 48(%esp),%esi - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - jmp L026fast_dec_out -.align 4,0x90 -L024fast_dec_in_place: -L027fast_dec_in_place_loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - leal 60(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 
44(%esp),%edi - call __x86_AES_decrypt - movl 48(%esp),%edi - movl 36(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - leal 16(%esi),%esi - movl %esi,36(%esp) - leal 60(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 32(%esp),%esi - movl 40(%esp),%ecx - leal 16(%esi),%esi - movl %esi,32(%esp) - subl $16,%ecx - movl %ecx,40(%esp) - jnz L027fast_dec_in_place_loop -.align 2,0x90 -L026fast_dec_out: - cmpl $0,316(%esp) - movl 44(%esp),%edi - je L028skip_dzero - movl $60,%ecx - xorl %eax,%eax -.align 2,0x90 -.long 2884892297 -L028skip_dzero: - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L015slow_way: - movl (%eax),%eax - movl 36(%esp),%edi - leal -80(%esp),%esi - andl $-64,%esi - leal -143(%edi),%ebx - subl %esi,%ebx - negl %ebx - andl $960,%ebx - subl %ebx,%esi - leal 768(%esi),%ebx - subl %ebp,%ebx - andl $768,%ebx - leal 2176(%ebp,%ebx,1),%ebp - leal 24(%esp),%edx - xchgl %esi,%esp - addl $4,%esp - movl %ebp,24(%esp) - movl %esi,28(%esp) - movl %eax,52(%esp) - movl (%edx),%eax - movl 4(%edx),%ebx - movl 16(%edx),%esi - movl 20(%edx),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edi,44(%esp) - movl %esi,48(%esp) - movl %esi,%edi - movl %eax,%esi - cmpl $0,%edx - je L029slow_decrypt - cmpl $16,%ecx - movl %ebx,%edx - jb L030slow_enc_tail - btl $25,52(%esp) - jnc L031slow_enc_x86 - movq (%edi),%mm0 - movq 8(%edi),%mm4 -.align 4,0x90 -L032slow_enc_loop_sse: - pxor (%esi),%mm0 - pxor 8(%esi),%mm4 - movl 44(%esp),%edi - call __sse_AES_encrypt_compact - movl 32(%esp),%esi - movl 36(%esp),%edi - movl 40(%esp),%ecx - movq %mm0,(%edi) - movq %mm4,8(%edi) - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - 
subl $16,%ecx - cmpl $16,%ecx - movl %ecx,40(%esp) - jae L032slow_enc_loop_sse - testl $15,%ecx - jnz L030slow_enc_tail - movl 48(%esp),%esi - movq %mm0,(%esi) - movq %mm4,8(%esi) - emms - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L031slow_enc_x86: - movl (%edi),%eax - movl 4(%edi),%ebx -.align 2,0x90 -L033slow_enc_loop_x86: - movl 8(%edi),%ecx - movl 12(%edi),%edx - xorl (%esi),%eax - xorl 4(%esi),%ebx - xorl 8(%esi),%ecx - xorl 12(%esi),%edx - movl 44(%esp),%edi - call __x86_AES_encrypt_compact - movl 32(%esp),%esi - movl 36(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - leal 16(%esi),%esi - movl %esi,32(%esp) - leal 16(%edi),%edx - movl %edx,36(%esp) - subl $16,%ecx - cmpl $16,%ecx - movl %ecx,40(%esp) - jae L033slow_enc_loop_x86 - testl $15,%ecx - jnz L030slow_enc_tail - movl 48(%esp),%esi - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L030slow_enc_tail: - emms - movl %edx,%edi - movl $16,%ebx - subl %ecx,%ebx - cmpl %esi,%edi - je L034enc_in_place -.align 2,0x90 -.long 2767451785 - jmp L035enc_skip_in_place -L034enc_in_place: - leal (%edi,%ecx,1),%edi -L035enc_skip_in_place: - movl %ebx,%ecx - xorl %eax,%eax -.align 2,0x90 -.long 2868115081 - movl 48(%esp),%edi - movl %edx,%esi - movl (%edi),%eax - movl 4(%edi),%ebx - movl $16,40(%esp) - jmp L033slow_enc_loop_x86 -.align 4,0x90 -L029slow_decrypt: - btl $25,52(%esp) - jnc L036slow_dec_loop_x86 -.align 2,0x90 -L037slow_dec_loop_sse: - movq (%esi),%mm0 - movq 8(%esi),%mm4 - movl 44(%esp),%edi - call __sse_AES_decrypt_compact - movl 32(%esp),%esi - leal 60(%esp),%eax - movl 36(%esp),%ebx - movl 40(%esp),%ecx - movl 48(%esp),%edi - movq (%esi),%mm1 - movq 8(%esi),%mm5 - pxor (%edi),%mm0 - pxor 8(%edi),%mm4 - 
movq %mm1,(%edi) - movq %mm5,8(%edi) - subl $16,%ecx - jc L038slow_dec_partial_sse - movq %mm0,(%ebx) - movq %mm4,8(%ebx) - leal 16(%ebx),%ebx - movl %ebx,36(%esp) - leal 16(%esi),%esi - movl %esi,32(%esp) - movl %ecx,40(%esp) - jnz L037slow_dec_loop_sse - emms - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L038slow_dec_partial_sse: - movq %mm0,(%eax) - movq %mm4,8(%eax) - emms - addl $16,%ecx - movl %ebx,%edi - movl %eax,%esi -.align 2,0x90 -.long 2767451785 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L036slow_dec_loop_x86: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - leal 60(%esp),%edi - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 44(%esp),%edi - call __x86_AES_decrypt_compact - movl 48(%esp),%edi - movl 40(%esp),%esi - xorl (%edi),%eax - xorl 4(%edi),%ebx - xorl 8(%edi),%ecx - xorl 12(%edi),%edx - subl $16,%esi - jc L039slow_dec_partial_x86 - movl %esi,40(%esp) - movl 36(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - leal 16(%esi),%esi - movl %esi,36(%esp) - leal 60(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 32(%esp),%esi - leal 16(%esi),%esi - movl %esi,32(%esp) - jnz L036slow_dec_loop_x86 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret - pushfl -.align 4,0x90 -L039slow_dec_partial_x86: - leal 60(%esp),%esi - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 32(%esp),%esi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 40(%esp),%ecx - movl 36(%esp),%edi - leal 60(%esp),%esi -.align 2,0x90 -.long 
2767451785 - movl 28(%esp),%esp - popfl - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.private_extern __x86_AES_set_encrypt_key -.align 4 -__x86_AES_set_encrypt_key: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 24(%esp),%esi - movl 32(%esp),%edi - testl $-1,%esi - jz L040badpointer - testl $-1,%edi - jz L040badpointer - call L041pic_point -L041pic_point: - popl %ebp - leal LAES_Te-L041pic_point(%ebp),%ebp - leal 2176(%ebp),%ebp - movl -128(%ebp),%eax - movl -96(%ebp),%ebx - movl -64(%ebp),%ecx - movl -32(%ebp),%edx - movl (%ebp),%eax - movl 32(%ebp),%ebx - movl 64(%ebp),%ecx - movl 96(%ebp),%edx - movl 28(%esp),%ecx - cmpl $128,%ecx - je L04210rounds - cmpl $192,%ecx - je L04312rounds - cmpl $256,%ecx - je L04414rounds - movl $-2,%eax - jmp L045exit -L04210rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - xorl %ecx,%ecx - jmp L04610shortcut -.align 2,0x90 -L04710loop: - movl (%edi),%eax - movl 12(%edi),%edx -L04610shortcut: - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,16(%edi) - xorl 4(%edi),%eax - movl %eax,20(%edi) - xorl 8(%edi),%eax - movl %eax,24(%edi) - xorl 12(%edi),%eax - movl %eax,28(%edi) - incl %ecx - addl $16,%edi - cmpl $10,%ecx - jl L04710loop - movl $10,80(%edi) - xorl %eax,%eax - jmp L045exit -L04312rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 16(%esi),%ecx - movl 20(%esi),%edx - movl %ecx,16(%edi) - movl %edx,20(%edi) - xorl %ecx,%ecx - jmp L04812shortcut 
-.align 2,0x90 -L04912loop: - movl (%edi),%eax - movl 20(%edi),%edx -L04812shortcut: - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,24(%edi) - xorl 4(%edi),%eax - movl %eax,28(%edi) - xorl 8(%edi),%eax - movl %eax,32(%edi) - xorl 12(%edi),%eax - movl %eax,36(%edi) - cmpl $7,%ecx - je L05012break - incl %ecx - xorl 16(%edi),%eax - movl %eax,40(%edi) - xorl 20(%edi),%eax - movl %eax,44(%edi) - addl $24,%edi - jmp L04912loop -L05012break: - movl $12,72(%edi) - xorl %eax,%eax - jmp L045exit -L04414rounds: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,8(%edi) - movl %edx,12(%edi) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - movl %eax,16(%edi) - movl %ebx,20(%edi) - movl %ecx,24(%edi) - movl %edx,28(%edi) - xorl %ecx,%ecx - jmp L05114shortcut -.align 2,0x90 -L05214loop: - movl 28(%edi),%edx -L05114shortcut: - movl (%edi),%eax - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - xorl 896(%ebp,%ecx,4),%eax - movl %eax,32(%edi) - xorl 4(%edi),%eax - movl %eax,36(%edi) - xorl 8(%edi),%eax - movl %eax,40(%edi) - xorl 12(%edi),%eax - movl %eax,44(%edi) - cmpl $6,%ecx - je L05314break - incl %ecx - movl %eax,%edx - movl 16(%edi),%eax - movzbl %dl,%esi - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shrl 
$16,%edx - shll $8,%ebx - movzbl %dl,%esi - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - movzbl %dh,%esi - shll $16,%ebx - xorl %ebx,%eax - movzbl -128(%ebp,%esi,1),%ebx - shll $24,%ebx - xorl %ebx,%eax - movl %eax,48(%edi) - xorl 20(%edi),%eax - movl %eax,52(%edi) - xorl 24(%edi),%eax - movl %eax,56(%edi) - xorl 28(%edi),%eax - movl %eax,60(%edi) - addl $32,%edi - jmp L05214loop -L05314break: - movl $14,48(%edi) - xorl %eax,%eax - jmp L045exit -L040badpointer: - movl $-1,%eax -L045exit: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_nohw_set_encrypt_key -.private_extern _aes_nohw_set_encrypt_key -.align 4 -_aes_nohw_set_encrypt_key: -L_aes_nohw_set_encrypt_key_begin: - call __x86_AES_set_encrypt_key - ret -.globl _aes_nohw_set_decrypt_key -.private_extern _aes_nohw_set_decrypt_key -.align 4 -_aes_nohw_set_decrypt_key: -L_aes_nohw_set_decrypt_key_begin: - call __x86_AES_set_encrypt_key - cmpl $0,%eax - je L054proceed - ret -L054proceed: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 28(%esp),%esi - movl 240(%esi),%ecx - leal (,%ecx,4),%ecx - leal (%esi,%ecx,4),%edi -.align 2,0x90 -L055invert: - movl (%esi),%eax - movl 4(%esi),%ebx - movl (%edi),%ecx - movl 4(%edi),%edx - movl %eax,(%edi) - movl %ebx,4(%edi) - movl %ecx,(%esi) - movl %edx,4(%esi) - movl 8(%esi),%eax - movl 12(%esi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl %eax,8(%edi) - movl %ebx,12(%edi) - movl %ecx,8(%esi) - movl %edx,12(%esi) - addl $16,%esi - subl $16,%edi - cmpl %edi,%esi - jne L055invert - movl 28(%esp),%edi - movl 240(%edi),%esi - leal -2(%esi,%esi,1),%esi - leal (%edi,%esi,8),%esi - movl %esi,28(%esp) - movl 16(%edi),%eax -.align 2,0x90 -L056permute: - addl $16,%edi - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %esi,%ebx - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - 
subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %eax,%ebx - xorl %esi,%ecx - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - xorl %eax,%ecx - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - roll $8,%eax - xorl %esi,%edx - movl 4(%edi),%ebp - xorl %ebx,%eax - xorl %edx,%ebx - xorl %ecx,%eax - roll $24,%ebx - xorl %edx,%ecx - xorl %edx,%eax - roll $16,%ecx - xorl %ebx,%eax - roll $8,%edx - xorl %ecx,%eax - movl %ebp,%ebx - xorl %edx,%eax - movl %eax,(%edi) - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - xorl %esi,%ecx - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %ebx,%ecx - xorl %esi,%edx - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - xorl %ebx,%edx - subl %ebp,%esi - andl $4278124286,%eax - andl $454761243,%esi - roll $8,%ebx - xorl %esi,%eax - movl 8(%edi),%ebp - xorl %ecx,%ebx - xorl %eax,%ecx - xorl %edx,%ebx - roll $24,%ecx - xorl %eax,%edx - xorl %eax,%ebx - roll $16,%edx - xorl %ecx,%ebx - roll $8,%eax - xorl %edx,%ebx - movl %ebp,%ecx - xorl %eax,%ebx - movl %ebx,4(%edi) - movl $2155905152,%ebp - andl %ecx,%ebp - leal (%ecx,%ecx,1),%edx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%edx - andl $454761243,%esi - xorl %esi,%edx - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%eax - andl $454761243,%esi - xorl %ecx,%edx - xorl %esi,%eax - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - xorl %ecx,%eax - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - roll $8,%ecx - xorl %esi,%ebx - movl 12(%edi),%ebp - xorl 
%edx,%ecx - xorl %ebx,%edx - xorl %eax,%ecx - roll $24,%edx - xorl %ebx,%eax - xorl %ebx,%ecx - roll $16,%eax - xorl %edx,%ecx - roll $8,%ebx - xorl %eax,%ecx - movl %ebp,%edx - xorl %ebx,%ecx - movl %ecx,8(%edi) - movl $2155905152,%ebp - andl %edx,%ebp - leal (%edx,%edx,1),%eax - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%eax - andl $454761243,%esi - xorl %esi,%eax - movl $2155905152,%ebp - andl %eax,%ebp - leal (%eax,%eax,1),%ebx - movl %ebp,%esi - shrl $7,%ebp - subl %ebp,%esi - andl $4278124286,%ebx - andl $454761243,%esi - xorl %edx,%eax - xorl %esi,%ebx - movl $2155905152,%ebp - andl %ebx,%ebp - leal (%ebx,%ebx,1),%ecx - movl %ebp,%esi - shrl $7,%ebp - xorl %edx,%ebx - subl %ebp,%esi - andl $4278124286,%ecx - andl $454761243,%esi - roll $8,%edx - xorl %esi,%ecx - movl 16(%edi),%ebp - xorl %eax,%edx - xorl %ecx,%eax - xorl %ebx,%edx - roll $24,%eax - xorl %ecx,%ebx - xorl %ecx,%edx - roll $16,%ebx - xorl %eax,%edx - roll $8,%ecx - xorl %ebx,%edx - movl %ebp,%eax - xorl %ecx,%edx - movl %edx,12(%edi) - cmpl 28(%esp),%edi - jb L056permute - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.byte 65,69,83,32,102,111,114,32,120,56,54,44,32,67,82,89 -.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114 -.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aesni-x86.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aesni-x86.S deleted file mode 100644 index db7efffdf8..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/aesni-x86.S +++ /dev/null @@ -1,2476 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -#ifdef BORINGSSL_DISPATCH_TEST -#endif -.globl _aes_hw_encrypt -.private_extern _aes_hw_encrypt -.align 4 -_aes_hw_encrypt: -L_aes_hw_encrypt_begin: -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call L000pic -L000pic: - popl %ebx - leal _BORINGSSL_function_hit+1-L000pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 4(%esp),%eax - movl 12(%esp),%edx - movups (%eax),%xmm2 - movl 240(%edx),%ecx - movl 8(%esp),%eax - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L001enc1_loop_1: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L001enc1_loop_1 -.byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.globl _aes_hw_decrypt -.private_extern _aes_hw_decrypt -.align 4 -_aes_hw_decrypt: -L_aes_hw_decrypt_begin: - movl 4(%esp),%eax - movl 12(%esp),%edx - movups (%eax),%xmm2 - movl 240(%edx),%ecx - movl 8(%esp),%eax - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L002dec1_loop_2: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L002dec1_loop_2 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.private_extern __aesni_encrypt2 -.align 4 -__aesni_encrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L003enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L003enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - ret -.private_extern __aesni_decrypt2 
-.align 4 -__aesni_decrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L004dec2_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L004dec2_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 - ret -.private_extern __aesni_encrypt3 -.align 4 -__aesni_encrypt3: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L005enc3_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz L005enc3_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 - ret -.private_extern __aesni_decrypt3 -.align 4 -__aesni_decrypt3: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L006dec3_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz L006dec3_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 - ret -.private_extern __aesni_encrypt4 -.align 4 -__aesni_encrypt4: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 
- shll $4,%ecx - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -L007enc4_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz L007enc4_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 - ret -.private_extern __aesni_decrypt4 -.align 4 -__aesni_decrypt4: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - shll $4,%ecx - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -L008dec4_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz L008dec4_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 - ret -.private_extern __aesni_encrypt6 -.align 4 -__aesni_encrypt6: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,220,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,220,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 102,15,56,220,225 - pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp L009_aesni_encrypt6_inner -.align 4,0x90 -L010enc6_loop: -.byte 
102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -L009_aesni_encrypt6_inner: -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -L_aesni_encrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz L010enc6_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 - ret -.private_extern __aesni_decrypt6 -.align 4 -__aesni_decrypt6: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,222,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,222,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 102,15,56,222,225 - pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp L011_aesni_decrypt6_inner -.align 4,0x90 -L012dec6_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -L011_aesni_decrypt6_inner: -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -L_aesni_decrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz L012dec6_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 - ret -.globl _aes_hw_ecb_encrypt 
-.private_extern _aes_hw_ecb_encrypt -.align 4 -_aes_hw_ecb_encrypt: -L_aes_hw_ecb_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - andl $-16,%eax - jz L013ecb_ret - movl 240(%edx),%ecx - testl %ebx,%ebx - jz L014ecb_decrypt - movl %edx,%ebp - movl %ecx,%ebx - cmpl $96,%eax - jb L015ecb_enc_tail - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi - subl $96,%eax - jmp L016ecb_enc_loop6_enter -.align 4,0x90 -L017ecb_enc_loop6: - movups %xmm2,(%edi) - movdqu (%esi),%xmm2 - movups %xmm3,16(%edi) - movdqu 16(%esi),%xmm3 - movups %xmm4,32(%edi) - movdqu 32(%esi),%xmm4 - movups %xmm5,48(%edi) - movdqu 48(%esi),%xmm5 - movups %xmm6,64(%edi) - movdqu 64(%esi),%xmm6 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi -L016ecb_enc_loop6_enter: - call __aesni_encrypt6 - movl %ebp,%edx - movl %ebx,%ecx - subl $96,%eax - jnc L017ecb_enc_loop6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - movups %xmm7,80(%edi) - leal 96(%edi),%edi - addl $96,%eax - jz L013ecb_ret -L015ecb_enc_tail: - movups (%esi),%xmm2 - cmpl $32,%eax - jb L018ecb_enc_one - movups 16(%esi),%xmm3 - je L019ecb_enc_two - movups 32(%esi),%xmm4 - cmpl $64,%eax - jb L020ecb_enc_three - movups 48(%esi),%xmm5 - je L021ecb_enc_four - movups 64(%esi),%xmm6 - xorps %xmm7,%xmm7 - call __aesni_encrypt6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp L013ecb_ret -.align 4,0x90 -L018ecb_enc_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L022enc1_loop_3: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L022enc1_loop_3 -.byte 
102,15,56,221,209 - movups %xmm2,(%edi) - jmp L013ecb_ret -.align 4,0x90 -L019ecb_enc_two: - call __aesni_encrypt2 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp L013ecb_ret -.align 4,0x90 -L020ecb_enc_three: - call __aesni_encrypt3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp L013ecb_ret -.align 4,0x90 -L021ecb_enc_four: - call __aesni_encrypt4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - jmp L013ecb_ret -.align 4,0x90 -L014ecb_decrypt: - movl %edx,%ebp - movl %ecx,%ebx - cmpl $96,%eax - jb L023ecb_dec_tail - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi - subl $96,%eax - jmp L024ecb_dec_loop6_enter -.align 4,0x90 -L025ecb_dec_loop6: - movups %xmm2,(%edi) - movdqu (%esi),%xmm2 - movups %xmm3,16(%edi) - movdqu 16(%esi),%xmm3 - movups %xmm4,32(%edi) - movdqu 32(%esi),%xmm4 - movups %xmm5,48(%edi) - movdqu 48(%esi),%xmm5 - movups %xmm6,64(%edi) - movdqu 64(%esi),%xmm6 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqu 80(%esi),%xmm7 - leal 96(%esi),%esi -L024ecb_dec_loop6_enter: - call __aesni_decrypt6 - movl %ebp,%edx - movl %ebx,%ecx - subl $96,%eax - jnc L025ecb_dec_loop6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - movups %xmm7,80(%edi) - leal 96(%edi),%edi - addl $96,%eax - jz L013ecb_ret -L023ecb_dec_tail: - movups (%esi),%xmm2 - cmpl $32,%eax - jb L026ecb_dec_one - movups 16(%esi),%xmm3 - je L027ecb_dec_two - movups 32(%esi),%xmm4 - cmpl $64,%eax - jb L028ecb_dec_three - movups 48(%esi),%xmm5 - je L029ecb_dec_four - movups 64(%esi),%xmm6 - xorps %xmm7,%xmm7 - call __aesni_decrypt6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp L013ecb_ret -.align 4,0x90 -L026ecb_dec_one: - movups (%edx),%xmm0 - movups 
16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L030dec1_loop_4: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L030dec1_loop_4 -.byte 102,15,56,223,209 - movups %xmm2,(%edi) - jmp L013ecb_ret -.align 4,0x90 -L027ecb_dec_two: - call __aesni_decrypt2 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp L013ecb_ret -.align 4,0x90 -L028ecb_dec_three: - call __aesni_decrypt3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp L013ecb_ret -.align 4,0x90 -L029ecb_dec_four: - call __aesni_decrypt4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) -L013ecb_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_ccm64_encrypt_blocks -.private_extern _aes_hw_ccm64_encrypt_blocks -.align 4 -_aes_hw_ccm64_encrypt_blocks: -L_aes_hw_ccm64_encrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl 40(%esp),%ecx - movl %esp,%ebp - subl $60,%esp - andl $-16,%esp - movl %ebp,48(%esp) - movdqu (%ebx),%xmm7 - movdqu (%ecx),%xmm3 - movl 240(%edx),%ecx - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl $67438087,8(%esp) - movl $66051,12(%esp) - movl $1,%ebx - xorl %ebp,%ebp - movl %ebx,16(%esp) - movl %ebp,20(%esp) - movl %ebp,24(%esp) - movl %ebp,28(%esp) - shll $4,%ecx - movl $16,%ebx - leal (%edx),%ebp - movdqa (%esp),%xmm5 - movdqa %xmm7,%xmm2 - leal 32(%edx,%ecx,1),%edx - subl %ecx,%ebx -.byte 102,15,56,0,253 -L031ccm64_enc_outer: - movups (%ebp),%xmm0 - movl %ebx,%ecx - movups (%esi),%xmm6 - xorps %xmm0,%xmm2 - movups 16(%ebp),%xmm1 - xorps %xmm6,%xmm0 - xorps %xmm0,%xmm3 - movups 32(%ebp),%xmm0 -L032ccm64_enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - 
movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L032ccm64_enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - paddq 16(%esp),%xmm7 - decl %eax -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - leal 16(%esi),%esi - xorps %xmm2,%xmm6 - movdqa %xmm7,%xmm2 - movups %xmm6,(%edi) -.byte 102,15,56,0,213 - leal 16(%edi),%edi - jnz L031ccm64_enc_outer - movl 48(%esp),%esp - movl 40(%esp),%edi - movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_ccm64_decrypt_blocks -.private_extern _aes_hw_ccm64_decrypt_blocks -.align 4 -_aes_hw_ccm64_decrypt_blocks: -L_aes_hw_ccm64_decrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl 40(%esp),%ecx - movl %esp,%ebp - subl $60,%esp - andl $-16,%esp - movl %ebp,48(%esp) - movdqu (%ebx),%xmm7 - movdqu (%ecx),%xmm3 - movl 240(%edx),%ecx - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl $67438087,8(%esp) - movl $66051,12(%esp) - movl $1,%ebx - xorl %ebp,%ebp - movl %ebx,16(%esp) - movl %ebp,20(%esp) - movl %ebp,24(%esp) - movl %ebp,28(%esp) - movdqa (%esp),%xmm5 - movdqa %xmm7,%xmm2 - movl %edx,%ebp - movl %ecx,%ebx -.byte 102,15,56,0,253 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L033enc1_loop_5: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L033enc1_loop_5 -.byte 102,15,56,221,209 - shll $4,%ebx - movl $16,%ecx - movups (%esi),%xmm6 - paddq 16(%esp),%xmm7 - leal 16(%esi),%esi - subl %ebx,%ecx - leal 32(%ebp,%ebx,1),%edx - movl %ecx,%ebx - jmp L034ccm64_dec_outer -.align 4,0x90 -L034ccm64_dec_outer: - xorps %xmm2,%xmm6 - movdqa %xmm7,%xmm2 - 
movups %xmm6,(%edi) - leal 16(%edi),%edi -.byte 102,15,56,0,213 - subl $1,%eax - jz L035ccm64_dec_break - movups (%ebp),%xmm0 - movl %ebx,%ecx - movups 16(%ebp),%xmm1 - xorps %xmm0,%xmm6 - xorps %xmm0,%xmm2 - xorps %xmm6,%xmm3 - movups 32(%ebp),%xmm0 -L036ccm64_dec2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L036ccm64_dec2_loop - movups (%esi),%xmm6 - paddq 16(%esp),%xmm7 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - leal 16(%esi),%esi - jmp L034ccm64_dec_outer -.align 4,0x90 -L035ccm64_dec_break: - movl 240(%ebp),%ecx - movl %ebp,%edx - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm6 - leal 32(%edx),%edx - xorps %xmm6,%xmm3 -L037enc1_loop_6: -.byte 102,15,56,220,217 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L037enc1_loop_6 -.byte 102,15,56,221,217 - movl 48(%esp),%esp - movl 40(%esp),%edi - movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_ctr32_encrypt_blocks -.private_extern _aes_hw_ctr32_encrypt_blocks -.align 4 -_aes_hw_ctr32_encrypt_blocks: -L_aes_hw_ctr32_encrypt_blocks_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call L038pic -L038pic: - popl %ebx - leal _BORINGSSL_function_hit+0-L038pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl 36(%esp),%ebx - movl %esp,%ebp - subl $88,%esp - andl $-16,%esp - movl %ebp,80(%esp) - cmpl $1,%eax - je L039ctr32_one_shortcut - movdqu (%ebx),%xmm7 - movl $202182159,(%esp) - movl $134810123,4(%esp) - movl 
$67438087,8(%esp) - movl $66051,12(%esp) - movl $6,%ecx - xorl %ebp,%ebp - movl %ecx,16(%esp) - movl %ecx,20(%esp) - movl %ecx,24(%esp) - movl %ebp,28(%esp) -.byte 102,15,58,22,251,3 -.byte 102,15,58,34,253,3 - movl 240(%edx),%ecx - bswap %ebx - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movdqa (%esp),%xmm2 -.byte 102,15,58,34,195,0 - leal 3(%ebx),%ebp -.byte 102,15,58,34,205,0 - incl %ebx -.byte 102,15,58,34,195,1 - incl %ebp -.byte 102,15,58,34,205,1 - incl %ebx -.byte 102,15,58,34,195,2 - incl %ebp -.byte 102,15,58,34,205,2 - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - movdqu (%edx),%xmm6 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 - pshufd $192,%xmm0,%xmm2 - pshufd $128,%xmm0,%xmm3 - cmpl $6,%eax - jb L040ctr32_tail - pxor %xmm6,%xmm7 - shll $4,%ecx - movl $16,%ebx - movdqa %xmm7,32(%esp) - movl %edx,%ebp - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - subl $6,%eax - jmp L041ctr32_loop6 -.align 4,0x90 -L041ctr32_loop6: - pshufd $64,%xmm0,%xmm4 - movdqa 32(%esp),%xmm0 - pshufd $192,%xmm1,%xmm5 - pxor %xmm0,%xmm2 - pshufd $128,%xmm1,%xmm6 - pxor %xmm0,%xmm3 - pshufd $64,%xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 -.byte 102,15,56,220,209 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 -.byte 102,15,56,220,217 - movups 32(%ebp),%xmm0 - movl %ebx,%ecx -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - call L_aesni_encrypt6_enter - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps %xmm1,%xmm2 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm3 - movups %xmm2,(%edi) - movdqa 16(%esp),%xmm0 - xorps %xmm1,%xmm4 - movdqa 64(%esp),%xmm1 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - paddd %xmm0,%xmm1 - paddd 48(%esp),%xmm0 - movdqa (%esp),%xmm2 - movups 48(%esi),%xmm3 - movups 64(%esi),%xmm4 - xorps %xmm3,%xmm5 - movups 80(%esi),%xmm3 - leal 96(%esi),%esi - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - xorps %xmm4,%xmm6 - movups %xmm5,48(%edi) - xorps %xmm3,%xmm7 - movdqa %xmm1,64(%esp) -.byte 
102,15,56,0,202 - movups %xmm6,64(%edi) - pshufd $192,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - pshufd $128,%xmm0,%xmm3 - subl $6,%eax - jnc L041ctr32_loop6 - addl $6,%eax - jz L042ctr32_ret - movdqu (%ebp),%xmm7 - movl %ebp,%edx - pxor 32(%esp),%xmm7 - movl 240(%ebp),%ecx -L040ctr32_tail: - por %xmm7,%xmm2 - cmpl $2,%eax - jb L043ctr32_one - pshufd $64,%xmm0,%xmm4 - por %xmm7,%xmm3 - je L044ctr32_two - pshufd $192,%xmm1,%xmm5 - por %xmm7,%xmm4 - cmpl $4,%eax - jb L045ctr32_three - pshufd $128,%xmm1,%xmm6 - por %xmm7,%xmm5 - je L046ctr32_four - por %xmm7,%xmm6 - call __aesni_encrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps %xmm1,%xmm2 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm3 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm4 - movups 64(%esi),%xmm1 - xorps %xmm0,%xmm5 - movups %xmm2,(%edi) - xorps %xmm1,%xmm6 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - jmp L042ctr32_ret -.align 4,0x90 -L039ctr32_one_shortcut: - movups (%ebx),%xmm2 - movl 240(%edx),%ecx -L043ctr32_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L047enc1_loop_7: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L047enc1_loop_7 -.byte 102,15,56,221,209 - movups (%esi),%xmm6 - xorps %xmm2,%xmm6 - movups %xmm6,(%edi) - jmp L042ctr32_ret -.align 4,0x90 -L044ctr32_two: - call __aesni_encrypt2 - movups (%esi),%xmm5 - movups 16(%esi),%xmm6 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - jmp L042ctr32_ret -.align 4,0x90 -L045ctr32_three: - call __aesni_encrypt3 - movups (%esi),%xmm5 - movups 16(%esi),%xmm6 - xorps %xmm5,%xmm2 - movups 32(%esi),%xmm7 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - xorps %xmm7,%xmm4 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - jmp L042ctr32_ret -.align 4,0x90 -L046ctr32_four: - call __aesni_encrypt4 - movups (%esi),%xmm6 - movups 16(%esi),%xmm7 - movups 32(%esi),%xmm1 - xorps 
%xmm6,%xmm2 - movups 48(%esi),%xmm0 - xorps %xmm7,%xmm3 - movups %xmm2,(%edi) - xorps %xmm1,%xmm4 - movups %xmm3,16(%edi) - xorps %xmm0,%xmm5 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) -L042ctr32_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movl 80(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_xts_encrypt -.private_extern _aes_hw_xts_encrypt -.align 4 -_aes_hw_xts_encrypt: -L_aes_hw_xts_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 36(%esp),%edx - movl 40(%esp),%esi - movl 240(%edx),%ecx - movups (%esi),%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L048enc1_loop_8: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L048enc1_loop_8 -.byte 102,15,56,221,209 - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl %esp,%ebp - subl $120,%esp - movl 240(%edx),%ecx - andl $-16,%esp - movl $135,96(%esp) - movl $0,100(%esp) - movl $1,104(%esp) - movl $0,108(%esp) - movl %eax,112(%esp) - movl %ebp,116(%esp) - movdqa %xmm2,%xmm1 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - pcmpgtd %xmm1,%xmm0 - andl $-16,%eax - movl %edx,%ebp - movl %ecx,%ebx - subl $96,%eax - jc L049xts_enc_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp L050xts_enc_loop6 -.align 4,0x90 -L050xts_enc_loop6: - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,16(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,32(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - 
pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,64(%esp) - paddq %xmm1,%xmm1 - movups (%ebp),%xmm0 - pand %xmm3,%xmm7 - movups (%esi),%xmm2 - pxor %xmm1,%xmm7 - movl %ebx,%ecx - movdqu 16(%esi),%xmm3 - xorps %xmm0,%xmm2 - movdqu 32(%esi),%xmm4 - pxor %xmm0,%xmm3 - movdqu 48(%esi),%xmm5 - pxor %xmm0,%xmm4 - movdqu 64(%esi),%xmm6 - pxor %xmm0,%xmm5 - movdqu 80(%esi),%xmm1 - pxor %xmm0,%xmm6 - leal 96(%esi),%esi - pxor (%esp),%xmm2 - movdqa %xmm7,80(%esp) - pxor %xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 -.byte 102,15,56,220,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 -.byte 102,15,56,220,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - call L_aesni_encrypt6_enter - movdqa 80(%esp),%xmm1 - pxor %xmm0,%xmm0 - xorps (%esp),%xmm2 - pcmpgtd %xmm1,%xmm0 - xorps 16(%esp),%xmm3 - movups %xmm2,(%edi) - xorps 32(%esp),%xmm4 - movups %xmm3,16(%edi) - xorps 48(%esp),%xmm5 - movups %xmm4,32(%edi) - xorps 64(%esp),%xmm6 - movups %xmm5,48(%edi) - xorps %xmm1,%xmm7 - movups %xmm6,64(%edi) - pshufd $19,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqa 96(%esp),%xmm3 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - subl $96,%eax - jnc L050xts_enc_loop6 - movl 240(%ebp),%ecx - movl %ebp,%edx - movl %ecx,%ebx -L049xts_enc_short: - addl $96,%eax - jz L051xts_enc_done6x - movdqa %xmm1,%xmm5 - cmpl $32,%eax - jb L052xts_enc_one - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - je L053xts_enc_two - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor 
%xmm2,%xmm1 - cmpl $64,%eax - jb L054xts_enc_three - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm7 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - movdqa %xmm5,(%esp) - movdqa %xmm6,16(%esp) - je L055xts_enc_four - movdqa %xmm7,32(%esp) - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm7 - pxor %xmm1,%xmm7 - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - pxor (%esp),%xmm2 - movdqu 48(%esi),%xmm5 - pxor 16(%esp),%xmm3 - movdqu 64(%esi),%xmm6 - pxor 32(%esp),%xmm4 - leal 80(%esi),%esi - pxor 48(%esp),%xmm5 - movdqa %xmm7,64(%esp) - pxor %xmm7,%xmm6 - call __aesni_encrypt6 - movaps 64(%esp),%xmm1 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps 32(%esp),%xmm4 - movups %xmm2,(%edi) - xorps 48(%esp),%xmm5 - movups %xmm3,16(%edi) - xorps %xmm1,%xmm6 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - leal 80(%edi),%edi - jmp L056xts_enc_done -.align 4,0x90 -L052xts_enc_one: - movups (%esi),%xmm2 - leal 16(%esi),%esi - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L057enc1_loop_9: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L057enc1_loop_9 -.byte 102,15,56,221,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) - leal 16(%edi),%edi - movdqa %xmm5,%xmm1 - jmp L056xts_enc_done -.align 4,0x90 -L053xts_enc_two: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - leal 32(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - call __aesni_encrypt2 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 32(%edi),%edi - movdqa %xmm6,%xmm1 - jmp L056xts_enc_done -.align 4,0x90 -L054xts_enc_three: - movaps %xmm1,%xmm7 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - leal 48(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - call __aesni_encrypt3 - xorps 
%xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - leal 48(%edi),%edi - movdqa %xmm7,%xmm1 - jmp L056xts_enc_done -.align 4,0x90 -L055xts_enc_four: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - xorps (%esp),%xmm2 - movups 48(%esi),%xmm5 - leal 64(%esi),%esi - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - xorps %xmm6,%xmm5 - call __aesni_encrypt4 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - xorps %xmm6,%xmm5 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - leal 64(%edi),%edi - movdqa %xmm6,%xmm1 - jmp L056xts_enc_done -.align 4,0x90 -L051xts_enc_done6x: - movl 112(%esp),%eax - andl $15,%eax - jz L058xts_enc_ret - movdqa %xmm1,%xmm5 - movl %eax,112(%esp) - jmp L059xts_enc_steal -.align 4,0x90 -L056xts_enc_done: - movl 112(%esp),%eax - pxor %xmm0,%xmm0 - andl $15,%eax - jz L058xts_enc_ret - pcmpgtd %xmm1,%xmm0 - movl %eax,112(%esp) - pshufd $19,%xmm0,%xmm5 - paddq %xmm1,%xmm1 - pand 96(%esp),%xmm5 - pxor %xmm1,%xmm5 -L059xts_enc_steal: - movzbl (%esi),%ecx - movzbl -16(%edi),%edx - leal 1(%esi),%esi - movb %cl,-16(%edi) - movb %dl,(%edi) - leal 1(%edi),%edi - subl $1,%eax - jnz L059xts_enc_steal - subl 112(%esp),%edi - movl %ebp,%edx - movl %ebx,%ecx - movups -16(%edi),%xmm2 - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L060enc1_loop_10: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L060enc1_loop_10 -.byte 102,15,56,221,209 - xorps %xmm5,%xmm2 - movups %xmm2,-16(%edi) -L058xts_enc_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) - movl 
116(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_xts_decrypt -.private_extern _aes_hw_xts_decrypt -.align 4 -_aes_hw_xts_decrypt: -L_aes_hw_xts_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 36(%esp),%edx - movl 40(%esp),%esi - movl 240(%edx),%ecx - movups (%esi),%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L061enc1_loop_11: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L061enc1_loop_11 -.byte 102,15,56,221,209 - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - movl %esp,%ebp - subl $120,%esp - andl $-16,%esp - xorl %ebx,%ebx - testl $15,%eax - setnz %bl - shll $4,%ebx - subl %ebx,%eax - movl $135,96(%esp) - movl $0,100(%esp) - movl $1,104(%esp) - movl $0,108(%esp) - movl %eax,112(%esp) - movl %ebp,116(%esp) - movl 240(%edx),%ecx - movl %edx,%ebp - movl %ecx,%ebx - movdqa %xmm2,%xmm1 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - pcmpgtd %xmm1,%xmm0 - andl $-16,%eax - subl $96,%eax - jc L062xts_dec_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp L063xts_dec_loop6 -.align 4,0x90 -L063xts_dec_loop6: - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,16(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,32(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,64(%esp) - paddq %xmm1,%xmm1 - movups (%ebp),%xmm0 - pand %xmm3,%xmm7 - movups (%esi),%xmm2 - pxor %xmm1,%xmm7 - movl %ebx,%ecx - 
movdqu 16(%esi),%xmm3 - xorps %xmm0,%xmm2 - movdqu 32(%esi),%xmm4 - pxor %xmm0,%xmm3 - movdqu 48(%esi),%xmm5 - pxor %xmm0,%xmm4 - movdqu 64(%esi),%xmm6 - pxor %xmm0,%xmm5 - movdqu 80(%esi),%xmm1 - pxor %xmm0,%xmm6 - leal 96(%esi),%esi - pxor (%esp),%xmm2 - movdqa %xmm7,80(%esp) - pxor %xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 -.byte 102,15,56,222,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - call L_aesni_decrypt6_enter - movdqa 80(%esp),%xmm1 - pxor %xmm0,%xmm0 - xorps (%esp),%xmm2 - pcmpgtd %xmm1,%xmm0 - xorps 16(%esp),%xmm3 - movups %xmm2,(%edi) - xorps 32(%esp),%xmm4 - movups %xmm3,16(%edi) - xorps 48(%esp),%xmm5 - movups %xmm4,32(%edi) - xorps 64(%esp),%xmm6 - movups %xmm5,48(%edi) - xorps %xmm1,%xmm7 - movups %xmm6,64(%edi) - pshufd $19,%xmm0,%xmm2 - movups %xmm7,80(%edi) - leal 96(%edi),%edi - movdqa 96(%esp),%xmm3 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - subl $96,%eax - jnc L063xts_dec_loop6 - movl 240(%ebp),%ecx - movl %ebp,%edx - movl %ecx,%ebx -L062xts_dec_short: - addl $96,%eax - jz L064xts_dec_done6x - movdqa %xmm1,%xmm5 - cmpl $32,%eax - jb L065xts_dec_one - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - je L066xts_dec_two - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - cmpl $64,%eax - jb L067xts_dec_three - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa %xmm1,%xmm7 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 - movdqa %xmm5,(%esp) - movdqa %xmm6,16(%esp) - je L068xts_dec_four - movdqa %xmm7,32(%esp) - pshufd $19,%xmm0,%xmm7 - movdqa %xmm1,48(%esp) - paddq %xmm1,%xmm1 - pand 
%xmm3,%xmm7 - pxor %xmm1,%xmm7 - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - pxor (%esp),%xmm2 - movdqu 48(%esi),%xmm5 - pxor 16(%esp),%xmm3 - movdqu 64(%esi),%xmm6 - pxor 32(%esp),%xmm4 - leal 80(%esi),%esi - pxor 48(%esp),%xmm5 - movdqa %xmm7,64(%esp) - pxor %xmm7,%xmm6 - call __aesni_decrypt6 - movaps 64(%esp),%xmm1 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps 32(%esp),%xmm4 - movups %xmm2,(%edi) - xorps 48(%esp),%xmm5 - movups %xmm3,16(%edi) - xorps %xmm1,%xmm6 - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - movups %xmm6,64(%edi) - leal 80(%edi),%edi - jmp L069xts_dec_done -.align 4,0x90 -L065xts_dec_one: - movups (%esi),%xmm2 - leal 16(%esi),%esi - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L070dec1_loop_12: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L070dec1_loop_12 -.byte 102,15,56,223,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) - leal 16(%edi),%edi - movdqa %xmm5,%xmm1 - jmp L069xts_dec_done -.align 4,0x90 -L066xts_dec_two: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - leal 32(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - call __aesni_decrypt2 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 32(%edi),%edi - movdqa %xmm6,%xmm1 - jmp L069xts_dec_done -.align 4,0x90 -L067xts_dec_three: - movaps %xmm1,%xmm7 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - leal 48(%esi),%esi - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - call __aesni_decrypt3 - xorps %xmm5,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - leal 48(%edi),%edi - movdqa %xmm7,%xmm1 - jmp L069xts_dec_done -.align 4,0x90 -L068xts_dec_four: - movaps %xmm1,%xmm6 - movups (%esi),%xmm2 - movups 16(%esi),%xmm3 - movups 32(%esi),%xmm4 - xorps (%esp),%xmm2 - movups 48(%esi),%xmm5 - leal 
64(%esi),%esi - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - xorps %xmm6,%xmm5 - call __aesni_decrypt4 - xorps (%esp),%xmm2 - xorps 16(%esp),%xmm3 - xorps %xmm7,%xmm4 - movups %xmm2,(%edi) - xorps %xmm6,%xmm5 - movups %xmm3,16(%edi) - movups %xmm4,32(%edi) - movups %xmm5,48(%edi) - leal 64(%edi),%edi - movdqa %xmm6,%xmm1 - jmp L069xts_dec_done -.align 4,0x90 -L064xts_dec_done6x: - movl 112(%esp),%eax - andl $15,%eax - jz L071xts_dec_ret - movl %eax,112(%esp) - jmp L072xts_dec_only_one_more -.align 4,0x90 -L069xts_dec_done: - movl 112(%esp),%eax - pxor %xmm0,%xmm0 - andl $15,%eax - jz L071xts_dec_ret - pcmpgtd %xmm1,%xmm0 - movl %eax,112(%esp) - pshufd $19,%xmm0,%xmm2 - pxor %xmm0,%xmm0 - movdqa 96(%esp),%xmm3 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm2 - pcmpgtd %xmm1,%xmm0 - pxor %xmm2,%xmm1 -L072xts_dec_only_one_more: - pshufd $19,%xmm0,%xmm5 - movdqa %xmm1,%xmm6 - paddq %xmm1,%xmm1 - pand %xmm3,%xmm5 - pxor %xmm1,%xmm5 - movl %ebp,%edx - movl %ebx,%ecx - movups (%esi),%xmm2 - xorps %xmm5,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L073dec1_loop_13: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L073dec1_loop_13 -.byte 102,15,56,223,209 - xorps %xmm5,%xmm2 - movups %xmm2,(%edi) -L074xts_dec_steal: - movzbl 16(%esi),%ecx - movzbl (%edi),%edx - leal 1(%esi),%esi - movb %cl,(%edi) - movb %dl,16(%edi) - leal 1(%edi),%edi - subl $1,%eax - jnz L074xts_dec_steal - subl 112(%esp),%edi - movl %ebp,%edx - movl %ebx,%ecx - movups (%edi),%xmm2 - xorps %xmm6,%xmm2 - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L075dec1_loop_14: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L075dec1_loop_14 -.byte 102,15,56,223,209 - xorps %xmm6,%xmm2 - movups %xmm2,(%edi) -L071xts_dec_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - 
movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) - movl 116(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _aes_hw_cbc_encrypt -.private_extern _aes_hw_cbc_encrypt -.align 4 -_aes_hw_cbc_encrypt: -L_aes_hw_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl %esp,%ebx - movl 24(%esp),%edi - subl $24,%ebx - movl 28(%esp),%eax - andl $-16,%ebx - movl 32(%esp),%edx - movl 36(%esp),%ebp - testl %eax,%eax - jz L076cbc_abort - cmpl $0,40(%esp) - xchgl %esp,%ebx - movups (%ebp),%xmm7 - movl 240(%edx),%ecx - movl %edx,%ebp - movl %ebx,16(%esp) - movl %ecx,%ebx - je L077cbc_decrypt - movaps %xmm7,%xmm2 - cmpl $16,%eax - jb L078cbc_enc_tail - subl $16,%eax - jmp L079cbc_enc_loop -.align 4,0x90 -L079cbc_enc_loop: - movups (%esi),%xmm7 - leal 16(%esi),%esi - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm7 - leal 32(%edx),%edx - xorps %xmm7,%xmm2 -L080enc1_loop_15: -.byte 102,15,56,220,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L080enc1_loop_15 -.byte 102,15,56,221,209 - movl %ebx,%ecx - movl %ebp,%edx - movups %xmm2,(%edi) - leal 16(%edi),%edi - subl $16,%eax - jnc L079cbc_enc_loop - addl $16,%eax - jnz L078cbc_enc_tail - movaps %xmm2,%xmm7 - pxor %xmm2,%xmm2 - jmp L081cbc_ret -L078cbc_enc_tail: - movl %eax,%ecx -.long 2767451785 - movl $16,%ecx - subl %eax,%ecx - xorl %eax,%eax -.long 2868115081 - leal -16(%edi),%edi - movl %ebx,%ecx - movl %edi,%esi - movl %ebp,%edx - jmp L079cbc_enc_loop -.align 4,0x90 -L077cbc_decrypt: - cmpl $80,%eax - jbe L082cbc_dec_tail - movaps %xmm7,(%esp) - subl $80,%eax - jmp L083cbc_dec_loop6_enter -.align 4,0x90 -L084cbc_dec_loop6: - movaps %xmm0,(%esp) - movups %xmm7,(%edi) - leal 16(%edi),%edi -L083cbc_dec_loop6_enter: - movdqu (%esi),%xmm2 - movdqu 16(%esi),%xmm3 - movdqu 32(%esi),%xmm4 - movdqu 48(%esi),%xmm5 - movdqu 64(%esi),%xmm6 - 
movdqu 80(%esi),%xmm7 - call __aesni_decrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps (%esp),%xmm2 - xorps %xmm1,%xmm3 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm4 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm5 - movups 64(%esi),%xmm1 - xorps %xmm0,%xmm6 - movups 80(%esi),%xmm0 - xorps %xmm1,%xmm7 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - leal 96(%esi),%esi - movups %xmm4,32(%edi) - movl %ebx,%ecx - movups %xmm5,48(%edi) - movl %ebp,%edx - movups %xmm6,64(%edi) - leal 80(%edi),%edi - subl $96,%eax - ja L084cbc_dec_loop6 - movaps %xmm7,%xmm2 - movaps %xmm0,%xmm7 - addl $80,%eax - jle L085cbc_dec_clear_tail_collected - movups %xmm2,(%edi) - leal 16(%edi),%edi -L082cbc_dec_tail: - movups (%esi),%xmm2 - movaps %xmm2,%xmm6 - cmpl $16,%eax - jbe L086cbc_dec_one - movups 16(%esi),%xmm3 - movaps %xmm3,%xmm5 - cmpl $32,%eax - jbe L087cbc_dec_two - movups 32(%esi),%xmm4 - cmpl $48,%eax - jbe L088cbc_dec_three - movups 48(%esi),%xmm5 - cmpl $64,%eax - jbe L089cbc_dec_four - movups 64(%esi),%xmm6 - movaps %xmm7,(%esp) - movups (%esi),%xmm2 - xorps %xmm7,%xmm7 - call __aesni_decrypt6 - movups (%esi),%xmm1 - movups 16(%esi),%xmm0 - xorps (%esp),%xmm2 - xorps %xmm1,%xmm3 - movups 32(%esi),%xmm1 - xorps %xmm0,%xmm4 - movups 48(%esi),%xmm0 - xorps %xmm1,%xmm5 - movups 64(%esi),%xmm7 - xorps %xmm0,%xmm6 - movups %xmm2,(%edi) - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%edi) - pxor %xmm5,%xmm5 - leal 64(%edi),%edi - movaps %xmm6,%xmm2 - pxor %xmm6,%xmm6 - subl $80,%eax - jmp L090cbc_dec_tail_collected -.align 4,0x90 -L086cbc_dec_one: - movups (%edx),%xmm0 - movups 16(%edx),%xmm1 - leal 32(%edx),%edx - xorps %xmm0,%xmm2 -L091dec1_loop_16: -.byte 102,15,56,222,209 - decl %ecx - movups (%edx),%xmm1 - leal 16(%edx),%edx - jnz L091dec1_loop_16 -.byte 102,15,56,223,209 - xorps %xmm7,%xmm2 - movaps %xmm6,%xmm7 - subl $16,%eax - jmp L090cbc_dec_tail_collected -.align 4,0x90 -L087cbc_dec_two: - call __aesni_decrypt2 - 
xorps %xmm7,%xmm2 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - movaps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - leal 16(%edi),%edi - movaps %xmm5,%xmm7 - subl $32,%eax - jmp L090cbc_dec_tail_collected -.align 4,0x90 -L088cbc_dec_three: - call __aesni_decrypt3 - xorps %xmm7,%xmm2 - xorps %xmm6,%xmm3 - xorps %xmm5,%xmm4 - movups %xmm2,(%edi) - movaps %xmm4,%xmm2 - pxor %xmm4,%xmm4 - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - leal 32(%edi),%edi - movups 32(%esi),%xmm7 - subl $48,%eax - jmp L090cbc_dec_tail_collected -.align 4,0x90 -L089cbc_dec_four: - call __aesni_decrypt4 - movups 16(%esi),%xmm1 - movups 32(%esi),%xmm0 - xorps %xmm7,%xmm2 - movups 48(%esi),%xmm7 - xorps %xmm6,%xmm3 - movups %xmm2,(%edi) - xorps %xmm1,%xmm4 - movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 - xorps %xmm0,%xmm5 - movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 - leal 48(%edi),%edi - movaps %xmm5,%xmm2 - pxor %xmm5,%xmm5 - subl $64,%eax - jmp L090cbc_dec_tail_collected -.align 4,0x90 -L085cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 -L090cbc_dec_tail_collected: - andl $15,%eax - jnz L092cbc_dec_tail_partial - movups %xmm2,(%edi) - pxor %xmm0,%xmm0 - jmp L081cbc_ret -.align 4,0x90 -L092cbc_dec_tail_partial: - movaps %xmm2,(%esp) - pxor %xmm0,%xmm0 - movl $16,%ecx - movl %esp,%esi - subl %eax,%ecx -.long 2767451785 - movdqa %xmm2,(%esp) -L081cbc_ret: - movl 16(%esp),%esp - movl 36(%esp),%ebp - pxor %xmm2,%xmm2 - pxor %xmm1,%xmm1 - movups %xmm7,(%ebp) - pxor %xmm7,%xmm7 -L076cbc_abort: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.private_extern __aesni_set_encrypt_key -.align 4 -__aesni_set_encrypt_key: - pushl %ebp - pushl %ebx - testl %eax,%eax - jz L093bad_pointer - testl %edx,%edx - jz L093bad_pointer - call L094pic -L094pic: - popl %ebx - leal Lkey_const-L094pic(%ebx),%ebx - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lkey_const(%ebx),%ebp - movups (%eax),%xmm0 - xorps %xmm4,%xmm4 - movl 4(%ebp),%ebp - leal 16(%edx),%edx - andl $268437504,%ebp 
- cmpl $256,%ecx - je L09514rounds - cmpl $192,%ecx - je L09612rounds - cmpl $128,%ecx - jne L097bad_keybits -.align 4,0x90 -L09810rounds: - cmpl $268435456,%ebp - je L09910rounds_alt - movl $9,%ecx - movups %xmm0,-16(%edx) -.byte 102,15,58,223,200,1 - call L100key_128_cold -.byte 102,15,58,223,200,2 - call L101key_128 -.byte 102,15,58,223,200,4 - call L101key_128 -.byte 102,15,58,223,200,8 - call L101key_128 -.byte 102,15,58,223,200,16 - call L101key_128 -.byte 102,15,58,223,200,32 - call L101key_128 -.byte 102,15,58,223,200,64 - call L101key_128 -.byte 102,15,58,223,200,128 - call L101key_128 -.byte 102,15,58,223,200,27 - call L101key_128 -.byte 102,15,58,223,200,54 - call L101key_128 - movups %xmm0,(%edx) - movl %ecx,80(%edx) - jmp L102good_key -.align 4,0x90 -L101key_128: - movups %xmm0,(%edx) - leal 16(%edx),%edx -L100key_128_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - ret -.align 4,0x90 -L09910rounds_alt: - movdqa (%ebx),%xmm5 - movl $8,%ecx - movdqa 32(%ebx),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,-16(%edx) -L103loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leal 16(%edx),%edx - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%edx) - movdqa %xmm0,%xmm2 - decl %ecx - jnz L103loop_key128 - movdqa 48(%ebx),%xmm4 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%edx) - movl $9,%ecx - movl 
%ecx,96(%edx) - jmp L102good_key -.align 4,0x90 -L09612rounds: - movq 16(%eax),%xmm2 - cmpl $268435456,%ebp - je L10412rounds_alt - movl $11,%ecx - movups %xmm0,-16(%edx) -.byte 102,15,58,223,202,1 - call L105key_192a_cold -.byte 102,15,58,223,202,2 - call L106key_192b -.byte 102,15,58,223,202,4 - call L107key_192a -.byte 102,15,58,223,202,8 - call L106key_192b -.byte 102,15,58,223,202,16 - call L107key_192a -.byte 102,15,58,223,202,32 - call L106key_192b -.byte 102,15,58,223,202,64 - call L107key_192a -.byte 102,15,58,223,202,128 - call L106key_192b - movups %xmm0,(%edx) - movl %ecx,48(%edx) - jmp L102good_key -.align 4,0x90 -L107key_192a: - movups %xmm0,(%edx) - leal 16(%edx),%edx -.align 4,0x90 -L105key_192a_cold: - movaps %xmm2,%xmm5 -L108key_192b_warm: - shufps $16,%xmm0,%xmm4 - movdqa %xmm2,%xmm3 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - pslldq $4,%xmm3 - xorps %xmm4,%xmm0 - pshufd $85,%xmm1,%xmm1 - pxor %xmm3,%xmm2 - pxor %xmm1,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm3,%xmm2 - ret -.align 4,0x90 -L106key_192b: - movaps %xmm0,%xmm3 - shufps $68,%xmm0,%xmm5 - movups %xmm5,(%edx) - shufps $78,%xmm2,%xmm3 - movups %xmm3,16(%edx) - leal 32(%edx),%edx - jmp L108key_192b_warm -.align 4,0x90 -L10412rounds_alt: - movdqa 16(%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $8,%ecx - movdqu %xmm0,-16(%edx) -L109loop_key192: - movq %xmm2,(%edx) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leal 24(%edx),%edx - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%edx) - decl %ecx - jnz L109loop_key192 - movl $11,%ecx - movl %ecx,32(%edx) - jmp L102good_key -.align 4,0x90 -L09514rounds: - movups 16(%eax),%xmm2 - leal 16(%edx),%edx - cmpl $268435456,%ebp - je L11014rounds_alt - movl $13,%ecx - movups %xmm0,-32(%edx) 
- movups %xmm2,-16(%edx) -.byte 102,15,58,223,202,1 - call L111key_256a_cold -.byte 102,15,58,223,200,1 - call L112key_256b -.byte 102,15,58,223,202,2 - call L113key_256a -.byte 102,15,58,223,200,2 - call L112key_256b -.byte 102,15,58,223,202,4 - call L113key_256a -.byte 102,15,58,223,200,4 - call L112key_256b -.byte 102,15,58,223,202,8 - call L113key_256a -.byte 102,15,58,223,200,8 - call L112key_256b -.byte 102,15,58,223,202,16 - call L113key_256a -.byte 102,15,58,223,200,16 - call L112key_256b -.byte 102,15,58,223,202,32 - call L113key_256a -.byte 102,15,58,223,200,32 - call L112key_256b -.byte 102,15,58,223,202,64 - call L113key_256a - movups %xmm0,(%edx) - movl %ecx,16(%edx) - xorl %eax,%eax - jmp L102good_key -.align 4,0x90 -L113key_256a: - movups %xmm2,(%edx) - leal 16(%edx),%edx -L111key_256a_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - ret -.align 4,0x90 -L112key_256b: - movups %xmm0,(%edx) - leal 16(%edx),%edx - shufps $16,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $140,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $170,%xmm1,%xmm1 - xorps %xmm1,%xmm2 - ret -.align 4,0x90 -L11014rounds_alt: - movdqa (%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $7,%ecx - movdqu %xmm0,-32(%edx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,-16(%edx) -L114loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - decl %ecx - jz L115done_key256 - pshufd $255,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%edx) - leal 32(%edx),%edx - movdqa %xmm2,%xmm1 - jmp L114loop_key256 -L115done_key256: - movl $13,%ecx - movl %ecx,16(%edx) 
-L102good_key: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - xorl %eax,%eax - popl %ebx - popl %ebp - ret -.align 2,0x90 -L093bad_pointer: - movl $-1,%eax - popl %ebx - popl %ebp - ret -.align 2,0x90 -L097bad_keybits: - pxor %xmm0,%xmm0 - movl $-2,%eax - popl %ebx - popl %ebp - ret -.globl _aes_hw_set_encrypt_key -.private_extern _aes_hw_set_encrypt_key -.align 4 -_aes_hw_set_encrypt_key: -L_aes_hw_set_encrypt_key_begin: -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call L116pic -L116pic: - popl %ebx - leal _BORINGSSL_function_hit+3-L116pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 4(%esp),%eax - movl 8(%esp),%ecx - movl 12(%esp),%edx - call __aesni_set_encrypt_key - ret -.globl _aes_hw_set_decrypt_key -.private_extern _aes_hw_set_decrypt_key -.align 4 -_aes_hw_set_decrypt_key: -L_aes_hw_set_decrypt_key_begin: - movl 4(%esp),%eax - movl 8(%esp),%ecx - movl 12(%esp),%edx - call __aesni_set_encrypt_key - movl 12(%esp),%edx - shll $4,%ecx - testl %eax,%eax - jnz L117dec_key_ret - leal 16(%edx,%ecx,1),%eax - movups (%edx),%xmm0 - movups (%eax),%xmm1 - movups %xmm0,(%eax) - movups %xmm1,(%edx) - leal 16(%edx),%edx - leal -16(%eax),%eax -L118dec_key_inverse: - movups (%edx),%xmm0 - movups (%eax),%xmm1 -.byte 102,15,56,219,192 -.byte 102,15,56,219,201 - leal 16(%edx),%edx - leal -16(%eax),%eax - movups %xmm0,16(%eax) - movups %xmm1,-16(%edx) - cmpl %edx,%eax - ja L118dec_key_inverse - movups (%edx),%xmm0 -.byte 102,15,56,219,192 - movups %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - xorl %eax,%eax -L117dec_key_ret: - ret -.align 6,0x90 -Lkey_const: -.long 202313229,202313229,202313229,202313229 -.long 67569157,67569157,67569157,67569157 -.long 1,1,1,1 -.long 27,27,27,27 -.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 -.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 -.byte 
32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 -.byte 115,108,46,111,114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/bn-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/bn-586.S deleted file mode 100644 index 7d0462b51f..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/bn-586.S +++ /dev/null @@ -1,1533 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _bn_mul_add_words -.private_extern _bn_mul_add_words -.align 4 -_bn_mul_add_words: -L_bn_mul_add_words_begin: - call L000PIC_me_up -L000PIC_me_up: - popl %eax - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L000PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc L001maw_non_sse2 - movl 4(%esp),%eax - movl 8(%esp),%edx - movl 12(%esp),%ecx - movd 16(%esp),%mm0 - pxor %mm1,%mm1 - jmp L002maw_sse2_entry -.align 4,0x90 -L003maw_sse2_unrolled: - movd (%eax),%mm3 - paddq %mm3,%mm1 - movd (%edx),%mm2 - pmuludq %mm0,%mm2 - movd 4(%edx),%mm4 - pmuludq %mm0,%mm4 - movd 8(%edx),%mm6 - pmuludq %mm0,%mm6 - movd 12(%edx),%mm7 - pmuludq %mm0,%mm7 - paddq %mm2,%mm1 - movd 4(%eax),%mm3 - paddq %mm4,%mm3 - movd 8(%eax),%mm5 - paddq %mm6,%mm5 - movd 12(%eax),%mm4 - paddq %mm4,%mm7 - movd %mm1,(%eax) - movd 16(%edx),%mm2 - pmuludq %mm0,%mm2 - psrlq $32,%mm1 - movd 20(%edx),%mm4 - pmuludq %mm0,%mm4 - paddq %mm3,%mm1 - movd 24(%edx),%mm6 - pmuludq %mm0,%mm6 - movd %mm1,4(%eax) - psrlq $32,%mm1 - movd 28(%edx),%mm3 - addl $32,%edx - pmuludq %mm0,%mm3 - paddq %mm5,%mm1 - movd 16(%eax),%mm5 - paddq %mm5,%mm2 - movd %mm1,8(%eax) - psrlq $32,%mm1 - paddq %mm7,%mm1 - movd 20(%eax),%mm5 - paddq %mm5,%mm4 - movd %mm1,12(%eax) - psrlq $32,%mm1 - paddq %mm2,%mm1 - movd 
24(%eax),%mm5 - paddq %mm5,%mm6 - movd %mm1,16(%eax) - psrlq $32,%mm1 - paddq %mm4,%mm1 - movd 28(%eax),%mm5 - paddq %mm5,%mm3 - movd %mm1,20(%eax) - psrlq $32,%mm1 - paddq %mm6,%mm1 - movd %mm1,24(%eax) - psrlq $32,%mm1 - paddq %mm3,%mm1 - movd %mm1,28(%eax) - leal 32(%eax),%eax - psrlq $32,%mm1 - subl $8,%ecx - jz L004maw_sse2_exit -L002maw_sse2_entry: - testl $4294967288,%ecx - jnz L003maw_sse2_unrolled -.align 2,0x90 -L005maw_sse2_loop: - movd (%edx),%mm2 - movd (%eax),%mm3 - pmuludq %mm0,%mm2 - leal 4(%edx),%edx - paddq %mm3,%mm1 - paddq %mm2,%mm1 - movd %mm1,(%eax) - subl $1,%ecx - psrlq $32,%mm1 - leal 4(%eax),%eax - jnz L005maw_sse2_loop -L004maw_sse2_exit: - movd %mm1,%eax - emms - ret -.align 4,0x90 -L001maw_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - xorl %esi,%esi - movl 20(%esp),%edi - movl 28(%esp),%ecx - movl 24(%esp),%ebx - andl $4294967288,%ecx - movl 32(%esp),%ebp - pushl %ecx - jz L006maw_finish -.align 4,0x90 -L007maw_loop: - # Round 0 - movl (%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl (%edi),%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - # Round 4 - movl 4(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 4(%edi),%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - # Round 8 - movl 8(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 8(%edi),%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - # Round 12 - movl 12(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 12(%edi),%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - # Round 16 - movl 16(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 16(%edi),%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - # Round 20 - movl 20(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 20(%edi),%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - # Round 24 - movl 24(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 24(%edi),%eax - adcl $0,%edx - movl 
%eax,24(%edi) - movl %edx,%esi - # Round 28 - movl 28(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 28(%edi),%eax - adcl $0,%edx - movl %eax,28(%edi) - movl %edx,%esi - - subl $8,%ecx - leal 32(%ebx),%ebx - leal 32(%edi),%edi - jnz L007maw_loop -L006maw_finish: - movl 32(%esp),%ecx - andl $7,%ecx - jnz L008maw_finish2 - jmp L009maw_end -L008maw_finish2: - # Tail Round 0 - movl (%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl (%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 1 - movl 4(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 4(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,4(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 2 - movl 8(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 8(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,8(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 3 - movl 12(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 12(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,12(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 4 - movl 16(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 16(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,16(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 5 - movl 20(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 20(%edi),%eax - adcl $0,%edx - decl %ecx - movl %eax,20(%edi) - movl %edx,%esi - jz L009maw_end - # Tail Round 6 - movl 24(%ebx),%eax - mull %ebp - addl %esi,%eax - adcl $0,%edx - addl 24(%edi),%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi -L009maw_end: - movl %esi,%eax - popl %ecx - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _bn_mul_words -.private_extern _bn_mul_words -.align 4 -_bn_mul_words: -L_bn_mul_words_begin: - call L010PIC_me_up -L010PIC_me_up: - popl %eax - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L010PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc L011mw_non_sse2 - movl 4(%esp),%eax - 
movl 8(%esp),%edx - movl 12(%esp),%ecx - movd 16(%esp),%mm0 - pxor %mm1,%mm1 -.align 4,0x90 -L012mw_sse2_loop: - movd (%edx),%mm2 - pmuludq %mm0,%mm2 - leal 4(%edx),%edx - paddq %mm2,%mm1 - movd %mm1,(%eax) - subl $1,%ecx - psrlq $32,%mm1 - leal 4(%eax),%eax - jnz L012mw_sse2_loop - movd %mm1,%eax - emms - ret -.align 4,0x90 -L011mw_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - xorl %esi,%esi - movl 20(%esp),%edi - movl 24(%esp),%ebx - movl 28(%esp),%ebp - movl 32(%esp),%ecx - andl $4294967288,%ebp - jz L013mw_finish -L014mw_loop: - # Round 0 - movl (%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - # Round 4 - movl 4(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - # Round 8 - movl 8(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - # Round 12 - movl 12(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - # Round 16 - movl 16(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - # Round 20 - movl 20(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - # Round 24 - movl 24(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi - # Round 28 - movl 28(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,28(%edi) - movl %edx,%esi - - addl $32,%ebx - addl $32,%edi - subl $8,%ebp - jz L013mw_finish - jmp L014mw_loop -L013mw_finish: - movl 28(%esp),%ebp - andl $7,%ebp - jnz L015mw_finish2 - jmp L016mw_end -L015mw_finish2: - # Tail Round 0 - movl (%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 1 - movl 4(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,4(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 2 - movl 8(%ebx),%eax - 
mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,8(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 3 - movl 12(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,12(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 4 - movl 16(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,16(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 5 - movl 20(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,20(%edi) - movl %edx,%esi - decl %ebp - jz L016mw_end - # Tail Round 6 - movl 24(%ebx),%eax - mull %ecx - addl %esi,%eax - adcl $0,%edx - movl %eax,24(%edi) - movl %edx,%esi -L016mw_end: - movl %esi,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _bn_sqr_words -.private_extern _bn_sqr_words -.align 4 -_bn_sqr_words: -L_bn_sqr_words_begin: - call L017PIC_me_up -L017PIC_me_up: - popl %eax - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L017PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc L018sqr_non_sse2 - movl 4(%esp),%eax - movl 8(%esp),%edx - movl 12(%esp),%ecx -.align 4,0x90 -L019sqr_sse2_loop: - movd (%edx),%mm0 - pmuludq %mm0,%mm0 - leal 4(%edx),%edx - movq %mm0,(%eax) - subl $1,%ecx - leal 8(%eax),%eax - jnz L019sqr_sse2_loop - emms - ret -.align 4,0x90 -L018sqr_non_sse2: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%ebx - andl $4294967288,%ebx - jz L020sw_finish -L021sw_loop: - # Round 0 - movl (%edi),%eax - mull %eax - movl %eax,(%esi) - movl %edx,4(%esi) - # Round 4 - movl 4(%edi),%eax - mull %eax - movl %eax,8(%esi) - movl %edx,12(%esi) - # Round 8 - movl 8(%edi),%eax - mull %eax - movl %eax,16(%esi) - movl %edx,20(%esi) - # Round 12 - movl 12(%edi),%eax - mull %eax - movl %eax,24(%esi) - movl %edx,28(%esi) - # Round 16 - movl 16(%edi),%eax - mull %eax - movl %eax,32(%esi) - movl %edx,36(%esi) - # Round 20 - movl 20(%edi),%eax - mull %eax - movl %eax,40(%esi) - movl %edx,44(%esi) - # Round 24 - 
movl 24(%edi),%eax - mull %eax - movl %eax,48(%esi) - movl %edx,52(%esi) - # Round 28 - movl 28(%edi),%eax - mull %eax - movl %eax,56(%esi) - movl %edx,60(%esi) - - addl $32,%edi - addl $64,%esi - subl $8,%ebx - jnz L021sw_loop -L020sw_finish: - movl 28(%esp),%ebx - andl $7,%ebx - jz L022sw_end - # Tail Round 0 - movl (%edi),%eax - mull %eax - movl %eax,(%esi) - decl %ebx - movl %edx,4(%esi) - jz L022sw_end - # Tail Round 1 - movl 4(%edi),%eax - mull %eax - movl %eax,8(%esi) - decl %ebx - movl %edx,12(%esi) - jz L022sw_end - # Tail Round 2 - movl 8(%edi),%eax - mull %eax - movl %eax,16(%esi) - decl %ebx - movl %edx,20(%esi) - jz L022sw_end - # Tail Round 3 - movl 12(%edi),%eax - mull %eax - movl %eax,24(%esi) - decl %ebx - movl %edx,28(%esi) - jz L022sw_end - # Tail Round 4 - movl 16(%edi),%eax - mull %eax - movl %eax,32(%esi) - decl %ebx - movl %edx,36(%esi) - jz L022sw_end - # Tail Round 5 - movl 20(%edi),%eax - mull %eax - movl %eax,40(%esi) - decl %ebx - movl %edx,44(%esi) - jz L022sw_end - # Tail Round 6 - movl 24(%edi),%eax - mull %eax - movl %eax,48(%esi) - movl %edx,52(%esi) -L022sw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _bn_div_words -.private_extern _bn_div_words -.align 4 -_bn_div_words: -L_bn_div_words_begin: - movl 4(%esp),%edx - movl 8(%esp),%eax - movl 12(%esp),%ecx - divl %ecx - ret -.globl _bn_add_words -.private_extern _bn_add_words -.align 4 -_bn_add_words: -L_bn_add_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz L023aw_finish -L024aw_loop: - # Round 0 - movl (%esi),%ecx - movl (%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - # Round 1 - movl 4(%esi),%ecx - movl 4(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - # Round 2 - movl 8(%esi),%ecx - 
movl 8(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - # Round 3 - movl 12(%esi),%ecx - movl 12(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - # Round 4 - movl 16(%esi),%ecx - movl 16(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - # Round 5 - movl 20(%esi),%ecx - movl 20(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - # Round 6 - movl 24(%esi),%ecx - movl 24(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - # Round 7 - movl 28(%esi),%ecx - movl 28(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz L024aw_loop -L023aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz L025aw_end - # Tail Round 0 - movl (%esi),%ecx - movl (%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz L025aw_end - # Tail Round 1 - movl 4(%esi),%ecx - movl 4(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz L025aw_end - # Tail Round 2 - movl 8(%esi),%ecx - movl 8(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz L025aw_end - # Tail Round 3 - movl 12(%esi),%ecx - movl 12(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz L025aw_end - # Tail Round 4 - movl 16(%esi),%ecx - movl 16(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz L025aw_end - # Tail Round 5 
- movl 20(%esi),%ecx - movl 20(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz L025aw_end - # Tail Round 6 - movl 24(%esi),%ecx - movl 24(%edi),%edx - addl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - addl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) -L025aw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _bn_sub_words -.private_extern _bn_sub_words -.align 4 -_bn_sub_words: -L_bn_sub_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz L026aw_finish -L027aw_loop: - # Round 0 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - # Round 1 - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - # Round 2 - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - # Round 3 - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - # Round 4 - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - # Round 5 - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - # Round 6 - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - # Round 7 - movl 28(%esi),%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx 
- subl $8,%ebp - jnz L027aw_loop -L026aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz L028aw_end - # Tail Round 0 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz L028aw_end - # Tail Round 1 - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz L028aw_end - # Tail Round 2 - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz L028aw_end - # Tail Round 3 - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz L028aw_end - # Tail Round 4 - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz L028aw_end - # Tail Round 5 - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz L028aw_end - # Tail Round 6 - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) -L028aw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _bn_sub_part_words -.private_extern _bn_sub_part_words -.align 4 -_bn_sub_part_words: -L_bn_sub_part_words_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - - movl 20(%esp),%ebx - movl 24(%esp),%esi - movl 28(%esp),%edi - movl 32(%esp),%ebp - xorl %eax,%eax - andl $4294967288,%ebp - jz L029aw_finish -L030aw_loop: - # Round 0 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - # Round 1 - movl 4(%esi),%ecx - movl 4(%edi),%edx - subl 
%eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - # Round 2 - movl 8(%esi),%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - # Round 3 - movl 12(%esi),%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - # Round 4 - movl 16(%esi),%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - # Round 5 - movl 20(%esi),%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - # Round 6 - movl 24(%esi),%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - # Round 7 - movl 28(%esi),%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%esi - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz L030aw_loop -L029aw_finish: - movl 32(%esp),%ebp - andl $7,%ebp - jz L031aw_end - # Tail Round 0 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 1 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 2 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 3 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - 
movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 4 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 5 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx - decl %ebp - jz L031aw_end - # Tail Round 6 - movl (%esi),%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - addl $4,%esi - addl $4,%edi - addl $4,%ebx -L031aw_end: - cmpl $0,36(%esp) - je L032pw_end - movl 36(%esp),%ebp - cmpl $0,%ebp - je L032pw_end - jge L033pw_pos - # pw_neg - movl $0,%edx - subl %ebp,%edx - movl %edx,%ebp - andl $4294967288,%ebp - jz L034pw_neg_finish -L035pw_neg_loop: - # dl<0 Round 0 - movl $0,%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,(%ebx) - # dl<0 Round 1 - movl $0,%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,4(%ebx) - # dl<0 Round 2 - movl $0,%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,8(%ebx) - # dl<0 Round 3 - movl $0,%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,12(%ebx) - # dl<0 Round 4 - movl $0,%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,16(%ebx) - # dl<0 Round 5 - movl $0,%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,20(%ebx) - # dl<0 Round 6 - movl $0,%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - 
adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - # dl<0 Round 7 - movl $0,%ecx - movl 28(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,28(%ebx) - - addl $32,%edi - addl $32,%ebx - subl $8,%ebp - jnz L035pw_neg_loop -L034pw_neg_finish: - movl 36(%esp),%edx - movl $0,%ebp - subl %edx,%ebp - andl $7,%ebp - jz L032pw_end - # dl<0 Tail Round 0 - movl $0,%ecx - movl (%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,(%ebx) - jz L032pw_end - # dl<0 Tail Round 1 - movl $0,%ecx - movl 4(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,4(%ebx) - jz L032pw_end - # dl<0 Tail Round 2 - movl $0,%ecx - movl 8(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,8(%ebx) - jz L032pw_end - # dl<0 Tail Round 3 - movl $0,%ecx - movl 12(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,12(%ebx) - jz L032pw_end - # dl<0 Tail Round 4 - movl $0,%ecx - movl 16(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,16(%ebx) - jz L032pw_end - # dl<0 Tail Round 5 - movl $0,%ecx - movl 20(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - decl %ebp - movl %ecx,20(%ebx) - jz L032pw_end - # dl<0 Tail Round 6 - movl $0,%ecx - movl 24(%edi),%edx - subl %eax,%ecx - movl $0,%eax - adcl %eax,%eax - subl %edx,%ecx - adcl $0,%eax - movl %ecx,24(%ebx) - jmp L032pw_end -L033pw_pos: - andl $4294967288,%ebp - jz L036pw_pos_finish -L037pw_pos_loop: - # dl>0 Round 0 - movl (%esi),%ecx - subl %eax,%ecx - movl %ecx,(%ebx) - jnc L038pw_nc0 - # dl>0 Round 1 - movl 4(%esi),%ecx - subl %eax,%ecx - movl %ecx,4(%ebx) - jnc L039pw_nc1 - # dl>0 Round 2 - movl 8(%esi),%ecx - subl 
%eax,%ecx - movl %ecx,8(%ebx) - jnc L040pw_nc2 - # dl>0 Round 3 - movl 12(%esi),%ecx - subl %eax,%ecx - movl %ecx,12(%ebx) - jnc L041pw_nc3 - # dl>0 Round 4 - movl 16(%esi),%ecx - subl %eax,%ecx - movl %ecx,16(%ebx) - jnc L042pw_nc4 - # dl>0 Round 5 - movl 20(%esi),%ecx - subl %eax,%ecx - movl %ecx,20(%ebx) - jnc L043pw_nc5 - # dl>0 Round 6 - movl 24(%esi),%ecx - subl %eax,%ecx - movl %ecx,24(%ebx) - jnc L044pw_nc6 - # dl>0 Round 7 - movl 28(%esi),%ecx - subl %eax,%ecx - movl %ecx,28(%ebx) - jnc L045pw_nc7 - - addl $32,%esi - addl $32,%ebx - subl $8,%ebp - jnz L037pw_pos_loop -L036pw_pos_finish: - movl 36(%esp),%ebp - andl $7,%ebp - jz L032pw_end - # dl>0 Tail Round 0 - movl (%esi),%ecx - subl %eax,%ecx - movl %ecx,(%ebx) - jnc L046pw_tail_nc0 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 1 - movl 4(%esi),%ecx - subl %eax,%ecx - movl %ecx,4(%ebx) - jnc L047pw_tail_nc1 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 2 - movl 8(%esi),%ecx - subl %eax,%ecx - movl %ecx,8(%ebx) - jnc L048pw_tail_nc2 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 3 - movl 12(%esi),%ecx - subl %eax,%ecx - movl %ecx,12(%ebx) - jnc L049pw_tail_nc3 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 4 - movl 16(%esi),%ecx - subl %eax,%ecx - movl %ecx,16(%ebx) - jnc L050pw_tail_nc4 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 5 - movl 20(%esi),%ecx - subl %eax,%ecx - movl %ecx,20(%ebx) - jnc L051pw_tail_nc5 - decl %ebp - jz L032pw_end - # dl>0 Tail Round 6 - movl 24(%esi),%ecx - subl %eax,%ecx - movl %ecx,24(%ebx) - jnc L052pw_tail_nc6 - movl $1,%eax - jmp L032pw_end -L053pw_nc_loop: - movl (%esi),%ecx - movl %ecx,(%ebx) -L038pw_nc0: - movl 4(%esi),%ecx - movl %ecx,4(%ebx) -L039pw_nc1: - movl 8(%esi),%ecx - movl %ecx,8(%ebx) -L040pw_nc2: - movl 12(%esi),%ecx - movl %ecx,12(%ebx) -L041pw_nc3: - movl 16(%esi),%ecx - movl %ecx,16(%ebx) -L042pw_nc4: - movl 20(%esi),%ecx - movl %ecx,20(%ebx) -L043pw_nc5: - movl 24(%esi),%ecx - movl %ecx,24(%ebx) -L044pw_nc6: - movl 28(%esi),%ecx - movl 
%ecx,28(%ebx) -L045pw_nc7: - - addl $32,%esi - addl $32,%ebx - subl $8,%ebp - jnz L053pw_nc_loop - movl 36(%esp),%ebp - andl $7,%ebp - jz L054pw_nc_end - movl (%esi),%ecx - movl %ecx,(%ebx) -L046pw_tail_nc0: - decl %ebp - jz L054pw_nc_end - movl 4(%esi),%ecx - movl %ecx,4(%ebx) -L047pw_tail_nc1: - decl %ebp - jz L054pw_nc_end - movl 8(%esi),%ecx - movl %ecx,8(%ebx) -L048pw_tail_nc2: - decl %ebp - jz L054pw_nc_end - movl 12(%esi),%ecx - movl %ecx,12(%ebx) -L049pw_tail_nc3: - decl %ebp - jz L054pw_nc_end - movl 16(%esi),%ecx - movl %ecx,16(%ebx) -L050pw_tail_nc4: - decl %ebp - jz L054pw_nc_end - movl 20(%esi),%ecx - movl %ecx,20(%ebx) -L051pw_tail_nc5: - decl %ebp - jz L054pw_nc_end - movl 24(%esi),%ecx - movl %ecx,24(%ebx) -L052pw_tail_nc6: -L054pw_nc_end: - movl $0,%eax -L032pw_end: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/co-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/co-586.S deleted file mode 100644 index 578ca70b0c..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/co-586.S +++ /dev/null @@ -1,1257 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _bn_mul_comba8 -.private_extern _bn_mul_comba8 -.align 4 -_bn_mul_comba8: -L_bn_mul_comba8_begin: - pushl %esi - movl 12(%esp),%esi - pushl %edi - movl 20(%esp),%edi - pushl %ebp - pushl %ebx - xorl %ebx,%ebx - movl (%esi),%eax - xorl %ecx,%ecx - movl (%edi),%edx - # ################## Calculate word 0 - xorl %ebp,%ebp - # mul a[0]*b[0] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,(%eax) - movl 4(%esi),%eax - # saved r[0] - # ################## Calculate word 1 - xorl %ebx,%ebx - # mul a[1]*b[0] - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - # mul a[0]*b[1] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,4(%eax) - movl 8(%esi),%eax - # saved r[1] - # ################## Calculate word 2 - xorl %ecx,%ecx - # mul a[2]*b[0] - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - # mul a[1]*b[1] - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - # mul a[0]*b[2] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,8(%eax) - movl 12(%esi),%eax - # saved r[2] - # ################## Calculate word 3 - xorl %ebp,%ebp - # mul a[3]*b[0] - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - # mul a[2]*b[1] - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - # mul a[1]*b[2] - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - # mul a[0]*b[3] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,12(%eax) - movl 16(%esi),%eax - # saved r[3] - # 
################## Calculate word 4 - xorl %ebx,%ebx - # mul a[4]*b[0] - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - # mul a[3]*b[1] - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - # mul a[2]*b[2] - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 12(%edi),%edx - adcl $0,%ebx - # mul a[1]*b[3] - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - # mul a[0]*b[4] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,16(%eax) - movl 20(%esi),%eax - # saved r[4] - # ################## Calculate word 5 - xorl %ecx,%ecx - # mul a[5]*b[0] - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - # mul a[4]*b[1] - mull %edx - addl %eax,%ebp - movl 12(%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - # mul a[3]*b[2] - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - # mul a[2]*b[3] - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 16(%edi),%edx - adcl $0,%ecx - # mul a[1]*b[4] - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - # mul a[0]*b[5] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,20(%eax) - movl 24(%esi),%eax - # saved r[5] - # ################## Calculate word 6 - xorl %ebp,%ebp - # mul a[6]*b[0] - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - # mul a[5]*b[1] - mull %edx - addl %eax,%ebx - movl 16(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - # mul a[4]*b[2] - mull %edx - addl %eax,%ebx - movl 12(%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - # mul a[3]*b[3] - mull %edx - addl 
%eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 16(%edi),%edx - adcl $0,%ebp - # mul a[2]*b[4] - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 20(%edi),%edx - adcl $0,%ebp - # mul a[1]*b[5] - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - # mul a[0]*b[6] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,24(%eax) - movl 28(%esi),%eax - # saved r[6] - # ################## Calculate word 7 - xorl %ebx,%ebx - # mul a[7]*b[0] - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - # mul a[6]*b[1] - mull %edx - addl %eax,%ecx - movl 20(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - # mul a[5]*b[2] - mull %edx - addl %eax,%ecx - movl 16(%esi),%eax - adcl %edx,%ebp - movl 12(%edi),%edx - adcl $0,%ebx - # mul a[4]*b[3] - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - # mul a[3]*b[4] - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 20(%edi),%edx - adcl $0,%ebx - # mul a[2]*b[5] - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 24(%edi),%edx - adcl $0,%ebx - # mul a[1]*b[6] - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - # mul a[0]*b[7] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - movl %ecx,28(%eax) - movl 28(%esi),%eax - # saved r[7] - # ################## Calculate word 8 - xorl %ecx,%ecx - # mul a[7]*b[1] - mull %edx - addl %eax,%ebp - movl 24(%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - # mul a[6]*b[2] - mull %edx - addl %eax,%ebp - movl 20(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - # mul a[5]*b[3] - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 16(%edi),%edx - adcl $0,%ecx - # mul 
a[4]*b[4] - mull %edx - addl %eax,%ebp - movl 12(%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - # mul a[3]*b[5] - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 24(%edi),%edx - adcl $0,%ecx - # mul a[2]*b[6] - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 28(%edi),%edx - adcl $0,%ecx - # mul a[1]*b[7] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - movl %ebp,32(%eax) - movl 28(%esi),%eax - # saved r[8] - # ################## Calculate word 9 - xorl %ebp,%ebp - # mul a[7]*b[2] - mull %edx - addl %eax,%ebx - movl 24(%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - # mul a[6]*b[3] - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 16(%edi),%edx - adcl $0,%ebp - # mul a[5]*b[4] - mull %edx - addl %eax,%ebx - movl 16(%esi),%eax - adcl %edx,%ecx - movl 20(%edi),%edx - adcl $0,%ebp - # mul a[4]*b[5] - mull %edx - addl %eax,%ebx - movl 12(%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - # mul a[3]*b[6] - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 28(%edi),%edx - adcl $0,%ebp - # mul a[2]*b[7] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - movl %ebx,36(%eax) - movl 28(%esi),%eax - # saved r[9] - # ################## Calculate word 10 - xorl %ebx,%ebx - # mul a[7]*b[3] - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 16(%edi),%edx - adcl $0,%ebx - # mul a[6]*b[4] - mull %edx - addl %eax,%ecx - movl 20(%esi),%eax - adcl %edx,%ebp - movl 20(%edi),%edx - adcl $0,%ebx - # mul a[5]*b[5] - mull %edx - addl %eax,%ecx - movl 16(%esi),%eax - adcl %edx,%ebp - movl 24(%edi),%edx - adcl $0,%ebx - # mul a[4]*b[6] - mull %edx - addl %eax,%ecx - movl 12(%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - # mul a[3]*b[7] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - 
movl 16(%edi),%edx - adcl $0,%ebx - movl %ecx,40(%eax) - movl 28(%esi),%eax - # saved r[10] - # ################## Calculate word 11 - xorl %ecx,%ecx - # mul a[7]*b[4] - mull %edx - addl %eax,%ebp - movl 24(%esi),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - # mul a[6]*b[5] - mull %edx - addl %eax,%ebp - movl 20(%esi),%eax - adcl %edx,%ebx - movl 24(%edi),%edx - adcl $0,%ecx - # mul a[5]*b[6] - mull %edx - addl %eax,%ebp - movl 16(%esi),%eax - adcl %edx,%ebx - movl 28(%edi),%edx - adcl $0,%ecx - # mul a[4]*b[7] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 20(%edi),%edx - adcl $0,%ecx - movl %ebp,44(%eax) - movl 28(%esi),%eax - # saved r[11] - # ################## Calculate word 12 - xorl %ebp,%ebp - # mul a[7]*b[5] - mull %edx - addl %eax,%ebx - movl 24(%esi),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - # mul a[6]*b[6] - mull %edx - addl %eax,%ebx - movl 20(%esi),%eax - adcl %edx,%ecx - movl 28(%edi),%edx - adcl $0,%ebp - # mul a[5]*b[7] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl 24(%edi),%edx - adcl $0,%ebp - movl %ebx,48(%eax) - movl 28(%esi),%eax - # saved r[12] - # ################## Calculate word 13 - xorl %ebx,%ebx - # mul a[7]*b[6] - mull %edx - addl %eax,%ecx - movl 24(%esi),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - # mul a[6]*b[7] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 28(%edi),%edx - adcl $0,%ebx - movl %ecx,52(%eax) - movl 28(%esi),%eax - # saved r[13] - # ################## Calculate word 14 - xorl %ecx,%ecx - # mul a[7]*b[7] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - adcl $0,%ecx - movl %ebp,56(%eax) - # saved r[14] - # save r[15] - movl %ebx,60(%eax) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.globl _bn_mul_comba4 -.private_extern _bn_mul_comba4 -.align 4 -_bn_mul_comba4: -L_bn_mul_comba4_begin: - pushl %esi - movl 12(%esp),%esi - pushl %edi - movl 20(%esp),%edi - pushl 
%ebp - pushl %ebx - xorl %ebx,%ebx - movl (%esi),%eax - xorl %ecx,%ecx - movl (%edi),%edx - # ################## Calculate word 0 - xorl %ebp,%ebp - # mul a[0]*b[0] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl (%edi),%edx - adcl $0,%ebp - movl %ebx,(%eax) - movl 4(%esi),%eax - # saved r[0] - # ################## Calculate word 1 - xorl %ebx,%ebx - # mul a[1]*b[0] - mull %edx - addl %eax,%ecx - movl (%esi),%eax - adcl %edx,%ebp - movl 4(%edi),%edx - adcl $0,%ebx - # mul a[0]*b[1] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl (%edi),%edx - adcl $0,%ebx - movl %ecx,4(%eax) - movl 8(%esi),%eax - # saved r[1] - # ################## Calculate word 2 - xorl %ecx,%ecx - # mul a[2]*b[0] - mull %edx - addl %eax,%ebp - movl 4(%esi),%eax - adcl %edx,%ebx - movl 4(%edi),%edx - adcl $0,%ecx - # mul a[1]*b[1] - mull %edx - addl %eax,%ebp - movl (%esi),%eax - adcl %edx,%ebx - movl 8(%edi),%edx - adcl $0,%ecx - # mul a[0]*b[2] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl (%edi),%edx - adcl $0,%ecx - movl %ebp,8(%eax) - movl 12(%esi),%eax - # saved r[2] - # ################## Calculate word 3 - xorl %ebp,%ebp - # mul a[3]*b[0] - mull %edx - addl %eax,%ebx - movl 8(%esi),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - # mul a[2]*b[1] - mull %edx - addl %eax,%ebx - movl 4(%esi),%eax - adcl %edx,%ecx - movl 8(%edi),%edx - adcl $0,%ebp - # mul a[1]*b[2] - mull %edx - addl %eax,%ebx - movl (%esi),%eax - adcl %edx,%ecx - movl 12(%edi),%edx - adcl $0,%ebp - # mul a[0]*b[3] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - movl 4(%edi),%edx - adcl $0,%ebp - movl %ebx,12(%eax) - movl 12(%esi),%eax - # saved r[3] - # ################## Calculate word 4 - xorl %ebx,%ebx - # mul a[3]*b[1] - mull %edx - addl %eax,%ecx - movl 8(%esi),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - # mul a[2]*b[2] - mull %edx - addl %eax,%ecx - movl 4(%esi),%eax - adcl %edx,%ebp - movl 
12(%edi),%edx - adcl $0,%ebx - # mul a[1]*b[3] - mull %edx - addl %eax,%ecx - movl 20(%esp),%eax - adcl %edx,%ebp - movl 8(%edi),%edx - adcl $0,%ebx - movl %ecx,16(%eax) - movl 12(%esi),%eax - # saved r[4] - # ################## Calculate word 5 - xorl %ecx,%ecx - # mul a[3]*b[2] - mull %edx - addl %eax,%ebp - movl 8(%esi),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - # mul a[2]*b[3] - mull %edx - addl %eax,%ebp - movl 20(%esp),%eax - adcl %edx,%ebx - movl 12(%edi),%edx - adcl $0,%ecx - movl %ebp,20(%eax) - movl 12(%esi),%eax - # saved r[5] - # ################## Calculate word 6 - xorl %ebp,%ebp - # mul a[3]*b[3] - mull %edx - addl %eax,%ebx - movl 20(%esp),%eax - adcl %edx,%ecx - adcl $0,%ebp - movl %ebx,24(%eax) - # saved r[6] - # save r[7] - movl %ecx,28(%eax) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.globl _bn_sqr_comba8 -.private_extern _bn_sqr_comba8 -.align 4 -_bn_sqr_comba8: -L_bn_sqr_comba8_begin: - pushl %esi - pushl %edi - pushl %ebp - pushl %ebx - movl 20(%esp),%edi - movl 24(%esp),%esi - xorl %ebx,%ebx - xorl %ecx,%ecx - movl (%esi),%eax - # ############### Calculate word 0 - xorl %ebp,%ebp - # sqr a[0]*a[0] - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,(%edi) - movl 4(%esi),%eax - # saved r[0] - # ############### Calculate word 1 - xorl %ebx,%ebx - # sqr a[1]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - movl %ecx,4(%edi) - movl (%esi),%edx - # saved r[1] - # ############### Calculate word 2 - xorl %ecx,%ecx - # sqr a[2]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 4(%esi),%eax - adcl $0,%ecx - # sqr a[1]*a[1] - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl (%esi),%edx - adcl $0,%ecx - movl %ebp,8(%edi) - movl 12(%esi),%eax - # saved r[2] - # ############### Calculate word 3 - xorl %ebp,%ebp - # sqr a[3]*a[0] - mull %edx - 
addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 8(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - # sqr a[2]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 16(%esi),%eax - adcl $0,%ebp - movl %ebx,12(%edi) - movl (%esi),%edx - # saved r[3] - # ############### Calculate word 4 - xorl %ebx,%ebx - # sqr a[4]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 12(%esi),%eax - adcl $0,%ebx - movl 4(%esi),%edx - # sqr a[3]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - # sqr a[2]*a[2] - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl (%esi),%edx - adcl $0,%ebx - movl %ecx,16(%edi) - movl 20(%esi),%eax - # saved r[4] - # ############### Calculate word 5 - xorl %ecx,%ecx - # sqr a[5]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 16(%esi),%eax - adcl $0,%ecx - movl 4(%esi),%edx - # sqr a[4]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 12(%esi),%eax - adcl $0,%ecx - movl 8(%esi),%edx - # sqr a[3]*a[2] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl %ebp,20(%edi) - movl (%esi),%edx - # saved r[5] - # ############### Calculate word 6 - xorl %ebp,%ebp - # sqr a[6]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 20(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - # sqr a[5]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 16(%esi),%eax - adcl $0,%ebp - movl 8(%esi),%edx - # sqr a[4]*a[2] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 12(%esi),%eax - adcl 
$0,%ebp - # sqr a[3]*a[3] - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,24(%edi) - movl 28(%esi),%eax - # saved r[6] - # ############### Calculate word 7 - xorl %ebx,%ebx - # sqr a[7]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 24(%esi),%eax - adcl $0,%ebx - movl 4(%esi),%edx - # sqr a[6]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 20(%esi),%eax - adcl $0,%ebx - movl 8(%esi),%edx - # sqr a[5]*a[2] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 16(%esi),%eax - adcl $0,%ebx - movl 12(%esi),%edx - # sqr a[4]*a[3] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 28(%esi),%eax - adcl $0,%ebx - movl %ecx,28(%edi) - movl 4(%esi),%edx - # saved r[7] - # ############### Calculate word 8 - xorl %ecx,%ecx - # sqr a[7]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl 8(%esi),%edx - # sqr a[6]*a[2] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 20(%esi),%eax - adcl $0,%ecx - movl 12(%esi),%edx - # sqr a[5]*a[3] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 16(%esi),%eax - adcl $0,%ecx - # sqr a[4]*a[4] - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl 8(%esi),%edx - adcl $0,%ecx - movl %ebp,32(%edi) - movl 28(%esi),%eax - # saved r[8] - # ############### Calculate word 9 - xorl %ebp,%ebp - # sqr a[7]*a[2] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 24(%esi),%eax - adcl $0,%ebp - movl 12(%esi),%edx - # sqr a[6]*a[3] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 20(%esi),%eax - adcl 
$0,%ebp - movl 16(%esi),%edx - # sqr a[5]*a[4] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 28(%esi),%eax - adcl $0,%ebp - movl %ebx,36(%edi) - movl 12(%esi),%edx - # saved r[9] - # ############### Calculate word 10 - xorl %ebx,%ebx - # sqr a[7]*a[3] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 24(%esi),%eax - adcl $0,%ebx - movl 16(%esi),%edx - # sqr a[6]*a[4] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 20(%esi),%eax - adcl $0,%ebx - # sqr a[5]*a[5] - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl 16(%esi),%edx - adcl $0,%ebx - movl %ecx,40(%edi) - movl 28(%esi),%eax - # saved r[10] - # ############### Calculate word 11 - xorl %ecx,%ecx - # sqr a[7]*a[4] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 24(%esi),%eax - adcl $0,%ecx - movl 20(%esi),%edx - # sqr a[6]*a[5] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 28(%esi),%eax - adcl $0,%ecx - movl %ebp,44(%edi) - movl 20(%esi),%edx - # saved r[11] - # ############### Calculate word 12 - xorl %ebp,%ebp - # sqr a[7]*a[5] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 24(%esi),%eax - adcl $0,%ebp - # sqr a[6]*a[6] - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl 24(%esi),%edx - adcl $0,%ebp - movl %ebx,48(%edi) - movl 28(%esi),%eax - # saved r[12] - # ############### Calculate word 13 - xorl %ebx,%ebx - # sqr a[7]*a[6] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 28(%esi),%eax - adcl $0,%ebx - movl %ecx,52(%edi) - # saved r[13] - # ############### Calculate word 14 - xorl %ecx,%ecx - # sqr a[7]*a[7] - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - adcl $0,%ecx - movl %ebp,56(%edi) - # saved r[14] - movl %ebx,60(%edi) - 
popl %ebx - popl %ebp - popl %edi - popl %esi - ret -.globl _bn_sqr_comba4 -.private_extern _bn_sqr_comba4 -.align 4 -_bn_sqr_comba4: -L_bn_sqr_comba4_begin: - pushl %esi - pushl %edi - pushl %ebp - pushl %ebx - movl 20(%esp),%edi - movl 24(%esp),%esi - xorl %ebx,%ebx - xorl %ecx,%ecx - movl (%esi),%eax - # ############### Calculate word 0 - xorl %ebp,%ebp - # sqr a[0]*a[0] - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - movl (%esi),%edx - adcl $0,%ebp - movl %ebx,(%edi) - movl 4(%esi),%eax - # saved r[0] - # ############### Calculate word 1 - xorl %ebx,%ebx - # sqr a[1]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - movl %ecx,4(%edi) - movl (%esi),%edx - # saved r[1] - # ############### Calculate word 2 - xorl %ecx,%ecx - # sqr a[2]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 4(%esi),%eax - adcl $0,%ecx - # sqr a[1]*a[1] - mull %eax - addl %eax,%ebp - adcl %edx,%ebx - movl (%esi),%edx - adcl $0,%ecx - movl %ebp,8(%edi) - movl 12(%esi),%eax - # saved r[2] - # ############### Calculate word 3 - xorl %ebp,%ebp - # sqr a[3]*a[0] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 8(%esi),%eax - adcl $0,%ebp - movl 4(%esi),%edx - # sqr a[2]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebp - addl %eax,%ebx - adcl %edx,%ecx - movl 12(%esi),%eax - adcl $0,%ebp - movl %ebx,12(%edi) - movl 4(%esi),%edx - # saved r[3] - # ############### Calculate word 4 - xorl %ebx,%ebx - # sqr a[3]*a[1] - mull %edx - addl %eax,%eax - adcl %edx,%edx - adcl $0,%ebx - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%eax - adcl $0,%ebx - # sqr a[2]*a[2] - mull %eax - addl %eax,%ecx - adcl %edx,%ebp - movl 8(%esi),%edx - adcl $0,%ebx - movl %ecx,16(%edi) - movl 12(%esi),%eax - # saved r[4] - # ############### Calculate word 5 - xorl %ecx,%ecx - # sqr a[3]*a[2] - mull %edx - addl 
%eax,%eax - adcl %edx,%edx - adcl $0,%ecx - addl %eax,%ebp - adcl %edx,%ebx - movl 12(%esi),%eax - adcl $0,%ecx - movl %ebp,20(%edi) - # saved r[5] - # ############### Calculate word 6 - xorl %ebp,%ebp - # sqr a[3]*a[3] - mull %eax - addl %eax,%ebx - adcl %edx,%ecx - adcl $0,%ebp - movl %ebx,24(%edi) - # saved r[6] - movl %ecx,28(%edi) - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S deleted file mode 100644 index f059e2839a..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S +++ /dev/null @@ -1,289 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _gcm_gmult_ssse3 -.private_extern _gcm_gmult_ssse3 -.align 4 -_gcm_gmult_ssse3: -L_gcm_gmult_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - movdqu (%edi),%xmm0 - call L000pic_point -L000pic_point: - popl %eax - movdqa Lreverse_bytes-L000pic_point(%eax),%xmm7 - movdqa Llow4_mask-L000pic_point(%eax),%xmm2 -.byte 102,15,56,0,199 - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -L001loop_row_1: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L001loop_row_1 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq 
$5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -L002loop_row_2: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L002loop_row_2 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $6,%eax -L003loop_row_3: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L003loop_row_3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,0,215 - movdqu %xmm2,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _gcm_ghash_ssse3 -.private_extern _gcm_ghash_ssse3 -.align 4 -_gcm_ghash_ssse3: -L_gcm_ghash_ssse3_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - movl 28(%esp),%edx - movl 32(%esp),%ecx - movdqu (%edi),%xmm0 - call L004pic_point -L004pic_point: - popl %ebx - movdqa Lreverse_bytes-L004pic_point(%ebx),%xmm7 - andl $-16,%ecx -.byte 102,15,56,0,199 - pxor %xmm3,%xmm3 -L005loop_ghash: - movdqa Llow4_mask-L004pic_point(%ebx),%xmm2 - movdqu (%edx),%xmm1 -.byte 
102,15,56,0,207 - pxor %xmm1,%xmm0 - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - pxor %xmm2,%xmm2 - movl $5,%eax -L006loop_row_4: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L006loop_row_4 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $5,%eax -L007loop_row_5: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L007loop_row_5 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movl $6,%eax -L008loop_row_6: - movdqa (%esi),%xmm4 - leal 16(%esi),%esi - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - pxor %xmm5,%xmm2 - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - subl $1,%eax - jnz L008loop_row_6 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movdqa %xmm2,%xmm0 - leal -256(%esi),%esi - leal 16(%edx),%edx - subl 
$16,%ecx - jnz L005loop_ghash -.byte 102,15,56,0,199 - movdqu %xmm0,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 4,0x90 -Lreverse_bytes: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.align 4,0x90 -Llow4_mask: -.long 252645135,252645135,252645135,252645135 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-x86.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-x86.S deleted file mode 100644 index e13bf3e858..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/ghash-x86.S +++ /dev/null @@ -1,1064 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _gcm_gmult_4bit_mmx -.private_extern _gcm_gmult_4bit_mmx -.align 4 -_gcm_gmult_4bit_mmx: -L_gcm_gmult_4bit_mmx_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%edi - movl 24(%esp),%esi - call L000pic_point -L000pic_point: - popl %eax - leal Lrem_4bit-L000pic_point(%eax),%eax - movzbl 15(%edi),%ebx - xorl %ecx,%ecx - movl %ebx,%edx - movb %dl,%cl - movl $14,%ebp - shlb $4,%cl - andl $240,%edx - movq 8(%esi,%ecx,1),%mm0 - movq (%esi,%ecx,1),%mm1 - movd %mm0,%ebx - jmp L001mmx_loop -.align 4,0x90 -L001mmx_loop: - psrlq $4,%mm0 - andl $15,%ebx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%edx,1),%mm0 - movb (%edi,%ebp,1),%cl - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - decl %ebp - movd %mm0,%ebx - pxor (%esi,%edx,1),%mm1 - movl %ecx,%edx - pxor %mm2,%mm0 - js L002mmx_break - shlb $4,%cl - andl $15,%ebx - psrlq $4,%mm0 - andl $240,%edx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%ecx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%ecx,1),%mm1 - pxor %mm2,%mm0 - jmp L001mmx_loop 
-.align 4,0x90 -L002mmx_break: - shlb $4,%cl - andl $15,%ebx - psrlq $4,%mm0 - andl $240,%edx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%ecx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%ecx,1),%mm1 - pxor %mm2,%mm0 - psrlq $4,%mm0 - andl $15,%ebx - movq %mm1,%mm2 - psrlq $4,%mm1 - pxor 8(%esi,%edx,1),%mm0 - psllq $60,%mm2 - pxor (%eax,%ebx,8),%mm1 - movd %mm0,%ebx - pxor (%esi,%edx,1),%mm1 - pxor %mm2,%mm0 - psrlq $32,%mm0 - movd %mm1,%edx - psrlq $32,%mm1 - movd %mm0,%ecx - movd %mm1,%ebp - bswap %ebx - bswap %edx - bswap %ecx - bswap %ebp - emms - movl %ebx,12(%edi) - movl %edx,4(%edi) - movl %ecx,8(%edi) - movl %ebp,(%edi) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _gcm_ghash_4bit_mmx -.private_extern _gcm_ghash_4bit_mmx -.align 4 -_gcm_ghash_4bit_mmx: -L_gcm_ghash_4bit_mmx_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%eax - movl 24(%esp),%ebx - movl 28(%esp),%ecx - movl 32(%esp),%edx - movl %esp,%ebp - call L003pic_point -L003pic_point: - popl %esi - leal Lrem_8bit-L003pic_point(%esi),%esi - subl $544,%esp - andl $-64,%esp - subl $16,%esp - addl %ecx,%edx - movl %eax,544(%esp) - movl %edx,552(%esp) - movl %ebp,556(%esp) - addl $128,%ebx - leal 144(%esp),%edi - leal 400(%esp),%ebp - movl -120(%ebx),%edx - movq -120(%ebx),%mm0 - movq -128(%ebx),%mm3 - shll $4,%edx - movb %dl,(%esp) - movl -104(%ebx),%edx - movq -104(%ebx),%mm2 - movq -112(%ebx),%mm5 - movq %mm0,-128(%edi) - psrlq $4,%mm0 - movq %mm3,(%edi) - movq %mm3,%mm7 - psrlq $4,%mm3 - shll $4,%edx - movb %dl,1(%esp) - movl -88(%ebx),%edx - movq -88(%ebx),%mm1 - psllq $60,%mm7 - movq -96(%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-120(%edi) - psrlq $4,%mm2 - movq %mm5,8(%edi) - movq %mm5,%mm6 - movq %mm0,-128(%ebp) - psrlq $4,%mm5 - movq %mm3,(%ebp) - shll $4,%edx - movb %dl,2(%esp) - movl -72(%ebx),%edx - movq -72(%ebx),%mm0 - psllq $60,%mm6 - movq -80(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-112(%edi) - psrlq $4,%mm1 - 
movq %mm4,16(%edi) - movq %mm4,%mm7 - movq %mm2,-120(%ebp) - psrlq $4,%mm4 - movq %mm5,8(%ebp) - shll $4,%edx - movb %dl,3(%esp) - movl -56(%ebx),%edx - movq -56(%ebx),%mm2 - psllq $60,%mm7 - movq -64(%ebx),%mm5 - por %mm7,%mm1 - movq %mm0,-104(%edi) - psrlq $4,%mm0 - movq %mm3,24(%edi) - movq %mm3,%mm6 - movq %mm1,-112(%ebp) - psrlq $4,%mm3 - movq %mm4,16(%ebp) - shll $4,%edx - movb %dl,4(%esp) - movl -40(%ebx),%edx - movq -40(%ebx),%mm1 - psllq $60,%mm6 - movq -48(%ebx),%mm4 - por %mm6,%mm0 - movq %mm2,-96(%edi) - psrlq $4,%mm2 - movq %mm5,32(%edi) - movq %mm5,%mm7 - movq %mm0,-104(%ebp) - psrlq $4,%mm5 - movq %mm3,24(%ebp) - shll $4,%edx - movb %dl,5(%esp) - movl -24(%ebx),%edx - movq -24(%ebx),%mm0 - psllq $60,%mm7 - movq -32(%ebx),%mm3 - por %mm7,%mm2 - movq %mm1,-88(%edi) - psrlq $4,%mm1 - movq %mm4,40(%edi) - movq %mm4,%mm6 - movq %mm2,-96(%ebp) - psrlq $4,%mm4 - movq %mm5,32(%ebp) - shll $4,%edx - movb %dl,6(%esp) - movl -8(%ebx),%edx - movq -8(%ebx),%mm2 - psllq $60,%mm6 - movq -16(%ebx),%mm5 - por %mm6,%mm1 - movq %mm0,-80(%edi) - psrlq $4,%mm0 - movq %mm3,48(%edi) - movq %mm3,%mm7 - movq %mm1,-88(%ebp) - psrlq $4,%mm3 - movq %mm4,40(%ebp) - shll $4,%edx - movb %dl,7(%esp) - movl 8(%ebx),%edx - movq 8(%ebx),%mm1 - psllq $60,%mm7 - movq (%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-72(%edi) - psrlq $4,%mm2 - movq %mm5,56(%edi) - movq %mm5,%mm6 - movq %mm0,-80(%ebp) - psrlq $4,%mm5 - movq %mm3,48(%ebp) - shll $4,%edx - movb %dl,8(%esp) - movl 24(%ebx),%edx - movq 24(%ebx),%mm0 - psllq $60,%mm6 - movq 16(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-64(%edi) - psrlq $4,%mm1 - movq %mm4,64(%edi) - movq %mm4,%mm7 - movq %mm2,-72(%ebp) - psrlq $4,%mm4 - movq %mm5,56(%ebp) - shll $4,%edx - movb %dl,9(%esp) - movl 40(%ebx),%edx - movq 40(%ebx),%mm2 - psllq $60,%mm7 - movq 32(%ebx),%mm5 - por %mm7,%mm1 - movq %mm0,-56(%edi) - psrlq $4,%mm0 - movq %mm3,72(%edi) - movq %mm3,%mm6 - movq %mm1,-64(%ebp) - psrlq $4,%mm3 - movq %mm4,64(%ebp) - shll $4,%edx - movb %dl,10(%esp) - 
movl 56(%ebx),%edx - movq 56(%ebx),%mm1 - psllq $60,%mm6 - movq 48(%ebx),%mm4 - por %mm6,%mm0 - movq %mm2,-48(%edi) - psrlq $4,%mm2 - movq %mm5,80(%edi) - movq %mm5,%mm7 - movq %mm0,-56(%ebp) - psrlq $4,%mm5 - movq %mm3,72(%ebp) - shll $4,%edx - movb %dl,11(%esp) - movl 72(%ebx),%edx - movq 72(%ebx),%mm0 - psllq $60,%mm7 - movq 64(%ebx),%mm3 - por %mm7,%mm2 - movq %mm1,-40(%edi) - psrlq $4,%mm1 - movq %mm4,88(%edi) - movq %mm4,%mm6 - movq %mm2,-48(%ebp) - psrlq $4,%mm4 - movq %mm5,80(%ebp) - shll $4,%edx - movb %dl,12(%esp) - movl 88(%ebx),%edx - movq 88(%ebx),%mm2 - psllq $60,%mm6 - movq 80(%ebx),%mm5 - por %mm6,%mm1 - movq %mm0,-32(%edi) - psrlq $4,%mm0 - movq %mm3,96(%edi) - movq %mm3,%mm7 - movq %mm1,-40(%ebp) - psrlq $4,%mm3 - movq %mm4,88(%ebp) - shll $4,%edx - movb %dl,13(%esp) - movl 104(%ebx),%edx - movq 104(%ebx),%mm1 - psllq $60,%mm7 - movq 96(%ebx),%mm4 - por %mm7,%mm0 - movq %mm2,-24(%edi) - psrlq $4,%mm2 - movq %mm5,104(%edi) - movq %mm5,%mm6 - movq %mm0,-32(%ebp) - psrlq $4,%mm5 - movq %mm3,96(%ebp) - shll $4,%edx - movb %dl,14(%esp) - movl 120(%ebx),%edx - movq 120(%ebx),%mm0 - psllq $60,%mm6 - movq 112(%ebx),%mm3 - por %mm6,%mm2 - movq %mm1,-16(%edi) - psrlq $4,%mm1 - movq %mm4,112(%edi) - movq %mm4,%mm7 - movq %mm2,-24(%ebp) - psrlq $4,%mm4 - movq %mm5,104(%ebp) - shll $4,%edx - movb %dl,15(%esp) - psllq $60,%mm7 - por %mm7,%mm1 - movq %mm0,-8(%edi) - psrlq $4,%mm0 - movq %mm3,120(%edi) - movq %mm3,%mm6 - movq %mm1,-16(%ebp) - psrlq $4,%mm3 - movq %mm4,112(%ebp) - psllq $60,%mm6 - por %mm6,%mm0 - movq %mm0,-8(%ebp) - movq %mm3,120(%ebp) - movq (%eax),%mm6 - movl 8(%eax),%ebx - movl 12(%eax),%edx -.align 4,0x90 -L004outer: - xorl 12(%ecx),%edx - xorl 8(%ecx),%ebx - pxor (%ecx),%mm6 - leal 16(%ecx),%ecx - movl %ebx,536(%esp) - movq %mm6,528(%esp) - movl %ecx,548(%esp) - xorl %eax,%eax - roll $8,%edx - movb %dl,%al - movl %eax,%ebp - andb $15,%al - shrl $4,%ebp - pxor %mm0,%mm0 - roll $8,%edx - pxor %mm1,%mm1 - pxor %mm2,%mm2 - movq 
16(%esp,%eax,8),%mm7 - movq 144(%esp,%eax,8),%mm6 - movb %dl,%al - movd %mm7,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - shrl $4,%edi - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 536(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl 
%eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 532(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 528(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 
272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm1,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm0 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - xorb (%esp,%ebp,1),%bl - movb %dl,%al - movd %mm7,%ecx - movzbl %bl,%ebx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%ebp - psrlq $8,%mm6 - pxor 272(%esp,%edi,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm0,%mm6 - shrl $4,%ebp - pinsrw $2,(%esi,%ebx,2),%mm2 - pxor 16(%esp,%eax,8),%mm7 - roll $8,%edx - pxor 144(%esp,%eax,8),%mm6 - pxor %mm3,%mm7 - pxor 400(%esp,%edi,8),%mm6 - xorb (%esp,%edi,1),%cl - movb %dl,%al - movl 524(%esp),%edx - movd %mm7,%ebx - movzbl %cl,%ecx - psrlq $8,%mm7 - movq %mm6,%mm3 - movl %eax,%edi - psrlq $8,%mm6 - pxor 272(%esp,%ebp,8),%mm7 - andb $15,%al - psllq $56,%mm3 - pxor %mm2,%mm6 - shrl $4,%edi - pinsrw $2,(%esi,%ecx,2),%mm1 - pxor 16(%esp,%eax,8),%mm7 - pxor 144(%esp,%eax,8),%mm6 - xorb (%esp,%ebp,1),%bl - pxor %mm3,%mm7 - pxor 400(%esp,%ebp,8),%mm6 - movzbl %bl,%ebx - pxor %mm2,%mm2 - psllq $4,%mm1 - movd %mm7,%ecx - psrlq $4,%mm7 - movq %mm6,%mm3 - psrlq $4,%mm6 - shll $4,%ecx - pxor 16(%esp,%edi,8),%mm7 - psllq $60,%mm3 - movzbl %cl,%ecx - 
pxor %mm3,%mm7 - pxor 144(%esp,%edi,8),%mm6 - pinsrw $2,(%esi,%ebx,2),%mm0 - pxor %mm1,%mm6 - movd %mm7,%edx - pinsrw $3,(%esi,%ecx,2),%mm2 - psllq $12,%mm0 - pxor %mm0,%mm6 - psrlq $32,%mm7 - pxor %mm2,%mm6 - movl 548(%esp),%ecx - movd %mm7,%ebx - movq %mm6,%mm3 - psllw $8,%mm6 - psrlw $8,%mm3 - por %mm3,%mm6 - bswap %edx - pshufw $27,%mm6,%mm6 - bswap %ebx - cmpl 552(%esp),%ecx - jne L004outer - movl 544(%esp),%eax - movl %edx,12(%eax) - movl %ebx,8(%eax) - movq %mm6,(%eax) - movl 556(%esp),%esp - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _gcm_init_clmul -.private_extern _gcm_init_clmul -.align 4 -_gcm_init_clmul: -L_gcm_init_clmul_begin: - movl 4(%esp),%edx - movl 8(%esp),%eax - call L005pic -L005pic: - popl %ecx - leal Lbswap-L005pic(%ecx),%ecx - movdqu (%eax),%xmm2 - pshufd $78,%xmm2,%xmm2 - pshufd $255,%xmm2,%xmm4 - movdqa %xmm2,%xmm3 - psllq $1,%xmm2 - pxor %xmm5,%xmm5 - psrlq $63,%xmm3 - pcmpgtd %xmm4,%xmm5 - pslldq $8,%xmm3 - por %xmm3,%xmm2 - pand 16(%ecx),%xmm5 - pxor %xmm5,%xmm2 - movdqa %xmm2,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pshufd $78,%xmm2,%xmm4 - pxor %xmm0,%xmm3 - pxor %xmm2,%xmm4 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm2,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm2,%xmm3 - movdqu %xmm2,(%edx) - pxor %xmm0,%xmm4 - movdqu %xmm0,16(%edx) -.byte 102,15,58,15,227,8 - movdqu %xmm4,32(%edx) - ret -.globl _gcm_gmult_clmul -.private_extern _gcm_gmult_clmul 
-.align 4 -_gcm_gmult_clmul: -L_gcm_gmult_clmul_begin: - movl 4(%esp),%eax - movl 8(%esp),%edx - call L006pic -L006pic: - popl %ecx - leal Lbswap-L006pic(%ecx),%ecx - movdqu (%eax),%xmm0 - movdqa (%ecx),%xmm5 - movups (%edx),%xmm2 -.byte 102,15,56,0,197 - movups 32(%edx),%xmm4 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,197 - movdqu %xmm0,(%eax) - ret -.globl _gcm_ghash_clmul -.private_extern _gcm_ghash_clmul -.align 4 -_gcm_ghash_clmul: -L_gcm_ghash_clmul_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%eax - movl 24(%esp),%edx - movl 28(%esp),%esi - movl 32(%esp),%ebx - call L007pic -L007pic: - popl %ecx - leal Lbswap-L007pic(%ecx),%ecx - movdqu (%eax),%xmm0 - movdqa (%ecx),%xmm5 - movdqu (%edx),%xmm2 -.byte 102,15,56,0,197 - subl $16,%ebx - jz L008odd_tail - movdqu (%esi),%xmm3 - movdqu 16(%esi),%xmm6 -.byte 102,15,56,0,221 -.byte 102,15,56,0,245 - movdqu 32(%edx),%xmm5 - pxor %xmm3,%xmm0 - pshufd $78,%xmm6,%xmm3 - movdqa %xmm6,%xmm7 - pxor %xmm6,%xmm3 - leal 32(%esi),%esi -.byte 102,15,58,68,242,0 -.byte 102,15,58,68,250,17 -.byte 102,15,58,68,221,0 - movups 16(%edx),%xmm2 - nop - subl $32,%ebx - jbe L009even_tail - jmp L010mod_loop -.align 5,0x90 -L010mod_loop: - pshufd $78,%xmm0,%xmm4 - movdqa %xmm0,%xmm1 - pxor %xmm0,%xmm4 - nop -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,229,16 - 
movups (%edx),%xmm2 - xorps %xmm6,%xmm0 - movdqa (%ecx),%xmm5 - xorps %xmm7,%xmm1 - movdqu (%esi),%xmm7 - pxor %xmm0,%xmm3 - movdqu 16(%esi),%xmm6 - pxor %xmm1,%xmm3 -.byte 102,15,56,0,253 - pxor %xmm3,%xmm4 - movdqa %xmm4,%xmm3 - psrldq $8,%xmm4 - pslldq $8,%xmm3 - pxor %xmm4,%xmm1 - pxor %xmm3,%xmm0 -.byte 102,15,56,0,245 - pxor %xmm7,%xmm1 - movdqa %xmm6,%xmm7 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 -.byte 102,15,58,68,242,0 - movups 32(%edx),%xmm5 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - pshufd $78,%xmm7,%xmm3 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm7,%xmm3 - pxor %xmm4,%xmm1 -.byte 102,15,58,68,250,17 - movups 16(%edx),%xmm2 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,58,68,221,0 - leal 32(%esi),%esi - subl $32,%ebx - ja L010mod_loop -L009even_tail: - pshufd $78,%xmm0,%xmm4 - movdqa %xmm0,%xmm1 - pxor %xmm0,%xmm4 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,229,16 - movdqa (%ecx),%xmm5 - xorps %xmm6,%xmm0 - xorps %xmm7,%xmm1 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - pxor %xmm3,%xmm4 - movdqa %xmm4,%xmm3 - psrldq $8,%xmm4 - pslldq $8,%xmm3 - pxor %xmm4,%xmm1 - pxor %xmm3,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - testl %ebx,%ebx - jnz L011done - movups (%edx),%xmm2 -L008odd_tail: - movdqu (%esi),%xmm3 -.byte 102,15,56,0,221 - pxor %xmm3,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pshufd $78,%xmm2,%xmm4 - pxor %xmm0,%xmm3 - pxor %xmm2,%xmm4 -.byte 102,15,58,68,194,0 -.byte 
102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - xorps %xmm0,%xmm3 - xorps %xmm1,%xmm3 - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -L011done: -.byte 102,15,56,0,197 - movdqu %xmm0,(%eax) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 6,0x90 -Lbswap: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 -.align 6,0x90 -Lrem_8bit: -.value 0,450,900,582,1800,1738,1164,1358 -.value 3600,4050,3476,3158,2328,2266,2716,2910 -.value 7200,7650,8100,7782,6952,6890,6316,6510 -.value 4656,5106,4532,4214,5432,5370,5820,6014 -.value 14400,14722,15300,14854,16200,16010,15564,15630 -.value 13904,14226,13780,13334,12632,12442,13020,13086 -.value 9312,9634,10212,9766,9064,8874,8428,8494 -.value 10864,11186,10740,10294,11640,11450,12028,12094 -.value 28800,28994,29444,29382,30600,30282,29708,30158 -.value 32400,32594,32020,31958,31128,30810,31260,31710 -.value 27808,28002,28452,28390,27560,27242,26668,27118 -.value 25264,25458,24884,24822,26040,25722,26172,26622 -.value 18624,18690,19268,19078,20424,19978,19532,19854 -.value 18128,18194,17748,17558,16856,16410,16988,17310 -.value 21728,21794,22372,22182,21480,21034,20588,20910 -.value 23280,23346,22900,22710,24056,23610,24188,24510 -.value 57600,57538,57988,58182,58888,59338,58764,58446 -.value 61200,61138,60564,60758,59416,59866,60316,59998 -.value 64800,64738,65188,65382,64040,64490,63916,63598 -.value 62256,62194,61620,61814,62520,62970,63420,63102 -.value 55616,55426,56004,56070,56904,57226,56780,56334 -.value 55120,54930,54484,54550,53336,53658,54236,53790 -.value 
50528,50338,50916,50982,49768,50090,49644,49198 -.value 52080,51890,51444,51510,52344,52666,53244,52798 -.value 37248,36930,37380,37830,38536,38730,38156,38094 -.value 40848,40530,39956,40406,39064,39258,39708,39646 -.value 36256,35938,36388,36838,35496,35690,35116,35054 -.value 33712,33394,32820,33270,33976,34170,34620,34558 -.value 43456,43010,43588,43910,44744,44810,44364,44174 -.value 42960,42514,42068,42390,41176,41242,41820,41630 -.value 46560,46114,46692,47014,45800,45866,45420,45230 -.value 48112,47666,47220,47542,48376,48442,49020,48830 -.align 6,0x90 -Lrem_4bit: -.long 0,0,0,471859200,0,943718400,0,610271232 -.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208 -.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008 -.long 0,2441084928,0,2376073216,0,2847932416,0,3051356160 -.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 -.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 -.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 -.byte 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/md5-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/md5-586.S deleted file mode 100644 index 391acbd123..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/md5-586.S +++ /dev/null @@ -1,685 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _md5_block_asm_data_order -.private_extern _md5_block_asm_data_order -.align 4 -_md5_block_asm_data_order: -L_md5_block_asm_data_order_begin: - pushl %esi - pushl %edi - movl 12(%esp),%edi - movl 16(%esp),%esi - movl 20(%esp),%ecx - pushl %ebp - shll $6,%ecx - pushl %ebx - addl %esi,%ecx - subl $64,%ecx - movl (%edi),%eax - pushl %ecx - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx -L000start: - - # R0 section - movl %ecx,%edi - movl (%esi),%ebp - # R0 0 - xorl %edx,%edi - andl %ebx,%edi - leal 3614090360(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 4(%esi),%ebp - addl %ebx,%eax - # R0 1 - xorl %ecx,%edi - andl %eax,%edi - leal 3905402710(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 8(%esi),%ebp - addl %eax,%edx - # R0 2 - xorl %ebx,%edi - andl %edx,%edi - leal 606105819(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 12(%esi),%ebp - addl %edx,%ecx - # R0 3 - xorl %eax,%edi - andl %ecx,%edi - leal 3250441966(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 16(%esi),%ebp - addl %ecx,%ebx - # R0 4 - xorl %edx,%edi - andl %ebx,%edi - leal 4118548399(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 20(%esi),%ebp - addl %ebx,%eax - # R0 5 - xorl %ecx,%edi - andl %eax,%edi - leal 1200080426(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 24(%esi),%ebp - addl %eax,%edx - # R0 6 - xorl %ebx,%edi - andl %edx,%edi - leal 2821735955(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 28(%esi),%ebp - addl %edx,%ecx - # R0 7 - xorl %eax,%edi - andl %ecx,%edi - leal 4249261313(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 
32(%esi),%ebp - addl %ecx,%ebx - # R0 8 - xorl %edx,%edi - andl %ebx,%edi - leal 1770035416(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 36(%esi),%ebp - addl %ebx,%eax - # R0 9 - xorl %ecx,%edi - andl %eax,%edi - leal 2336552879(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 40(%esi),%ebp - addl %eax,%edx - # R0 10 - xorl %ebx,%edi - andl %edx,%edi - leal 4294925233(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 44(%esi),%ebp - addl %edx,%ecx - # R0 11 - xorl %eax,%edi - andl %ecx,%edi - leal 2304563134(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 48(%esi),%ebp - addl %ecx,%ebx - # R0 12 - xorl %edx,%edi - andl %ebx,%edi - leal 1804603682(%eax,%ebp,1),%eax - xorl %edx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $7,%eax - movl 52(%esi),%ebp - addl %ebx,%eax - # R0 13 - xorl %ecx,%edi - andl %eax,%edi - leal 4254626195(%edx,%ebp,1),%edx - xorl %ecx,%edi - addl %edi,%edx - movl %eax,%edi - roll $12,%edx - movl 56(%esi),%ebp - addl %eax,%edx - # R0 14 - xorl %ebx,%edi - andl %edx,%edi - leal 2792965006(%ecx,%ebp,1),%ecx - xorl %ebx,%edi - addl %edi,%ecx - movl %edx,%edi - roll $17,%ecx - movl 60(%esi),%ebp - addl %edx,%ecx - # R0 15 - xorl %eax,%edi - andl %ecx,%edi - leal 1236535329(%ebx,%ebp,1),%ebx - xorl %eax,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $22,%ebx - movl 4(%esi),%ebp - addl %ecx,%ebx - - # R1 section - # R1 16 - leal 4129170786(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 24(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - # R1 17 - leal 3225465664(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 44(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - # R1 18 - leal 643717713(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl (%esi),%ebp - xorl 
%eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - # R1 19 - leal 3921069994(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 20(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - # R1 20 - leal 3593408605(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 40(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - # R1 21 - leal 38016083(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 60(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - # R1 22 - leal 3634488961(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 16(%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - # R1 23 - leal 3889429448(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 36(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - # R1 24 - leal 568446438(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 56(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - # R1 25 - leal 3275163606(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - movl 12(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - # R1 26 - leal 4107603335(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 32(%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - # R1 27 - leal 1163531501(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 52(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - # R1 28 - leal 2850285829(%eax,%ebp,1),%eax - xorl %ebx,%edi - andl %edx,%edi - movl 8(%esi),%ebp - xorl %ecx,%edi - addl %edi,%eax - movl %ebx,%edi - roll $5,%eax - addl %ebx,%eax - # R1 29 - leal 4243563512(%edx,%ebp,1),%edx - xorl %eax,%edi - andl %ecx,%edi - 
movl 28(%esi),%ebp - xorl %ebx,%edi - addl %edi,%edx - movl %eax,%edi - roll $9,%edx - addl %eax,%edx - # R1 30 - leal 1735328473(%ecx,%ebp,1),%ecx - xorl %edx,%edi - andl %ebx,%edi - movl 48(%esi),%ebp - xorl %eax,%edi - addl %edi,%ecx - movl %edx,%edi - roll $14,%ecx - addl %edx,%ecx - # R1 31 - leal 2368359562(%ebx,%ebp,1),%ebx - xorl %ecx,%edi - andl %eax,%edi - movl 20(%esi),%ebp - xorl %edx,%edi - addl %edi,%ebx - movl %ecx,%edi - roll $20,%ebx - addl %ecx,%ebx - - # R2 section - # R2 32 - xorl %edx,%edi - xorl %ebx,%edi - leal 4294588738(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 32(%esi),%ebp - movl %ebx,%edi - # R2 33 - leal 2272392833(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 44(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - # R2 34 - xorl %ebx,%edi - xorl %edx,%edi - leal 1839030562(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 56(%esi),%ebp - movl %edx,%edi - # R2 35 - leal 4259657740(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 4(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - # R2 36 - xorl %edx,%edi - xorl %ebx,%edi - leal 2763975236(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 16(%esi),%ebp - movl %ebx,%edi - # R2 37 - leal 1272893353(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 28(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - # R2 38 - xorl %ebx,%edi - xorl %edx,%edi - leal 4139469664(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 40(%esi),%ebp - movl %edx,%edi - # R2 39 - leal 3200236656(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 52(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - # R2 40 - xorl %edx,%edi - xorl %ebx,%edi - leal 681279174(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl (%esi),%ebp - movl %ebx,%edi - # R2 41 - leal 
3936430074(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 12(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - # R2 42 - xorl %ebx,%edi - xorl %edx,%edi - leal 3572445317(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 24(%esi),%ebp - movl %edx,%edi - # R2 43 - leal 76029189(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl 36(%esi),%ebp - addl %edi,%ebx - movl %ecx,%edi - roll $23,%ebx - addl %ecx,%ebx - # R2 44 - xorl %edx,%edi - xorl %ebx,%edi - leal 3654602809(%eax,%ebp,1),%eax - addl %edi,%eax - roll $4,%eax - movl 48(%esi),%ebp - movl %ebx,%edi - # R2 45 - leal 3873151461(%edx,%ebp,1),%edx - addl %ebx,%eax - xorl %ecx,%edi - xorl %eax,%edi - movl 60(%esi),%ebp - addl %edi,%edx - movl %eax,%edi - roll $11,%edx - addl %eax,%edx - # R2 46 - xorl %ebx,%edi - xorl %edx,%edi - leal 530742520(%ecx,%ebp,1),%ecx - addl %edi,%ecx - roll $16,%ecx - movl 8(%esi),%ebp - movl %edx,%edi - # R2 47 - leal 3299628645(%ebx,%ebp,1),%ebx - addl %edx,%ecx - xorl %eax,%edi - xorl %ecx,%edi - movl (%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $23,%ebx - addl %ecx,%ebx - - # R3 section - # R3 48 - xorl %edx,%edi - orl %ebx,%edi - leal 4096336452(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 28(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - # R3 49 - orl %eax,%edi - leal 1126891415(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 56(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - # R3 50 - orl %edx,%edi - leal 2878612391(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 20(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - # R3 51 - orl %ecx,%edi - leal 4237533241(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 48(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - # R3 52 - orl %ebx,%edi - leal 1700485571(%eax,%ebp,1),%eax - 
xorl %ecx,%edi - movl 12(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - # R3 53 - orl %eax,%edi - leal 2399980690(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 40(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - # R3 54 - orl %edx,%edi - leal 4293915773(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 4(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - # R3 55 - orl %ecx,%edi - leal 2240044497(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 32(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - # R3 56 - orl %ebx,%edi - leal 1873313359(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 60(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - # R3 57 - orl %eax,%edi - leal 4264355552(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 24(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - # R3 58 - orl %edx,%edi - leal 2734768916(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 52(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - # R3 59 - orl %ecx,%edi - leal 1309151649(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 16(%esi),%ebp - addl %edi,%ebx - movl $-1,%edi - roll $21,%ebx - xorl %edx,%edi - addl %ecx,%ebx - # R3 60 - orl %ebx,%edi - leal 4149444226(%eax,%ebp,1),%eax - xorl %ecx,%edi - movl 44(%esi),%ebp - addl %edi,%eax - movl $-1,%edi - roll $6,%eax - xorl %ecx,%edi - addl %ebx,%eax - # R3 61 - orl %eax,%edi - leal 3174756917(%edx,%ebp,1),%edx - xorl %ebx,%edi - movl 8(%esi),%ebp - addl %edi,%edx - movl $-1,%edi - roll $10,%edx - xorl %ebx,%edi - addl %eax,%edx - # R3 62 - orl %edx,%edi - leal 718787259(%ecx,%ebp,1),%ecx - xorl %eax,%edi - movl 36(%esi),%ebp - addl %edi,%ecx - movl $-1,%edi - roll $15,%ecx - xorl %eax,%edi - addl %edx,%ecx - # R3 63 - orl %ecx,%edi - leal 
3951481745(%ebx,%ebp,1),%ebx - xorl %edx,%edi - movl 24(%esp),%ebp - addl %edi,%ebx - addl $64,%esi - roll $21,%ebx - movl (%ebp),%edi - addl %ecx,%ebx - addl %edi,%eax - movl 4(%ebp),%edi - addl %edi,%ebx - movl 8(%ebp),%edi - addl %edi,%ecx - movl 12(%ebp),%edi - addl %edi,%edx - movl %eax,(%ebp) - movl %ebx,4(%ebp) - movl (%esp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - cmpl %esi,%edi - jae L000start - popl %eax - popl %ebx - popl %ebp - popl %edi - popl %esi - ret -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha1-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha1-586.S deleted file mode 100644 index 89c5d168e5..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha1-586.S +++ /dev/null @@ -1,3805 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _sha1_block_data_order -.private_extern _sha1_block_data_order -.align 4 -_sha1_block_data_order: -L_sha1_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call L000pic_point -L000pic_point: - popl %ebp - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L000pic_point(%ebp),%esi - leal LK_XX_XX-L000pic_point(%ebp),%ebp - movl (%esi),%eax - movl 4(%esi),%edx - testl $512,%edx - jz L001x86 - movl 8(%esi),%ecx - testl $16777216,%eax - jz L001x86 - andl $268435456,%edx - andl $1073741824,%eax - orl %edx,%eax - cmpl $1342177280,%eax - je Lavx_shortcut - jmp Lssse3_shortcut -.align 4,0x90 -L001x86: - movl 20(%esp),%ebp - movl 24(%esp),%esi - movl 28(%esp),%eax - subl $76,%esp - shll $6,%eax - addl %esi,%eax - movl %eax,104(%esp) - movl 16(%ebp),%edi - jmp L002loop -.align 4,0x90 -L002loop: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,(%esp) - movl %ebx,4(%esp) - movl 
%ecx,8(%esp) - movl %edx,12(%esp) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,16(%esp) - movl %ebx,20(%esp) - movl %ecx,24(%esp) - movl %edx,28(%esp) - movl 32(%esi),%eax - movl 36(%esi),%ebx - movl 40(%esi),%ecx - movl 44(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,40(%esp) - movl %edx,44(%esp) - movl 48(%esi),%eax - movl 52(%esi),%ebx - movl 56(%esi),%ecx - movl 60(%esi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - movl %eax,48(%esp) - movl %ebx,52(%esp) - movl %ecx,56(%esp) - movl %edx,60(%esp) - movl %esi,100(%esp) - movl (%ebp),%eax - movl 4(%ebp),%ebx - movl 8(%ebp),%ecx - movl 12(%ebp),%edx - # 00_15 0 - movl %ecx,%esi - movl %eax,%ebp - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl (%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - # 00_15 1 - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 4(%esp),%edx - andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - # 00_15 2 - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 8(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - # 00_15 3 - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 12(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - addl %ecx,%ebp - # 00_15 4 - movl %edi,%ebx - movl %ebp,%ecx - roll $5,%ebp - xorl %esi,%ebx - addl %eax,%ebp - movl 16(%esp),%eax - andl %edx,%ebx - rorl $2,%edx - xorl %esi,%ebx - leal 1518500249(%ebp,%eax,1),%ebp - addl %ebx,%ebp - # 00_15 5 - movl %edx,%eax - movl %ebp,%ebx - roll $5,%ebp - xorl %edi,%eax - addl %esi,%ebp - movl 20(%esp),%esi - 
andl %ecx,%eax - rorl $2,%ecx - xorl %edi,%eax - leal 1518500249(%ebp,%esi,1),%ebp - addl %eax,%ebp - # 00_15 6 - movl %ecx,%esi - movl %ebp,%eax - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl 24(%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - # 00_15 7 - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 28(%esp),%edx - andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - # 00_15 8 - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 32(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - # 00_15 9 - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 36(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - addl %ecx,%ebp - # 00_15 10 - movl %edi,%ebx - movl %ebp,%ecx - roll $5,%ebp - xorl %esi,%ebx - addl %eax,%ebp - movl 40(%esp),%eax - andl %edx,%ebx - rorl $2,%edx - xorl %esi,%ebx - leal 1518500249(%ebp,%eax,1),%ebp - addl %ebx,%ebp - # 00_15 11 - movl %edx,%eax - movl %ebp,%ebx - roll $5,%ebp - xorl %edi,%eax - addl %esi,%ebp - movl 44(%esp),%esi - andl %ecx,%eax - rorl $2,%ecx - xorl %edi,%eax - leal 1518500249(%ebp,%esi,1),%ebp - addl %eax,%ebp - # 00_15 12 - movl %ecx,%esi - movl %ebp,%eax - roll $5,%ebp - xorl %edx,%esi - addl %edi,%ebp - movl 48(%esp),%edi - andl %ebx,%esi - rorl $2,%ebx - xorl %edx,%esi - leal 1518500249(%ebp,%edi,1),%ebp - addl %esi,%ebp - # 00_15 13 - movl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - xorl %ecx,%edi - addl %edx,%ebp - movl 52(%esp),%edx - andl %eax,%edi - rorl $2,%eax - xorl %ecx,%edi - leal 1518500249(%ebp,%edx,1),%ebp - addl %edi,%ebp - # 00_15 14 - movl %eax,%edx - movl %ebp,%edi - roll $5,%ebp - xorl %ebx,%edx - addl %ecx,%ebp - movl 56(%esp),%ecx - andl %esi,%edx - rorl $2,%esi - 
xorl %ebx,%edx - leal 1518500249(%ebp,%ecx,1),%ebp - addl %edx,%ebp - # 00_15 15 - movl %esi,%ecx - movl %ebp,%edx - roll $5,%ebp - xorl %eax,%ecx - addl %ebx,%ebp - movl 60(%esp),%ebx - andl %edi,%ecx - rorl $2,%edi - xorl %eax,%ecx - leal 1518500249(%ebp,%ebx,1),%ebp - movl (%esp),%ebx - addl %ebp,%ecx - # 16_19 16 - movl %edi,%ebp - xorl 8(%esp),%ebx - xorl %esi,%ebp - xorl 32(%esp),%ebx - andl %edx,%ebp - xorl 52(%esp),%ebx - roll $1,%ebx - xorl %esi,%ebp - addl %ebp,%eax - movl %ecx,%ebp - rorl $2,%edx - movl %ebx,(%esp) - roll $5,%ebp - leal 1518500249(%ebx,%eax,1),%ebx - movl 4(%esp),%eax - addl %ebp,%ebx - # 16_19 17 - movl %edx,%ebp - xorl 12(%esp),%eax - xorl %edi,%ebp - xorl 36(%esp),%eax - andl %ecx,%ebp - xorl 56(%esp),%eax - roll $1,%eax - xorl %edi,%ebp - addl %ebp,%esi - movl %ebx,%ebp - rorl $2,%ecx - movl %eax,4(%esp) - roll $5,%ebp - leal 1518500249(%eax,%esi,1),%eax - movl 8(%esp),%esi - addl %ebp,%eax - # 16_19 18 - movl %ecx,%ebp - xorl 16(%esp),%esi - xorl %edx,%ebp - xorl 40(%esp),%esi - andl %ebx,%ebp - xorl 60(%esp),%esi - roll $1,%esi - xorl %edx,%ebp - addl %ebp,%edi - movl %eax,%ebp - rorl $2,%ebx - movl %esi,8(%esp) - roll $5,%ebp - leal 1518500249(%esi,%edi,1),%esi - movl 12(%esp),%edi - addl %ebp,%esi - # 16_19 19 - movl %ebx,%ebp - xorl 20(%esp),%edi - xorl %ecx,%ebp - xorl 44(%esp),%edi - andl %eax,%ebp - xorl (%esp),%edi - roll $1,%edi - xorl %ecx,%ebp - addl %ebp,%edx - movl %esi,%ebp - rorl $2,%eax - movl %edi,12(%esp) - roll $5,%ebp - leal 1518500249(%edi,%edx,1),%edi - movl 16(%esp),%edx - addl %ebp,%edi - # 20_39 20 - movl %esi,%ebp - xorl 24(%esp),%edx - xorl %eax,%ebp - xorl 48(%esp),%edx - xorl %ebx,%ebp - xorl 4(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,16(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 20(%esp),%ecx - addl %ebp,%edx - # 20_39 21 - movl %edi,%ebp - xorl 28(%esp),%ecx - xorl %esi,%ebp - xorl 52(%esp),%ecx - xorl %eax,%ebp - xorl 8(%esp),%ecx - 
roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,20(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 24(%esp),%ebx - addl %ebp,%ecx - # 20_39 22 - movl %edx,%ebp - xorl 32(%esp),%ebx - xorl %edi,%ebp - xorl 56(%esp),%ebx - xorl %esi,%ebp - xorl 12(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,24(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 28(%esp),%eax - addl %ebp,%ebx - # 20_39 23 - movl %ecx,%ebp - xorl 36(%esp),%eax - xorl %edx,%ebp - xorl 60(%esp),%eax - xorl %edi,%ebp - xorl 16(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,28(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 32(%esp),%esi - addl %ebp,%eax - # 20_39 24 - movl %ebx,%ebp - xorl 40(%esp),%esi - xorl %ecx,%ebp - xorl (%esp),%esi - xorl %edx,%ebp - xorl 20(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,32(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 36(%esp),%edi - addl %ebp,%esi - # 20_39 25 - movl %eax,%ebp - xorl 44(%esp),%edi - xorl %ebx,%ebp - xorl 4(%esp),%edi - xorl %ecx,%ebp - xorl 24(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,36(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl 40(%esp),%edx - addl %ebp,%edi - # 20_39 26 - movl %esi,%ebp - xorl 48(%esp),%edx - xorl %eax,%ebp - xorl 8(%esp),%edx - xorl %ebx,%ebp - xorl 28(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,40(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 44(%esp),%ecx - addl %ebp,%edx - # 20_39 27 - movl %edi,%ebp - xorl 52(%esp),%ecx - xorl %esi,%ebp - xorl 12(%esp),%ecx - xorl %eax,%ebp - xorl 32(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,44(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 48(%esp),%ebx - addl %ebp,%ecx - # 20_39 28 - movl 
%edx,%ebp - xorl 56(%esp),%ebx - xorl %edi,%ebp - xorl 16(%esp),%ebx - xorl %esi,%ebp - xorl 36(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,48(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 52(%esp),%eax - addl %ebp,%ebx - # 20_39 29 - movl %ecx,%ebp - xorl 60(%esp),%eax - xorl %edx,%ebp - xorl 20(%esp),%eax - xorl %edi,%ebp - xorl 40(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,52(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 56(%esp),%esi - addl %ebp,%eax - # 20_39 30 - movl %ebx,%ebp - xorl (%esp),%esi - xorl %ecx,%ebp - xorl 24(%esp),%esi - xorl %edx,%ebp - xorl 44(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,56(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 60(%esp),%edi - addl %ebp,%esi - # 20_39 31 - movl %eax,%ebp - xorl 4(%esp),%edi - xorl %ebx,%ebp - xorl 28(%esp),%edi - xorl %ecx,%ebp - xorl 48(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,60(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl (%esp),%edx - addl %ebp,%edi - # 20_39 32 - movl %esi,%ebp - xorl 8(%esp),%edx - xorl %eax,%ebp - xorl 32(%esp),%edx - xorl %ebx,%ebp - xorl 52(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 4(%esp),%ecx - addl %ebp,%edx - # 20_39 33 - movl %edi,%ebp - xorl 12(%esp),%ecx - xorl %esi,%ebp - xorl 36(%esp),%ecx - xorl %eax,%ebp - xorl 56(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,4(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 8(%esp),%ebx - addl %ebp,%ecx - # 20_39 34 - movl %edx,%ebp - xorl 16(%esp),%ebx - xorl %edi,%ebp - xorl 40(%esp),%ebx - xorl %esi,%ebp - xorl 60(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl 
%ebx,8(%esp) - leal 1859775393(%ebx,%eax,1),%ebx - movl 12(%esp),%eax - addl %ebp,%ebx - # 20_39 35 - movl %ecx,%ebp - xorl 20(%esp),%eax - xorl %edx,%ebp - xorl 44(%esp),%eax - xorl %edi,%ebp - xorl (%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,12(%esp) - leal 1859775393(%eax,%esi,1),%eax - movl 16(%esp),%esi - addl %ebp,%eax - # 20_39 36 - movl %ebx,%ebp - xorl 24(%esp),%esi - xorl %ecx,%ebp - xorl 48(%esp),%esi - xorl %edx,%ebp - xorl 4(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,16(%esp) - leal 1859775393(%esi,%edi,1),%esi - movl 20(%esp),%edi - addl %ebp,%esi - # 20_39 37 - movl %eax,%ebp - xorl 28(%esp),%edi - xorl %ebx,%ebp - xorl 52(%esp),%edi - xorl %ecx,%ebp - xorl 8(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,20(%esp) - leal 1859775393(%edi,%edx,1),%edi - movl 24(%esp),%edx - addl %ebp,%edi - # 20_39 38 - movl %esi,%ebp - xorl 32(%esp),%edx - xorl %eax,%ebp - xorl 56(%esp),%edx - xorl %ebx,%ebp - xorl 12(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,24(%esp) - leal 1859775393(%edx,%ecx,1),%edx - movl 28(%esp),%ecx - addl %ebp,%edx - # 20_39 39 - movl %edi,%ebp - xorl 36(%esp),%ecx - xorl %esi,%ebp - xorl 60(%esp),%ecx - xorl %eax,%ebp - xorl 16(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,28(%esp) - leal 1859775393(%ecx,%ebx,1),%ecx - movl 32(%esp),%ebx - addl %ebp,%ecx - # 40_59 40 - movl %edi,%ebp - xorl 40(%esp),%ebx - xorl %esi,%ebp - xorl (%esp),%ebx - andl %edx,%ebp - xorl 20(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,32(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 36(%esp),%eax - addl %ebp,%ebx - # 40_59 41 - movl %edx,%ebp - xorl 44(%esp),%eax - xorl 
%edi,%ebp - xorl 4(%esp),%eax - andl %ecx,%ebp - xorl 24(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,36(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 40(%esp),%esi - addl %ebp,%eax - # 40_59 42 - movl %ecx,%ebp - xorl 48(%esp),%esi - xorl %edx,%ebp - xorl 8(%esp),%esi - andl %ebx,%ebp - xorl 28(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,40(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 44(%esp),%edi - addl %ebp,%esi - # 40_59 43 - movl %ebx,%ebp - xorl 52(%esp),%edi - xorl %ecx,%ebp - xorl 12(%esp),%edi - andl %eax,%ebp - xorl 32(%esp),%edi - roll $1,%edi - addl %edx,%ebp - rorl $2,%eax - movl %esi,%edx - roll $5,%edx - movl %edi,44(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 48(%esp),%edx - addl %ebp,%edi - # 40_59 44 - movl %eax,%ebp - xorl 56(%esp),%edx - xorl %ebx,%ebp - xorl 16(%esp),%edx - andl %esi,%ebp - xorl 36(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,48(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 52(%esp),%ecx - addl %ebp,%edx - # 40_59 45 - movl %esi,%ebp - xorl 60(%esp),%ecx - xorl %eax,%ebp - xorl 20(%esp),%ecx - andl %edi,%ebp - xorl 40(%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,52(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 56(%esp),%ebx - addl %ebp,%ecx - # 40_59 46 - movl %edi,%ebp - xorl (%esp),%ebx - xorl %esi,%ebp - xorl 24(%esp),%ebx - andl %edx,%ebp - xorl 44(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,56(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - 
andl %esi,%ebp - movl 60(%esp),%eax - addl %ebp,%ebx - # 40_59 47 - movl %edx,%ebp - xorl 4(%esp),%eax - xorl %edi,%ebp - xorl 28(%esp),%eax - andl %ecx,%ebp - xorl 48(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,60(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl (%esp),%esi - addl %ebp,%eax - # 40_59 48 - movl %ecx,%ebp - xorl 8(%esp),%esi - xorl %edx,%ebp - xorl 32(%esp),%esi - andl %ebx,%ebp - xorl 52(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 4(%esp),%edi - addl %ebp,%esi - # 40_59 49 - movl %ebx,%ebp - xorl 12(%esp),%edi - xorl %ecx,%ebp - xorl 36(%esp),%edi - andl %eax,%ebp - xorl 56(%esp),%edi - roll $1,%edi - addl %edx,%ebp - rorl $2,%eax - movl %esi,%edx - roll $5,%edx - movl %edi,4(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 8(%esp),%edx - addl %ebp,%edi - # 40_59 50 - movl %eax,%ebp - xorl 16(%esp),%edx - xorl %ebx,%ebp - xorl 40(%esp),%edx - andl %esi,%ebp - xorl 60(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,8(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 12(%esp),%ecx - addl %ebp,%edx - # 40_59 51 - movl %esi,%ebp - xorl 20(%esp),%ecx - xorl %eax,%ebp - xorl 44(%esp),%ecx - andl %edi,%ebp - xorl (%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,12(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 16(%esp),%ebx - addl %ebp,%ecx - # 40_59 52 - movl %edi,%ebp - xorl 24(%esp),%ebx - xorl %esi,%ebp - xorl 48(%esp),%ebx - andl %edx,%ebp - xorl 4(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll 
$5,%eax - movl %ebx,16(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 20(%esp),%eax - addl %ebp,%ebx - # 40_59 53 - movl %edx,%ebp - xorl 28(%esp),%eax - xorl %edi,%ebp - xorl 52(%esp),%eax - andl %ecx,%ebp - xorl 8(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,20(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 24(%esp),%esi - addl %ebp,%eax - # 40_59 54 - movl %ecx,%ebp - xorl 32(%esp),%esi - xorl %edx,%ebp - xorl 56(%esp),%esi - andl %ebx,%ebp - xorl 12(%esp),%esi - roll $1,%esi - addl %edi,%ebp - rorl $2,%ebx - movl %eax,%edi - roll $5,%edi - movl %esi,24(%esp) - leal 2400959708(%esi,%ebp,1),%esi - movl %ecx,%ebp - addl %edi,%esi - andl %edx,%ebp - movl 28(%esp),%edi - addl %ebp,%esi - # 40_59 55 - movl %ebx,%ebp - xorl 36(%esp),%edi - xorl %ecx,%ebp - xorl 60(%esp),%edi - andl %eax,%ebp - xorl 16(%esp),%edi - roll $1,%edi - addl %edx,%ebp - rorl $2,%eax - movl %esi,%edx - roll $5,%edx - movl %edi,28(%esp) - leal 2400959708(%edi,%ebp,1),%edi - movl %ebx,%ebp - addl %edx,%edi - andl %ecx,%ebp - movl 32(%esp),%edx - addl %ebp,%edi - # 40_59 56 - movl %eax,%ebp - xorl 40(%esp),%edx - xorl %ebx,%ebp - xorl (%esp),%edx - andl %esi,%ebp - xorl 20(%esp),%edx - roll $1,%edx - addl %ecx,%ebp - rorl $2,%esi - movl %edi,%ecx - roll $5,%ecx - movl %edx,32(%esp) - leal 2400959708(%edx,%ebp,1),%edx - movl %eax,%ebp - addl %ecx,%edx - andl %ebx,%ebp - movl 36(%esp),%ecx - addl %ebp,%edx - # 40_59 57 - movl %esi,%ebp - xorl 44(%esp),%ecx - xorl %eax,%ebp - xorl 4(%esp),%ecx - andl %edi,%ebp - xorl 24(%esp),%ecx - roll $1,%ecx - addl %ebx,%ebp - rorl $2,%edi - movl %edx,%ebx - roll $5,%ebx - movl %ecx,36(%esp) - leal 2400959708(%ecx,%ebp,1),%ecx - movl %esi,%ebp - addl %ebx,%ecx - andl %eax,%ebp - movl 40(%esp),%ebx - addl %ebp,%ecx - # 40_59 58 - movl %edi,%ebp - xorl 48(%esp),%ebx - xorl %esi,%ebp - xorl 8(%esp),%ebx - 
andl %edx,%ebp - xorl 28(%esp),%ebx - roll $1,%ebx - addl %eax,%ebp - rorl $2,%edx - movl %ecx,%eax - roll $5,%eax - movl %ebx,40(%esp) - leal 2400959708(%ebx,%ebp,1),%ebx - movl %edi,%ebp - addl %eax,%ebx - andl %esi,%ebp - movl 44(%esp),%eax - addl %ebp,%ebx - # 40_59 59 - movl %edx,%ebp - xorl 52(%esp),%eax - xorl %edi,%ebp - xorl 12(%esp),%eax - andl %ecx,%ebp - xorl 32(%esp),%eax - roll $1,%eax - addl %esi,%ebp - rorl $2,%ecx - movl %ebx,%esi - roll $5,%esi - movl %eax,44(%esp) - leal 2400959708(%eax,%ebp,1),%eax - movl %edx,%ebp - addl %esi,%eax - andl %edi,%ebp - movl 48(%esp),%esi - addl %ebp,%eax - # 20_39 60 - movl %ebx,%ebp - xorl 56(%esp),%esi - xorl %ecx,%ebp - xorl 16(%esp),%esi - xorl %edx,%ebp - xorl 36(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,48(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 52(%esp),%edi - addl %ebp,%esi - # 20_39 61 - movl %eax,%ebp - xorl 60(%esp),%edi - xorl %ebx,%ebp - xorl 20(%esp),%edi - xorl %ecx,%ebp - xorl 40(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,52(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 56(%esp),%edx - addl %ebp,%edi - # 20_39 62 - movl %esi,%ebp - xorl (%esp),%edx - xorl %eax,%ebp - xorl 24(%esp),%edx - xorl %ebx,%ebp - xorl 44(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,56(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 60(%esp),%ecx - addl %ebp,%edx - # 20_39 63 - movl %edi,%ebp - xorl 4(%esp),%ecx - xorl %esi,%ebp - xorl 28(%esp),%ecx - xorl %eax,%ebp - xorl 48(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,60(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl (%esp),%ebx - addl %ebp,%ecx - # 20_39 64 - movl %edx,%ebp - xorl 8(%esp),%ebx - xorl %edi,%ebp - xorl 32(%esp),%ebx - xorl %esi,%ebp - xorl 52(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl 
%ecx,%ebp - roll $5,%ebp - movl %ebx,(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 4(%esp),%eax - addl %ebp,%ebx - # 20_39 65 - movl %ecx,%ebp - xorl 12(%esp),%eax - xorl %edx,%ebp - xorl 36(%esp),%eax - xorl %edi,%ebp - xorl 56(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,4(%esp) - leal 3395469782(%eax,%esi,1),%eax - movl 8(%esp),%esi - addl %ebp,%eax - # 20_39 66 - movl %ebx,%ebp - xorl 16(%esp),%esi - xorl %ecx,%ebp - xorl 40(%esp),%esi - xorl %edx,%ebp - xorl 60(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,8(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 12(%esp),%edi - addl %ebp,%esi - # 20_39 67 - movl %eax,%ebp - xorl 20(%esp),%edi - xorl %ebx,%ebp - xorl 44(%esp),%edi - xorl %ecx,%ebp - xorl (%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,12(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 16(%esp),%edx - addl %ebp,%edi - # 20_39 68 - movl %esi,%ebp - xorl 24(%esp),%edx - xorl %eax,%ebp - xorl 48(%esp),%edx - xorl %ebx,%ebp - xorl 4(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,16(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 20(%esp),%ecx - addl %ebp,%edx - # 20_39 69 - movl %edi,%ebp - xorl 28(%esp),%ecx - xorl %esi,%ebp - xorl 52(%esp),%ecx - xorl %eax,%ebp - xorl 8(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,20(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl 24(%esp),%ebx - addl %ebp,%ecx - # 20_39 70 - movl %edx,%ebp - xorl 32(%esp),%ebx - xorl %edi,%ebp - xorl 56(%esp),%ebx - xorl %esi,%ebp - xorl 12(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,24(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 28(%esp),%eax - addl %ebp,%ebx - # 20_39 71 - movl %ecx,%ebp - xorl 36(%esp),%eax - xorl %edx,%ebp - xorl 
60(%esp),%eax - xorl %edi,%ebp - xorl 16(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - movl %eax,28(%esp) - leal 3395469782(%eax,%esi,1),%eax - movl 32(%esp),%esi - addl %ebp,%eax - # 20_39 72 - movl %ebx,%ebp - xorl 40(%esp),%esi - xorl %ecx,%ebp - xorl (%esp),%esi - xorl %edx,%ebp - xorl 20(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - movl %esi,32(%esp) - leal 3395469782(%esi,%edi,1),%esi - movl 36(%esp),%edi - addl %ebp,%esi - # 20_39 73 - movl %eax,%ebp - xorl 44(%esp),%edi - xorl %ebx,%ebp - xorl 4(%esp),%edi - xorl %ecx,%ebp - xorl 24(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - movl %edi,36(%esp) - leal 3395469782(%edi,%edx,1),%edi - movl 40(%esp),%edx - addl %ebp,%edi - # 20_39 74 - movl %esi,%ebp - xorl 48(%esp),%edx - xorl %eax,%ebp - xorl 8(%esp),%edx - xorl %ebx,%ebp - xorl 28(%esp),%edx - roll $1,%edx - addl %ebp,%ecx - rorl $2,%esi - movl %edi,%ebp - roll $5,%ebp - movl %edx,40(%esp) - leal 3395469782(%edx,%ecx,1),%edx - movl 44(%esp),%ecx - addl %ebp,%edx - # 20_39 75 - movl %edi,%ebp - xorl 52(%esp),%ecx - xorl %esi,%ebp - xorl 12(%esp),%ecx - xorl %eax,%ebp - xorl 32(%esp),%ecx - roll $1,%ecx - addl %ebp,%ebx - rorl $2,%edi - movl %edx,%ebp - roll $5,%ebp - movl %ecx,44(%esp) - leal 3395469782(%ecx,%ebx,1),%ecx - movl 48(%esp),%ebx - addl %ebp,%ecx - # 20_39 76 - movl %edx,%ebp - xorl 56(%esp),%ebx - xorl %edi,%ebp - xorl 16(%esp),%ebx - xorl %esi,%ebp - xorl 36(%esp),%ebx - roll $1,%ebx - addl %ebp,%eax - rorl $2,%edx - movl %ecx,%ebp - roll $5,%ebp - movl %ebx,48(%esp) - leal 3395469782(%ebx,%eax,1),%ebx - movl 52(%esp),%eax - addl %ebp,%ebx - # 20_39 77 - movl %ecx,%ebp - xorl 60(%esp),%eax - xorl %edx,%ebp - xorl 20(%esp),%eax - xorl %edi,%ebp - xorl 40(%esp),%eax - roll $1,%eax - addl %ebp,%esi - rorl $2,%ecx - movl %ebx,%ebp - roll $5,%ebp - leal 3395469782(%eax,%esi,1),%eax - movl 56(%esp),%esi - addl 
%ebp,%eax - # 20_39 78 - movl %ebx,%ebp - xorl (%esp),%esi - xorl %ecx,%ebp - xorl 24(%esp),%esi - xorl %edx,%ebp - xorl 44(%esp),%esi - roll $1,%esi - addl %ebp,%edi - rorl $2,%ebx - movl %eax,%ebp - roll $5,%ebp - leal 3395469782(%esi,%edi,1),%esi - movl 60(%esp),%edi - addl %ebp,%esi - # 20_39 79 - movl %eax,%ebp - xorl 4(%esp),%edi - xorl %ebx,%ebp - xorl 28(%esp),%edi - xorl %ecx,%ebp - xorl 48(%esp),%edi - roll $1,%edi - addl %ebp,%edx - rorl $2,%eax - movl %esi,%ebp - roll $5,%ebp - leal 3395469782(%edi,%edx,1),%edi - addl %ebp,%edi - movl 96(%esp),%ebp - movl 100(%esp),%edx - addl (%ebp),%edi - addl 4(%ebp),%esi - addl 8(%ebp),%eax - addl 12(%ebp),%ebx - addl 16(%ebp),%ecx - movl %edi,(%ebp) - addl $64,%edx - movl %esi,4(%ebp) - cmpl 104(%esp),%edx - movl %eax,8(%ebp) - movl %ecx,%edi - movl %ebx,12(%ebp) - movl %edx,%esi - movl %ecx,16(%ebp) - jb L002loop - addl $76,%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.private_extern __sha1_block_data_order_ssse3 -.align 4 -__sha1_block_data_order_ssse3: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call L003pic_point -L003pic_point: - popl %ebp - leal LK_XX_XX-L003pic_point(%ebp),%ebp -Lssse3_shortcut: - movdqa (%ebp),%xmm7 - movdqa 16(%ebp),%xmm0 - movdqa 32(%ebp),%xmm1 - movdqa 48(%ebp),%xmm2 - movdqa 64(%ebp),%xmm6 - movl 20(%esp),%edi - movl 24(%esp),%ebp - movl 28(%esp),%edx - movl %esp,%esi - subl $208,%esp - andl $-64,%esp - movdqa %xmm0,112(%esp) - movdqa %xmm1,128(%esp) - movdqa %xmm2,144(%esp) - shll $6,%edx - movdqa %xmm7,160(%esp) - addl %ebp,%edx - movdqa %xmm6,176(%esp) - addl $64,%ebp - movl %edi,192(%esp) - movl %ebp,196(%esp) - movl %edx,200(%esp) - movl %esi,204(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl 16(%edi),%edi - movl %ebx,%esi - movdqu -64(%ebp),%xmm0 - movdqu -48(%ebp),%xmm1 - movdqu -32(%ebp),%xmm2 - movdqu -16(%ebp),%xmm3 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - movdqa %xmm7,96(%esp) 
-.byte 102,15,56,0,222 - paddd %xmm7,%xmm0 - paddd %xmm7,%xmm1 - paddd %xmm7,%xmm2 - movdqa %xmm0,(%esp) - psubd %xmm7,%xmm0 - movdqa %xmm1,16(%esp) - psubd %xmm7,%xmm1 - movdqa %xmm2,32(%esp) - movl %ecx,%ebp - psubd %xmm7,%xmm2 - xorl %edx,%ebp - pshufd $238,%xmm0,%xmm4 - andl %ebp,%esi - jmp L004loop -.align 4,0x90 -L004loop: - rorl $2,%ebx - xorl %edx,%esi - movl %eax,%ebp - punpcklqdq %xmm1,%xmm4 - movdqa %xmm3,%xmm6 - addl (%esp),%edi - xorl %ecx,%ebx - paddd %xmm3,%xmm7 - movdqa %xmm0,64(%esp) - roll $5,%eax - addl %esi,%edi - psrldq $4,%xmm6 - andl %ebx,%ebp - xorl %ecx,%ebx - pxor %xmm0,%xmm4 - addl %eax,%edi - rorl $7,%eax - pxor %xmm2,%xmm6 - xorl %ecx,%ebp - movl %edi,%esi - addl 4(%esp),%edx - pxor %xmm6,%xmm4 - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm7,48(%esp) - addl %ebp,%edx - andl %eax,%esi - movdqa %xmm4,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - movdqa %xmm4,%xmm6 - xorl %ebx,%esi - pslldq $12,%xmm0 - paddd %xmm4,%xmm4 - movl %edx,%ebp - addl 8(%esp),%ecx - psrld $31,%xmm6 - xorl %eax,%edi - roll $5,%edx - movdqa %xmm0,%xmm7 - addl %esi,%ecx - andl %edi,%ebp - xorl %eax,%edi - psrld $30,%xmm0 - addl %edx,%ecx - rorl $7,%edx - por %xmm6,%xmm4 - xorl %eax,%ebp - movl %ecx,%esi - addl 12(%esp),%ebx - pslld $2,%xmm7 - xorl %edi,%edx - roll $5,%ecx - pxor %xmm0,%xmm4 - movdqa 96(%esp),%xmm0 - addl %ebp,%ebx - andl %edx,%esi - pxor %xmm7,%xmm4 - pshufd $238,%xmm1,%xmm5 - xorl %edi,%edx - addl %ecx,%ebx - rorl $7,%ecx - xorl %edi,%esi - movl %ebx,%ebp - punpcklqdq %xmm2,%xmm5 - movdqa %xmm4,%xmm7 - addl 16(%esp),%eax - xorl %edx,%ecx - paddd %xmm4,%xmm0 - movdqa %xmm1,80(%esp) - roll $5,%ebx - addl %esi,%eax - psrldq $4,%xmm7 - andl %ecx,%ebp - xorl %edx,%ecx - pxor %xmm1,%xmm5 - addl %ebx,%eax - rorl $7,%ebx - pxor %xmm3,%xmm7 - xorl %edx,%ebp - movl %eax,%esi - addl 20(%esp),%edi - pxor %xmm7,%xmm5 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm0,(%esp) - addl %ebp,%edi - andl %ebx,%esi - movdqa %xmm5,%xmm1 - xorl %ecx,%ebx - addl 
%eax,%edi - rorl $7,%eax - movdqa %xmm5,%xmm7 - xorl %ecx,%esi - pslldq $12,%xmm1 - paddd %xmm5,%xmm5 - movl %edi,%ebp - addl 24(%esp),%edx - psrld $31,%xmm7 - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm1,%xmm0 - addl %esi,%edx - andl %eax,%ebp - xorl %ebx,%eax - psrld $30,%xmm1 - addl %edi,%edx - rorl $7,%edi - por %xmm7,%xmm5 - xorl %ebx,%ebp - movl %edx,%esi - addl 28(%esp),%ecx - pslld $2,%xmm0 - xorl %eax,%edi - roll $5,%edx - pxor %xmm1,%xmm5 - movdqa 112(%esp),%xmm1 - addl %ebp,%ecx - andl %edi,%esi - pxor %xmm0,%xmm5 - pshufd $238,%xmm2,%xmm6 - xorl %eax,%edi - addl %edx,%ecx - rorl $7,%edx - xorl %eax,%esi - movl %ecx,%ebp - punpcklqdq %xmm3,%xmm6 - movdqa %xmm5,%xmm0 - addl 32(%esp),%ebx - xorl %edi,%edx - paddd %xmm5,%xmm1 - movdqa %xmm2,96(%esp) - roll $5,%ecx - addl %esi,%ebx - psrldq $4,%xmm0 - andl %edx,%ebp - xorl %edi,%edx - pxor %xmm2,%xmm6 - addl %ecx,%ebx - rorl $7,%ecx - pxor %xmm4,%xmm0 - xorl %edi,%ebp - movl %ebx,%esi - addl 36(%esp),%eax - pxor %xmm0,%xmm6 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm1,16(%esp) - addl %ebp,%eax - andl %ecx,%esi - movdqa %xmm6,%xmm2 - xorl %edx,%ecx - addl %ebx,%eax - rorl $7,%ebx - movdqa %xmm6,%xmm0 - xorl %edx,%esi - pslldq $12,%xmm2 - paddd %xmm6,%xmm6 - movl %eax,%ebp - addl 40(%esp),%edi - psrld $31,%xmm0 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm2,%xmm1 - addl %esi,%edi - andl %ebx,%ebp - xorl %ecx,%ebx - psrld $30,%xmm2 - addl %eax,%edi - rorl $7,%eax - por %xmm0,%xmm6 - xorl %ecx,%ebp - movdqa 64(%esp),%xmm0 - movl %edi,%esi - addl 44(%esp),%edx - pslld $2,%xmm1 - xorl %ebx,%eax - roll $5,%edi - pxor %xmm2,%xmm6 - movdqa 112(%esp),%xmm2 - addl %ebp,%edx - andl %eax,%esi - pxor %xmm1,%xmm6 - pshufd $238,%xmm3,%xmm7 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - xorl %ebx,%esi - movl %edx,%ebp - punpcklqdq %xmm4,%xmm7 - movdqa %xmm6,%xmm1 - addl 48(%esp),%ecx - xorl %eax,%edi - paddd %xmm6,%xmm2 - movdqa %xmm3,64(%esp) - roll $5,%edx - addl %esi,%ecx - psrldq $4,%xmm1 - andl %edi,%ebp - xorl 
%eax,%edi - pxor %xmm3,%xmm7 - addl %edx,%ecx - rorl $7,%edx - pxor %xmm5,%xmm1 - xorl %eax,%ebp - movl %ecx,%esi - addl 52(%esp),%ebx - pxor %xmm1,%xmm7 - xorl %edi,%edx - roll $5,%ecx - movdqa %xmm2,32(%esp) - addl %ebp,%ebx - andl %edx,%esi - movdqa %xmm7,%xmm3 - xorl %edi,%edx - addl %ecx,%ebx - rorl $7,%ecx - movdqa %xmm7,%xmm1 - xorl %edi,%esi - pslldq $12,%xmm3 - paddd %xmm7,%xmm7 - movl %ebx,%ebp - addl 56(%esp),%eax - psrld $31,%xmm1 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm3,%xmm2 - addl %esi,%eax - andl %ecx,%ebp - xorl %edx,%ecx - psrld $30,%xmm3 - addl %ebx,%eax - rorl $7,%ebx - por %xmm1,%xmm7 - xorl %edx,%ebp - movdqa 80(%esp),%xmm1 - movl %eax,%esi - addl 60(%esp),%edi - pslld $2,%xmm2 - xorl %ecx,%ebx - roll $5,%eax - pxor %xmm3,%xmm7 - movdqa 112(%esp),%xmm3 - addl %ebp,%edi - andl %ebx,%esi - pxor %xmm2,%xmm7 - pshufd $238,%xmm6,%xmm2 - xorl %ecx,%ebx - addl %eax,%edi - rorl $7,%eax - pxor %xmm4,%xmm0 - punpcklqdq %xmm7,%xmm2 - xorl %ecx,%esi - movl %edi,%ebp - addl (%esp),%edx - pxor %xmm1,%xmm0 - movdqa %xmm4,80(%esp) - xorl %ebx,%eax - roll $5,%edi - movdqa %xmm3,%xmm4 - addl %esi,%edx - paddd %xmm7,%xmm3 - andl %eax,%ebp - pxor %xmm2,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - rorl $7,%edi - xorl %ebx,%ebp - movdqa %xmm0,%xmm2 - movdqa %xmm3,48(%esp) - movl %edx,%esi - addl 4(%esp),%ecx - xorl %eax,%edi - roll $5,%edx - pslld $2,%xmm0 - addl %ebp,%ecx - andl %edi,%esi - psrld $30,%xmm2 - xorl %eax,%edi - addl %edx,%ecx - rorl $7,%edx - xorl %eax,%esi - movl %ecx,%ebp - addl 8(%esp),%ebx - xorl %edi,%edx - roll $5,%ecx - por %xmm2,%xmm0 - addl %esi,%ebx - andl %edx,%ebp - movdqa 96(%esp),%xmm2 - xorl %edi,%edx - addl %ecx,%ebx - addl 12(%esp),%eax - xorl %edi,%ebp - movl %ebx,%esi - pshufd $238,%xmm7,%xmm3 - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 16(%esp),%edi - pxor %xmm5,%xmm1 - punpcklqdq %xmm0,%xmm3 - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - pxor %xmm2,%xmm1 - movdqa %xmm5,96(%esp) - 
addl %esi,%edi - xorl %ecx,%ebp - movdqa %xmm4,%xmm5 - rorl $7,%ebx - paddd %xmm0,%xmm4 - addl %eax,%edi - pxor %xmm3,%xmm1 - addl 20(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - movdqa %xmm1,%xmm3 - movdqa %xmm4,(%esp) - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - pslld $2,%xmm1 - addl 24(%esp),%ecx - xorl %eax,%esi - psrld $30,%xmm3 - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - por %xmm3,%xmm1 - addl 28(%esp),%ebx - xorl %edi,%ebp - movdqa 64(%esp),%xmm3 - movl %ecx,%esi - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - pshufd $238,%xmm0,%xmm4 - addl %ecx,%ebx - addl 32(%esp),%eax - pxor %xmm6,%xmm2 - punpcklqdq %xmm1,%xmm4 - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - pxor %xmm3,%xmm2 - movdqa %xmm6,64(%esp) - addl %esi,%eax - xorl %edx,%ebp - movdqa 128(%esp),%xmm6 - rorl $7,%ecx - paddd %xmm1,%xmm5 - addl %ebx,%eax - pxor %xmm4,%xmm2 - addl 36(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - movdqa %xmm2,%xmm4 - movdqa %xmm5,16(%esp) - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - pslld $2,%xmm2 - addl 40(%esp),%edx - xorl %ebx,%esi - psrld $30,%xmm4 - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - por %xmm4,%xmm2 - addl 44(%esp),%ecx - xorl %eax,%ebp - movdqa 80(%esp),%xmm4 - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - pshufd $238,%xmm1,%xmm5 - addl %edx,%ecx - addl 48(%esp),%ebx - pxor %xmm7,%xmm3 - punpcklqdq %xmm2,%xmm5 - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - pxor %xmm4,%xmm3 - movdqa %xmm7,80(%esp) - addl %esi,%ebx - xorl %edi,%ebp - movdqa %xmm6,%xmm7 - rorl $7,%edx - paddd %xmm2,%xmm6 - addl %ecx,%ebx - pxor %xmm5,%xmm3 - addl 52(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - movdqa %xmm3,%xmm5 - movdqa %xmm6,32(%esp) - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - pslld 
$2,%xmm3 - addl 56(%esp),%edi - xorl %ecx,%esi - psrld $30,%xmm5 - movl %eax,%ebp - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - por %xmm5,%xmm3 - addl 60(%esp),%edx - xorl %ebx,%ebp - movdqa 96(%esp),%xmm5 - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - pshufd $238,%xmm2,%xmm6 - addl %edi,%edx - addl (%esp),%ecx - pxor %xmm0,%xmm4 - punpcklqdq %xmm3,%xmm6 - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - pxor %xmm5,%xmm4 - movdqa %xmm0,96(%esp) - addl %esi,%ecx - xorl %eax,%ebp - movdqa %xmm7,%xmm0 - rorl $7,%edi - paddd %xmm3,%xmm7 - addl %edx,%ecx - pxor %xmm6,%xmm4 - addl 4(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - movdqa %xmm4,%xmm6 - movdqa %xmm7,48(%esp) - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl %ecx,%ebx - pslld $2,%xmm4 - addl 8(%esp),%eax - xorl %edx,%esi - psrld $30,%xmm6 - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - por %xmm6,%xmm4 - addl 12(%esp),%edi - xorl %ecx,%ebp - movdqa 64(%esp),%xmm6 - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - pshufd $238,%xmm3,%xmm7 - addl %eax,%edi - addl 16(%esp),%edx - pxor %xmm1,%xmm5 - punpcklqdq %xmm4,%xmm7 - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - pxor %xmm6,%xmm5 - movdqa %xmm1,64(%esp) - addl %esi,%edx - xorl %ebx,%ebp - movdqa %xmm0,%xmm1 - rorl $7,%eax - paddd %xmm4,%xmm0 - addl %edi,%edx - pxor %xmm7,%xmm5 - addl 20(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - movdqa %xmm5,%xmm7 - movdqa %xmm0,(%esp) - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - pslld $2,%xmm5 - addl 24(%esp),%ebx - xorl %edi,%esi - psrld $30,%xmm7 - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - por %xmm7,%xmm5 - addl 28(%esp),%eax - movdqa 80(%esp),%xmm7 - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - pshufd $238,%xmm4,%xmm0 - addl 
%ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 32(%esp),%edi - pxor %xmm2,%xmm6 - punpcklqdq %xmm5,%xmm0 - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - pxor %xmm7,%xmm6 - movdqa %xmm2,80(%esp) - movl %eax,%ebp - xorl %ecx,%esi - roll $5,%eax - movdqa %xmm1,%xmm2 - addl %esi,%edi - paddd %xmm5,%xmm1 - xorl %ebx,%ebp - pxor %xmm0,%xmm6 - xorl %ecx,%ebx - addl %eax,%edi - addl 36(%esp),%edx - andl %ebx,%ebp - movdqa %xmm6,%xmm0 - movdqa %xmm1,16(%esp) - xorl %ecx,%ebx - rorl $7,%eax - movl %edi,%esi - xorl %ebx,%ebp - roll $5,%edi - pslld $2,%xmm6 - addl %ebp,%edx - xorl %eax,%esi - psrld $30,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - addl 40(%esp),%ecx - andl %eax,%esi - xorl %ebx,%eax - rorl $7,%edi - por %xmm0,%xmm6 - movl %edx,%ebp - xorl %eax,%esi - movdqa 96(%esp),%xmm0 - roll $5,%edx - addl %esi,%ecx - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - pshufd $238,%xmm5,%xmm1 - addl 44(%esp),%ebx - andl %edi,%ebp - xorl %eax,%edi - rorl $7,%edx - movl %ecx,%esi - xorl %edi,%ebp - roll $5,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - addl 48(%esp),%eax - pxor %xmm3,%xmm7 - punpcklqdq %xmm6,%xmm1 - andl %edx,%esi - xorl %edi,%edx - rorl $7,%ecx - pxor %xmm0,%xmm7 - movdqa %xmm3,96(%esp) - movl %ebx,%ebp - xorl %edx,%esi - roll $5,%ebx - movdqa 144(%esp),%xmm3 - addl %esi,%eax - paddd %xmm6,%xmm2 - xorl %ecx,%ebp - pxor %xmm1,%xmm7 - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%esp),%edi - andl %ecx,%ebp - movdqa %xmm7,%xmm1 - movdqa %xmm2,32(%esp) - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%ebp - roll $5,%eax - pslld $2,%xmm7 - addl %ebp,%edi - xorl %ebx,%esi - psrld $30,%xmm1 - xorl %ecx,%ebx - addl %eax,%edi - addl 56(%esp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - por %xmm1,%xmm7 - movl %edi,%ebp - xorl %ebx,%esi - movdqa 64(%esp),%xmm1 - roll $5,%edi - addl %esi,%edx - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - pshufd $238,%xmm6,%xmm2 - addl 60(%esp),%ecx - andl 
%eax,%ebp - xorl %ebx,%eax - rorl $7,%edi - movl %edx,%esi - xorl %eax,%ebp - roll $5,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - addl (%esp),%ebx - pxor %xmm4,%xmm0 - punpcklqdq %xmm7,%xmm2 - andl %edi,%esi - xorl %eax,%edi - rorl $7,%edx - pxor %xmm1,%xmm0 - movdqa %xmm4,64(%esp) - movl %ecx,%ebp - xorl %edi,%esi - roll $5,%ecx - movdqa %xmm3,%xmm4 - addl %esi,%ebx - paddd %xmm7,%xmm3 - xorl %edx,%ebp - pxor %xmm2,%xmm0 - xorl %edi,%edx - addl %ecx,%ebx - addl 4(%esp),%eax - andl %edx,%ebp - movdqa %xmm0,%xmm2 - movdqa %xmm3,48(%esp) - xorl %edi,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - pslld $2,%xmm0 - addl %ebp,%eax - xorl %ecx,%esi - psrld $30,%xmm2 - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%esp),%edi - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - por %xmm2,%xmm0 - movl %eax,%ebp - xorl %ecx,%esi - movdqa 80(%esp),%xmm2 - roll $5,%eax - addl %esi,%edi - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - pshufd $238,%xmm7,%xmm3 - addl 12(%esp),%edx - andl %ebx,%ebp - xorl %ecx,%ebx - rorl $7,%eax - movl %edi,%esi - xorl %ebx,%ebp - roll $5,%edi - addl %ebp,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - addl 16(%esp),%ecx - pxor %xmm5,%xmm1 - punpcklqdq %xmm0,%xmm3 - andl %eax,%esi - xorl %ebx,%eax - rorl $7,%edi - pxor %xmm2,%xmm1 - movdqa %xmm5,80(%esp) - movl %edx,%ebp - xorl %eax,%esi - roll $5,%edx - movdqa %xmm4,%xmm5 - addl %esi,%ecx - paddd %xmm0,%xmm4 - xorl %edi,%ebp - pxor %xmm3,%xmm1 - xorl %eax,%edi - addl %edx,%ecx - addl 20(%esp),%ebx - andl %edi,%ebp - movdqa %xmm1,%xmm3 - movdqa %xmm4,(%esp) - xorl %eax,%edi - rorl $7,%edx - movl %ecx,%esi - xorl %edi,%ebp - roll $5,%ecx - pslld $2,%xmm1 - addl %ebp,%ebx - xorl %edx,%esi - psrld $30,%xmm3 - xorl %edi,%edx - addl %ecx,%ebx - addl 24(%esp),%eax - andl %edx,%esi - xorl %edi,%edx - rorl $7,%ecx - por %xmm3,%xmm1 - movl %ebx,%ebp - xorl %edx,%esi - movdqa 96(%esp),%xmm3 - roll $5,%ebx - addl %esi,%eax - xorl %ecx,%ebp - xorl 
%edx,%ecx - addl %ebx,%eax - pshufd $238,%xmm0,%xmm4 - addl 28(%esp),%edi - andl %ecx,%ebp - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%ebp - roll $5,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - addl 32(%esp),%edx - pxor %xmm6,%xmm2 - punpcklqdq %xmm1,%xmm4 - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - pxor %xmm3,%xmm2 - movdqa %xmm6,96(%esp) - movl %edi,%ebp - xorl %ebx,%esi - roll $5,%edi - movdqa %xmm5,%xmm6 - addl %esi,%edx - paddd %xmm1,%xmm5 - xorl %eax,%ebp - pxor %xmm4,%xmm2 - xorl %ebx,%eax - addl %edi,%edx - addl 36(%esp),%ecx - andl %eax,%ebp - movdqa %xmm2,%xmm4 - movdqa %xmm5,16(%esp) - xorl %ebx,%eax - rorl $7,%edi - movl %edx,%esi - xorl %eax,%ebp - roll $5,%edx - pslld $2,%xmm2 - addl %ebp,%ecx - xorl %edi,%esi - psrld $30,%xmm4 - xorl %eax,%edi - addl %edx,%ecx - addl 40(%esp),%ebx - andl %edi,%esi - xorl %eax,%edi - rorl $7,%edx - por %xmm4,%xmm2 - movl %ecx,%ebp - xorl %edi,%esi - movdqa 64(%esp),%xmm4 - roll $5,%ecx - addl %esi,%ebx - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - pshufd $238,%xmm1,%xmm5 - addl 44(%esp),%eax - andl %edx,%ebp - xorl %edi,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%ebp - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - addl %ebx,%eax - addl 48(%esp),%edi - pxor %xmm7,%xmm3 - punpcklqdq %xmm2,%xmm5 - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - pxor %xmm4,%xmm3 - movdqa %xmm7,64(%esp) - addl %esi,%edi - xorl %ecx,%ebp - movdqa %xmm6,%xmm7 - rorl $7,%ebx - paddd %xmm2,%xmm6 - addl %eax,%edi - pxor %xmm5,%xmm3 - addl 52(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - movdqa %xmm3,%xmm5 - movdqa %xmm6,32(%esp) - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - pslld $2,%xmm3 - addl 56(%esp),%ecx - xorl %eax,%esi - psrld $30,%xmm5 - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - por %xmm5,%xmm3 - addl 60(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - addl 
%ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl %ecx,%ebx - addl (%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - paddd %xmm3,%xmm7 - addl %ebx,%eax - addl 4(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - movdqa %xmm7,48(%esp) - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 8(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - addl 12(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - movl 196(%esp),%ebp - cmpl 200(%esp),%ebp - je L005done - movdqa 160(%esp),%xmm7 - movdqa 176(%esp),%xmm6 - movdqu (%ebp),%xmm0 - movdqu 16(%ebp),%xmm1 - movdqu 32(%ebp),%xmm2 - movdqu 48(%ebp),%xmm3 - addl $64,%ebp -.byte 102,15,56,0,198 - movl %ebp,196(%esp) - movdqa %xmm7,96(%esp) - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx -.byte 102,15,56,0,206 - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - paddd %xmm7,%xmm0 - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - movdqa %xmm0,(%esp) - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - psubd %xmm7,%xmm0 - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi -.byte 102,15,56,0,214 - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - paddd %xmm7,%xmm1 - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - movdqa %xmm1,16(%esp) - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - psubd %xmm7,%xmm1 - roll $5,%ebx - addl 
%esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax -.byte 102,15,56,0,222 - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - paddd %xmm7,%xmm2 - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - movdqa %xmm2,32(%esp) - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - psubd %xmm7,%xmm2 - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - rorl $7,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %ecx,%ebx - movl %edx,12(%ebp) - xorl %edx,%ebx - movl %edi,16(%ebp) - movl %esi,%ebp - pshufd $238,%xmm0,%xmm4 - andl %ebx,%esi - movl %ebp,%ebx - jmp L004loop -.align 4,0x90 -L005done: - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - roll $5,%eax - addl %esi,%edi - xorl %ecx,%ebp - rorl $7,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - roll $5,%edi - addl %ebp,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - roll $5,%edx - addl %esi,%ecx - xorl %eax,%ebp - rorl $7,%edi - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - roll $5,%ecx - addl %ebp,%ebx - xorl %edi,%esi - rorl $7,%edx - addl 
%ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - roll $5,%ebx - addl %esi,%eax - xorl %edx,%ebp - rorl $7,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - roll $5,%eax - addl %ebp,%edi - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - roll $5,%edi - addl %esi,%edx - xorl %ebx,%ebp - rorl $7,%eax - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - roll $5,%edx - addl %ebp,%ecx - xorl %eax,%esi - rorl $7,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - roll $5,%ecx - addl %esi,%ebx - xorl %edi,%ebp - rorl $7,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - roll $5,%ebx - addl %ebp,%eax - rorl $7,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - movl 204(%esp),%esp - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - movl %edi,16(%ebp) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.private_extern __sha1_block_data_order_avx -.align 4 -__sha1_block_data_order_avx: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - call L006pic_point -L006pic_point: - popl %ebp - leal LK_XX_XX-L006pic_point(%ebp),%ebp -Lavx_shortcut: - vzeroall - vmovdqa (%ebp),%xmm7 - vmovdqa 16(%ebp),%xmm0 - vmovdqa 32(%ebp),%xmm1 - vmovdqa 48(%ebp),%xmm2 - vmovdqa 64(%ebp),%xmm6 - movl 20(%esp),%edi - movl 24(%esp),%ebp - movl 28(%esp),%edx - movl %esp,%esi - subl $208,%esp - andl $-64,%esp - vmovdqa %xmm0,112(%esp) - vmovdqa %xmm1,128(%esp) - vmovdqa %xmm2,144(%esp) - shll $6,%edx - vmovdqa %xmm7,160(%esp) - addl %ebp,%edx - vmovdqa %xmm6,176(%esp) - addl $64,%ebp - movl %edi,192(%esp) - movl %ebp,196(%esp) - movl %edx,200(%esp) - movl %esi,204(%esp) - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - movl 16(%edi),%edi - movl %ebx,%esi - vmovdqu 
-64(%ebp),%xmm0 - vmovdqu -48(%ebp),%xmm1 - vmovdqu -32(%ebp),%xmm2 - vmovdqu -16(%ebp),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - vpshufb %xmm6,%xmm1,%xmm1 - vpshufb %xmm6,%xmm2,%xmm2 - vmovdqa %xmm7,96(%esp) - vpshufb %xmm6,%xmm3,%xmm3 - vpaddd %xmm7,%xmm0,%xmm4 - vpaddd %xmm7,%xmm1,%xmm5 - vpaddd %xmm7,%xmm2,%xmm6 - vmovdqa %xmm4,(%esp) - movl %ecx,%ebp - vmovdqa %xmm5,16(%esp) - xorl %edx,%ebp - vmovdqa %xmm6,32(%esp) - andl %ebp,%esi - jmp L007loop -.align 4,0x90 -L007loop: - shrdl $2,%ebx,%ebx - xorl %edx,%esi - vpalignr $8,%xmm0,%xmm1,%xmm4 - movl %eax,%ebp - addl (%esp),%edi - vpaddd %xmm3,%xmm7,%xmm7 - vmovdqa %xmm0,64(%esp) - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrldq $4,%xmm3,%xmm6 - addl %esi,%edi - andl %ebx,%ebp - vpxor %xmm0,%xmm4,%xmm4 - xorl %ecx,%ebx - addl %eax,%edi - vpxor %xmm2,%xmm6,%xmm6 - shrdl $7,%eax,%eax - xorl %ecx,%ebp - vmovdqa %xmm7,48(%esp) - movl %edi,%esi - addl 4(%esp),%edx - vpxor %xmm6,%xmm4,%xmm4 - xorl %ebx,%eax - shldl $5,%edi,%edi - addl %ebp,%edx - andl %eax,%esi - vpsrld $31,%xmm4,%xmm6 - xorl %ebx,%eax - addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%esi - vpslldq $12,%xmm4,%xmm0 - vpaddd %xmm4,%xmm4,%xmm4 - movl %edx,%ebp - addl 8(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpsrld $30,%xmm0,%xmm7 - vpor %xmm6,%xmm4,%xmm4 - addl %esi,%ecx - andl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - vpslld $2,%xmm0,%xmm0 - shrdl $7,%edx,%edx - xorl %eax,%ebp - vpxor %xmm7,%xmm4,%xmm4 - movl %ecx,%esi - addl 12(%esp),%ebx - xorl %edi,%edx - shldl $5,%ecx,%ecx - vpxor %xmm0,%xmm4,%xmm4 - addl %ebp,%ebx - andl %edx,%esi - vmovdqa 96(%esp),%xmm0 - xorl %edi,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %edi,%esi - vpalignr $8,%xmm1,%xmm2,%xmm5 - movl %ebx,%ebp - addl 16(%esp),%eax - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqa %xmm1,80(%esp) - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrldq $4,%xmm4,%xmm7 - addl %esi,%eax - andl %ecx,%ebp - vpxor %xmm1,%xmm5,%xmm5 - xorl %edx,%ecx - addl %ebx,%eax - vpxor %xmm3,%xmm7,%xmm7 - shrdl 
$7,%ebx,%ebx - xorl %edx,%ebp - vmovdqa %xmm0,(%esp) - movl %eax,%esi - addl 20(%esp),%edi - vpxor %xmm7,%xmm5,%xmm5 - xorl %ecx,%ebx - shldl $5,%eax,%eax - addl %ebp,%edi - andl %ebx,%esi - vpsrld $31,%xmm5,%xmm7 - xorl %ecx,%ebx - addl %eax,%edi - shrdl $7,%eax,%eax - xorl %ecx,%esi - vpslldq $12,%xmm5,%xmm1 - vpaddd %xmm5,%xmm5,%xmm5 - movl %edi,%ebp - addl 24(%esp),%edx - xorl %ebx,%eax - shldl $5,%edi,%edi - vpsrld $30,%xmm1,%xmm0 - vpor %xmm7,%xmm5,%xmm5 - addl %esi,%edx - andl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - vpslld $2,%xmm1,%xmm1 - shrdl $7,%edi,%edi - xorl %ebx,%ebp - vpxor %xmm0,%xmm5,%xmm5 - movl %edx,%esi - addl 28(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpxor %xmm1,%xmm5,%xmm5 - addl %ebp,%ecx - andl %edi,%esi - vmovdqa 112(%esp),%xmm1 - xorl %eax,%edi - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - vpalignr $8,%xmm2,%xmm3,%xmm6 - movl %ecx,%ebp - addl 32(%esp),%ebx - vpaddd %xmm5,%xmm1,%xmm1 - vmovdqa %xmm2,96(%esp) - xorl %edi,%edx - shldl $5,%ecx,%ecx - vpsrldq $4,%xmm5,%xmm0 - addl %esi,%ebx - andl %edx,%ebp - vpxor %xmm2,%xmm6,%xmm6 - xorl %edi,%edx - addl %ecx,%ebx - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%ecx,%ecx - xorl %edi,%ebp - vmovdqa %xmm1,16(%esp) - movl %ebx,%esi - addl 36(%esp),%eax - vpxor %xmm0,%xmm6,%xmm6 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - addl %ebp,%eax - andl %ecx,%esi - vpsrld $31,%xmm6,%xmm0 - xorl %edx,%ecx - addl %ebx,%eax - shrdl $7,%ebx,%ebx - xorl %edx,%esi - vpslldq $12,%xmm6,%xmm2 - vpaddd %xmm6,%xmm6,%xmm6 - movl %eax,%ebp - addl 40(%esp),%edi - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm1 - vpor %xmm0,%xmm6,%xmm6 - addl %esi,%edi - andl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - vpslld $2,%xmm2,%xmm2 - vmovdqa 64(%esp),%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%ebp - vpxor %xmm1,%xmm6,%xmm6 - movl %edi,%esi - addl 44(%esp),%edx - xorl %ebx,%eax - shldl $5,%edi,%edi - vpxor %xmm2,%xmm6,%xmm6 - addl %ebp,%edx - andl %eax,%esi - vmovdqa 112(%esp),%xmm2 - xorl %ebx,%eax - 
addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%esi - vpalignr $8,%xmm3,%xmm4,%xmm7 - movl %edx,%ebp - addl 48(%esp),%ecx - vpaddd %xmm6,%xmm2,%xmm2 - vmovdqa %xmm3,64(%esp) - xorl %eax,%edi - shldl $5,%edx,%edx - vpsrldq $4,%xmm6,%xmm1 - addl %esi,%ecx - andl %edi,%ebp - vpxor %xmm3,%xmm7,%xmm7 - xorl %eax,%edi - addl %edx,%ecx - vpxor %xmm5,%xmm1,%xmm1 - shrdl $7,%edx,%edx - xorl %eax,%ebp - vmovdqa %xmm2,32(%esp) - movl %ecx,%esi - addl 52(%esp),%ebx - vpxor %xmm1,%xmm7,%xmm7 - xorl %edi,%edx - shldl $5,%ecx,%ecx - addl %ebp,%ebx - andl %edx,%esi - vpsrld $31,%xmm7,%xmm1 - xorl %edi,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %edi,%esi - vpslldq $12,%xmm7,%xmm3 - vpaddd %xmm7,%xmm7,%xmm7 - movl %ebx,%ebp - addl 56(%esp),%eax - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm2 - vpor %xmm1,%xmm7,%xmm7 - addl %esi,%eax - andl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - vmovdqa 80(%esp),%xmm1 - shrdl $7,%ebx,%ebx - xorl %edx,%ebp - vpxor %xmm2,%xmm7,%xmm7 - movl %eax,%esi - addl 60(%esp),%edi - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpxor %xmm3,%xmm7,%xmm7 - addl %ebp,%edi - andl %ebx,%esi - vmovdqa 112(%esp),%xmm3 - xorl %ecx,%ebx - addl %eax,%edi - vpalignr $8,%xmm6,%xmm7,%xmm2 - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%esi - movl %edi,%ebp - addl (%esp),%edx - vpxor %xmm1,%xmm0,%xmm0 - vmovdqa %xmm4,80(%esp) - xorl %ebx,%eax - shldl $5,%edi,%edi - vmovdqa %xmm3,%xmm4 - vpaddd %xmm7,%xmm3,%xmm3 - addl %esi,%edx - andl %eax,%ebp - vpxor %xmm2,%xmm0,%xmm0 - xorl %ebx,%eax - addl %edi,%edx - shrdl $7,%edi,%edi - xorl %ebx,%ebp - vpsrld $30,%xmm0,%xmm2 - vmovdqa %xmm3,48(%esp) - movl %edx,%esi - addl 4(%esp),%ecx - xorl %eax,%edi - shldl $5,%edx,%edx - vpslld $2,%xmm0,%xmm0 - addl %ebp,%ecx - andl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - movl %ecx,%ebp - addl 8(%esp),%ebx - vpor %xmm2,%xmm0,%xmm0 - xorl %edi,%edx - shldl $5,%ecx,%ecx - vmovdqa 96(%esp),%xmm2 - addl 
%esi,%ebx - andl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 12(%esp),%eax - xorl %edi,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm7,%xmm0,%xmm3 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm5,96(%esp) - addl %esi,%edi - xorl %ecx,%ebp - vmovdqa %xmm4,%xmm5 - vpaddd %xmm0,%xmm4,%xmm4 - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpxor %xmm3,%xmm1,%xmm1 - addl 20(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - vpsrld $30,%xmm1,%xmm3 - vmovdqa %xmm4,(%esp) - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpslld $2,%xmm1,%xmm1 - addl 24(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vpor %xmm3,%xmm1,%xmm1 - addl 28(%esp),%ebx - xorl %edi,%ebp - vmovdqa 64(%esp),%xmm3 - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpalignr $8,%xmm0,%xmm1,%xmm4 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - vpxor %xmm3,%xmm2,%xmm2 - vmovdqa %xmm6,64(%esp) - addl %esi,%eax - xorl %edx,%ebp - vmovdqa 128(%esp),%xmm6 - vpaddd %xmm1,%xmm5,%xmm5 - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpxor %xmm4,%xmm2,%xmm2 - addl 36(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm4 - vmovdqa %xmm5,16(%esp) - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpslld $2,%xmm2,%xmm2 - addl 40(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - vpor %xmm4,%xmm2,%xmm2 - addl 44(%esp),%ecx - xorl %eax,%ebp - vmovdqa 80(%esp),%xmm4 - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - 
addl %edx,%ecx - vpalignr $8,%xmm1,%xmm2,%xmm5 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - vpxor %xmm4,%xmm3,%xmm3 - vmovdqa %xmm7,80(%esp) - addl %esi,%ebx - xorl %edi,%ebp - vmovdqa %xmm6,%xmm7 - vpaddd %xmm2,%xmm6,%xmm6 - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpxor %xmm5,%xmm3,%xmm3 - addl 52(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm5 - vmovdqa %xmm6,32(%esp) - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - addl 56(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpor %xmm5,%xmm3,%xmm3 - addl 60(%esp),%edx - xorl %ebx,%ebp - vmovdqa 96(%esp),%xmm5 - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpalignr $8,%xmm2,%xmm3,%xmm6 - vpxor %xmm0,%xmm4,%xmm4 - addl (%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - vpxor %xmm5,%xmm4,%xmm4 - vmovdqa %xmm0,96(%esp) - addl %esi,%ecx - xorl %eax,%ebp - vmovdqa %xmm7,%xmm0 - vpaddd %xmm3,%xmm7,%xmm7 - shrdl $7,%edi,%edi - addl %edx,%ecx - vpxor %xmm6,%xmm4,%xmm4 - addl 4(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - vpsrld $30,%xmm4,%xmm6 - vmovdqa %xmm7,48(%esp) - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpslld $2,%xmm4,%xmm4 - addl 8(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpor %xmm6,%xmm4,%xmm4 - addl 12(%esp),%edi - xorl %ecx,%ebp - vmovdqa 64(%esp),%xmm6 - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpalignr $8,%xmm3,%xmm4,%xmm7 - vpxor %xmm1,%xmm5,%xmm5 - addl 16(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - vpxor %xmm6,%xmm5,%xmm5 - vmovdqa %xmm1,64(%esp) - 
addl %esi,%edx - xorl %ebx,%ebp - vmovdqa %xmm0,%xmm1 - vpaddd %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - addl %edi,%edx - vpxor %xmm7,%xmm5,%xmm5 - addl 20(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - vpsrld $30,%xmm5,%xmm7 - vmovdqa %xmm0,(%esp) - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - vpslld $2,%xmm5,%xmm5 - addl 24(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpor %xmm7,%xmm5,%xmm5 - addl 28(%esp),%eax - vmovdqa 80(%esp),%xmm7 - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm4,%xmm5,%xmm0 - vpxor %xmm2,%xmm6,%xmm6 - addl 32(%esp),%edi - andl %ecx,%esi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vpxor %xmm7,%xmm6,%xmm6 - vmovdqa %xmm2,80(%esp) - movl %eax,%ebp - xorl %ecx,%esi - vmovdqa %xmm1,%xmm2 - vpaddd %xmm5,%xmm1,%xmm1 - shldl $5,%eax,%eax - addl %esi,%edi - vpxor %xmm0,%xmm6,%xmm6 - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - addl 36(%esp),%edx - vpsrld $30,%xmm6,%xmm0 - vmovdqa %xmm1,16(%esp) - andl %ebx,%ebp - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %edi,%esi - vpslld $2,%xmm6,%xmm6 - xorl %ebx,%ebp - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - addl 40(%esp),%ecx - andl %eax,%esi - vpor %xmm0,%xmm6,%xmm6 - xorl %ebx,%eax - shrdl $7,%edi,%edi - vmovdqa 96(%esp),%xmm0 - movl %edx,%ebp - xorl %eax,%esi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - addl 44(%esp),%ebx - andl %edi,%ebp - xorl %eax,%edi - shrdl $7,%edx,%edx - movl %ecx,%esi - xorl %edi,%ebp - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - vpalignr $8,%xmm5,%xmm6,%xmm1 - vpxor %xmm3,%xmm7,%xmm7 - addl 48(%esp),%eax - andl %edx,%esi - xorl %edi,%edx - shrdl $7,%ecx,%ecx - vpxor %xmm0,%xmm7,%xmm7 - 
vmovdqa %xmm3,96(%esp) - movl %ebx,%ebp - xorl %edx,%esi - vmovdqa 144(%esp),%xmm3 - vpaddd %xmm6,%xmm2,%xmm2 - shldl $5,%ebx,%ebx - addl %esi,%eax - vpxor %xmm1,%xmm7,%xmm7 - xorl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%esp),%edi - vpsrld $30,%xmm7,%xmm1 - vmovdqa %xmm2,32(%esp) - andl %ecx,%ebp - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - vpslld $2,%xmm7,%xmm7 - xorl %ecx,%ebp - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - addl 56(%esp),%edx - andl %ebx,%esi - vpor %xmm1,%xmm7,%xmm7 - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vmovdqa 64(%esp),%xmm1 - movl %edi,%ebp - xorl %ebx,%esi - shldl $5,%edi,%edi - addl %esi,%edx - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - addl 60(%esp),%ecx - andl %eax,%ebp - xorl %ebx,%eax - shrdl $7,%edi,%edi - movl %edx,%esi - xorl %eax,%ebp - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - vpalignr $8,%xmm6,%xmm7,%xmm2 - vpxor %xmm4,%xmm0,%xmm0 - addl (%esp),%ebx - andl %edi,%esi - xorl %eax,%edi - shrdl $7,%edx,%edx - vpxor %xmm1,%xmm0,%xmm0 - vmovdqa %xmm4,64(%esp) - movl %ecx,%ebp - xorl %edi,%esi - vmovdqa %xmm3,%xmm4 - vpaddd %xmm7,%xmm3,%xmm3 - shldl $5,%ecx,%ecx - addl %esi,%ebx - vpxor %xmm2,%xmm0,%xmm0 - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 4(%esp),%eax - vpsrld $30,%xmm0,%xmm2 - vmovdqa %xmm3,48(%esp) - andl %edx,%ebp - xorl %edi,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - vpslld $2,%xmm0,%xmm0 - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%esp),%edi - andl %ecx,%esi - vpor %xmm2,%xmm0,%xmm0 - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vmovdqa 80(%esp),%xmm2 - movl %eax,%ebp - xorl %ecx,%esi - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ebx,%ebp - xorl %ecx,%ebx - addl %eax,%edi - addl 12(%esp),%edx - andl %ebx,%ebp - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %edi,%esi - xorl %ebx,%ebp - shldl $5,%edi,%edi - addl %ebp,%edx - 
xorl %eax,%esi - xorl %ebx,%eax - addl %edi,%edx - vpalignr $8,%xmm7,%xmm0,%xmm3 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%esp),%ecx - andl %eax,%esi - xorl %ebx,%eax - shrdl $7,%edi,%edi - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm5,80(%esp) - movl %edx,%ebp - xorl %eax,%esi - vmovdqa %xmm4,%xmm5 - vpaddd %xmm0,%xmm4,%xmm4 - shldl $5,%edx,%edx - addl %esi,%ecx - vpxor %xmm3,%xmm1,%xmm1 - xorl %edi,%ebp - xorl %eax,%edi - addl %edx,%ecx - addl 20(%esp),%ebx - vpsrld $30,%xmm1,%xmm3 - vmovdqa %xmm4,(%esp) - andl %edi,%ebp - xorl %eax,%edi - shrdl $7,%edx,%edx - movl %ecx,%esi - vpslld $2,%xmm1,%xmm1 - xorl %edi,%ebp - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edx,%esi - xorl %edi,%edx - addl %ecx,%ebx - addl 24(%esp),%eax - andl %edx,%esi - vpor %xmm3,%xmm1,%xmm1 - xorl %edi,%edx - shrdl $7,%ecx,%ecx - vmovdqa 96(%esp),%xmm3 - movl %ebx,%ebp - xorl %edx,%esi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %ecx,%ebp - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%esp),%edi - andl %ecx,%ebp - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - xorl %ecx,%ebp - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%edi - vpalignr $8,%xmm0,%xmm1,%xmm4 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%esp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vpxor %xmm3,%xmm2,%xmm2 - vmovdqa %xmm6,96(%esp) - movl %edi,%ebp - xorl %ebx,%esi - vmovdqa %xmm5,%xmm6 - vpaddd %xmm1,%xmm5,%xmm5 - shldl $5,%edi,%edi - addl %esi,%edx - vpxor %xmm4,%xmm2,%xmm2 - xorl %eax,%ebp - xorl %ebx,%eax - addl %edi,%edx - addl 36(%esp),%ecx - vpsrld $30,%xmm2,%xmm4 - vmovdqa %xmm5,16(%esp) - andl %eax,%ebp - xorl %ebx,%eax - shrdl $7,%edi,%edi - movl %edx,%esi - vpslld $2,%xmm2,%xmm2 - xorl %eax,%ebp - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %edi,%esi - xorl %eax,%edi - addl %edx,%ecx - addl 40(%esp),%ebx - andl %edi,%esi - vpor %xmm4,%xmm2,%xmm2 - xorl %eax,%edi - shrdl $7,%edx,%edx - vmovdqa 64(%esp),%xmm4 - movl %ecx,%ebp - xorl %edi,%esi - shldl $5,%ecx,%ecx - addl 
%esi,%ebx - xorl %edx,%ebp - xorl %edi,%edx - addl %ecx,%ebx - addl 44(%esp),%eax - andl %edx,%ebp - xorl %edi,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%ebp - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - addl %ebx,%eax - vpalignr $8,%xmm1,%xmm2,%xmm5 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - vpxor %xmm4,%xmm3,%xmm3 - vmovdqa %xmm7,64(%esp) - addl %esi,%edi - xorl %ecx,%ebp - vmovdqa %xmm6,%xmm7 - vpaddd %xmm2,%xmm6,%xmm6 - shrdl $7,%ebx,%ebx - addl %eax,%edi - vpxor %xmm5,%xmm3,%xmm3 - addl 52(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - vpsrld $30,%xmm3,%xmm5 - vmovdqa %xmm6,32(%esp) - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - vpslld $2,%xmm3,%xmm3 - addl 56(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vpor %xmm5,%xmm3,%xmm3 - addl 60(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl (%esp),%eax - vpaddd %xmm3,%xmm7,%xmm7 - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - vmovdqa %xmm7,48(%esp) - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 4(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 8(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - addl 12(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - movl 196(%esp),%ebp - cmpl 200(%esp),%ebp - je L008done - vmovdqa 160(%esp),%xmm7 - vmovdqa 176(%esp),%xmm6 - vmovdqu (%ebp),%xmm0 - vmovdqu 16(%ebp),%xmm1 - vmovdqu 32(%ebp),%xmm2 - vmovdqu 48(%ebp),%xmm3 - addl $64,%ebp - vpshufb 
%xmm6,%xmm0,%xmm0 - movl %ebp,196(%esp) - vmovdqa %xmm7,96(%esp) - addl 16(%esp),%ebx - xorl %edi,%esi - vpshufb %xmm6,%xmm1,%xmm1 - movl %ecx,%ebp - shldl $5,%ecx,%ecx - vpaddd %xmm7,%xmm0,%xmm4 - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - vmovdqa %xmm4,(%esp) - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - vpshufb %xmm6,%xmm2,%xmm2 - movl %edx,%ebp - shldl $5,%edx,%edx - vpaddd %xmm7,%xmm1,%xmm5 - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - vmovdqa %xmm5,16(%esp) - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - vpshufb %xmm6,%xmm3,%xmm3 - movl %edi,%ebp - shldl $5,%edi,%edi - vpaddd %xmm7,%xmm2,%xmm6 - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - vmovdqa %xmm6,32(%esp) - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl 
$5,%ebx,%ebx - addl %ebp,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - movl 192(%esp),%ebp - addl (%ebp),%eax - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,%ebx - movl %ecx,8(%ebp) - xorl %edx,%ebx - movl %edx,12(%ebp) - movl %edi,16(%ebp) - movl %esi,%ebp - andl %ebx,%esi - movl %ebp,%ebx - jmp L007loop -.align 4,0x90 -L008done: - addl 16(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 20(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%esp),%edi - xorl %ecx,%esi - movl %eax,%ebp - shldl $5,%eax,%eax - addl %esi,%edi - xorl %ecx,%ebp - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 28(%esp),%edx - xorl %ebx,%ebp - movl %edi,%esi - shldl $5,%edi,%edi - addl %ebp,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %edi,%edx - addl 32(%esp),%ecx - xorl %eax,%esi - movl %edx,%ebp - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%ebp - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 36(%esp),%ebx - xorl %edi,%ebp - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %ebp,%ebx - xorl %edi,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%esp),%eax - xorl %edx,%esi - movl %ebx,%ebp - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%ebp - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%esp),%edi - xorl %ecx,%ebp - movl %eax,%esi - shldl $5,%eax,%eax - addl %ebp,%edi - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%edi - addl 48(%esp),%edx - xorl %ebx,%esi - movl %edi,%ebp - shldl $5,%edi,%edi - addl %esi,%edx - xorl %ebx,%ebp - shrdl $7,%eax,%eax - addl %edi,%edx - addl 52(%esp),%ecx - xorl %eax,%ebp - movl %edx,%esi - shldl $5,%edx,%edx - addl %ebp,%ecx - xorl %eax,%esi - shrdl $7,%edi,%edi - addl %edx,%ecx - addl 56(%esp),%ebx - xorl %edi,%esi - movl %ecx,%ebp - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl 
%edi,%ebp - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%esp),%eax - xorl %edx,%ebp - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %ebp,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vzeroall - movl 192(%esp),%ebp - addl (%ebp),%eax - movl 204(%esp),%esp - addl 4(%ebp),%esi - addl 8(%ebp),%ecx - movl %eax,(%ebp) - addl 12(%ebp),%edx - movl %esi,4(%ebp) - addl 16(%ebp),%edi - movl %ecx,8(%ebp) - movl %edx,12(%ebp) - movl %edi,16(%ebp) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 6,0x90 -LK_XX_XX: -.long 1518500249,1518500249,1518500249,1518500249 -.long 1859775393,1859775393,1859775393,1859775393 -.long 2400959708,2400959708,2400959708,2400959708 -.long 3395469782,3395469782,3395469782,3395469782 -.long 66051,67438087,134810123,202182159 -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 -.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 -.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 -.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha256-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha256-586.S deleted file mode 100644 index a974488943..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha256-586.S +++ /dev/null @@ -1,5568 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _sha256_block_data_order -.private_extern _sha256_block_data_order -.align 4 -_sha256_block_data_order: -L_sha256_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl %esp,%ebx - call L000pic_point -L000pic_point: - popl %ebp - leal L001K256-L000pic_point(%ebp),%ebp - subl $16,%esp - andl $-64,%esp - shll $6,%eax - addl %edi,%eax - movl %esi,(%esp) - movl %edi,4(%esp) - movl %eax,8(%esp) - movl %ebx,12(%esp) - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L001K256(%ebp),%edx - movl (%edx),%ecx - movl 4(%edx),%ebx - testl $1048576,%ecx - jnz L002loop - movl 8(%edx),%edx - testl $16777216,%ecx - jz L003no_xmm - andl $1073741824,%ecx - andl $268435968,%ebx - orl %ebx,%ecx - andl $1342177280,%ecx - cmpl $1342177280,%ecx - je L004AVX - testl $512,%ebx - jnz L005SSSE3 -L003no_xmm: - subl %edi,%eax - cmpl $256,%eax - jae L006unrolled - jmp L002loop -.align 4,0x90 -L002loop: - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - bswap %eax - movl 12(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 16(%edi),%eax - movl 20(%edi),%ebx - movl 24(%edi),%ecx - bswap %eax - movl 28(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 32(%edi),%eax - movl 36(%edi),%ebx - movl 40(%edi),%ecx - bswap %eax - movl 44(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - movl 48(%edi),%eax - movl 52(%edi),%ebx - movl 56(%edi),%ecx - bswap %eax - movl 60(%edi),%edx - bswap %ebx - pushl %eax - bswap %ecx - pushl %ebx - bswap %edx - pushl %ecx - pushl %edx - addl $64,%edi - leal -36(%esp),%esp - movl %edi,104(%esp) - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,8(%esp) - xorl %ecx,%ebx - movl %ecx,12(%esp) 
- movl %edi,16(%esp) - movl %ebx,(%esp) - movl 16(%esi),%edx - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edi - movl %ebx,24(%esp) - movl %ecx,28(%esp) - movl %edi,32(%esp) -.align 4,0x90 -L00700_15: - movl %edx,%ecx - movl 24(%esp),%esi - rorl $14,%ecx - movl 28(%esp),%edi - xorl %edx,%ecx - xorl %edi,%esi - movl 96(%esp),%ebx - rorl $5,%ecx - andl %edx,%esi - movl %edx,20(%esp) - xorl %ecx,%edx - addl 32(%esp),%ebx - xorl %edi,%esi - rorl $6,%edx - movl %eax,%ecx - addl %esi,%ebx - rorl $9,%ecx - addl %edx,%ebx - movl 8(%esp),%edi - xorl %eax,%ecx - movl %eax,4(%esp) - leal -4(%esp),%esp - rorl $11,%ecx - movl (%ebp),%esi - xorl %eax,%ecx - movl 20(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %esi,%ebx - movl %eax,(%esp) - addl %ebx,%edx - andl 4(%esp),%eax - addl %ecx,%ebx - xorl %edi,%eax - addl $4,%ebp - addl %ebx,%eax - cmpl $3248222580,%esi - jne L00700_15 - movl 156(%esp),%ecx - jmp L00816_63 -.align 4,0x90 -L00816_63: - movl %ecx,%ebx - movl 104(%esp),%esi - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 160(%esp),%ebx - shrl $10,%edi - addl 124(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 24(%esp),%esi - rorl $14,%ecx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %edx,%ecx - xorl %edi,%esi - movl %ebx,96(%esp) - rorl $5,%ecx - andl %edx,%esi - movl %edx,20(%esp) - xorl %ecx,%edx - addl 32(%esp),%ebx - xorl %edi,%esi - rorl $6,%edx - movl %eax,%ecx - addl %esi,%ebx - rorl $9,%ecx - addl %edx,%ebx - movl 8(%esp),%edi - xorl %eax,%ecx - movl %eax,4(%esp) - leal -4(%esp),%esp - rorl $11,%ecx - movl (%ebp),%esi - xorl %eax,%ecx - movl 20(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %esi,%ebx - movl %eax,(%esp) - addl %ebx,%edx - andl 4(%esp),%eax - addl %ecx,%ebx - xorl %edi,%eax - movl 156(%esp),%ecx - addl $4,%ebp - addl %ebx,%eax - cmpl $3329325298,%esi - jne L00816_63 - movl 356(%esp),%esi - movl 8(%esp),%ebx - movl 
16(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl 24(%esp),%eax - movl 28(%esp),%ebx - movl 32(%esp),%ecx - movl 360(%esp),%edi - addl 16(%esi),%edx - addl 20(%esi),%eax - addl 24(%esi),%ebx - addl 28(%esi),%ecx - movl %edx,16(%esi) - movl %eax,20(%esi) - movl %ebx,24(%esi) - movl %ecx,28(%esi) - leal 356(%esp),%esp - subl $256,%ebp - cmpl 8(%esp),%edi - jb L002loop - movl 12(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 6,0x90 -L001K256: -.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 -.long 66051,67438087,134810123,202182159 -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 -.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -.byte 62,0 -.align 4,0x90 -L006unrolled: - leal -96(%esp),%esp - movl (%esi),%eax - movl 4(%esi),%ebp - movl 8(%esi),%ecx - movl 12(%esi),%ebx - movl %ebp,4(%esp) - xorl %ecx,%ebp - movl %ecx,8(%esp) - movl %ebx,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %ebx,20(%esp) - movl %ecx,24(%esp) - movl %esi,28(%esp) - jmp L009grand_loop -.align 4,0x90 
-L009grand_loop: - movl (%edi),%ebx - movl 4(%edi),%ecx - bswap %ebx - movl 8(%edi),%esi - bswap %ecx - movl %ebx,32(%esp) - bswap %esi - movl %ecx,36(%esp) - movl %esi,40(%esp) - movl 12(%edi),%ebx - movl 16(%edi),%ecx - bswap %ebx - movl 20(%edi),%esi - bswap %ecx - movl %ebx,44(%esp) - bswap %esi - movl %ecx,48(%esp) - movl %esi,52(%esp) - movl 24(%edi),%ebx - movl 28(%edi),%ecx - bswap %ebx - movl 32(%edi),%esi - bswap %ecx - movl %ebx,56(%esp) - bswap %esi - movl %ecx,60(%esp) - movl %esi,64(%esp) - movl 36(%edi),%ebx - movl 40(%edi),%ecx - bswap %ebx - movl 44(%edi),%esi - bswap %ecx - movl %ebx,68(%esp) - bswap %esi - movl %ecx,72(%esp) - movl %esi,76(%esp) - movl 48(%edi),%ebx - movl 52(%edi),%ecx - bswap %ebx - movl 56(%edi),%esi - bswap %ecx - movl %ebx,80(%esp) - bswap %esi - movl %ecx,84(%esp) - movl %esi,88(%esp) - movl 60(%edi),%ebx - addl $64,%edi - bswap %ebx - movl %edi,100(%esp) - movl %ebx,92(%esp) - movl %edx,%ecx - movl 20(%esp),%esi - rorl $14,%edx - movl 24(%esp),%edi - xorl %ecx,%edx - movl 32(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1116352408(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 16(%esp),%ecx - rorl $14,%edx - movl 20(%esp),%edi - xorl %esi,%edx - movl 36(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1899447441(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl 
$2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 12(%esp),%esi - rorl $14,%edx - movl 16(%esp),%edi - xorl %ecx,%edx - movl 40(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3049323471(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 8(%esp),%ecx - rorl $14,%edx - movl 12(%esp),%edi - xorl %esi,%edx - movl 44(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3921009573(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 4(%esp),%esi - rorl $14,%edx - movl 8(%esp),%edi - xorl %ecx,%edx - movl 48(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 961987163(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl (%esp),%ecx - rorl $14,%edx - movl 4(%esp),%edi - xorl %esi,%edx - movl 52(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl 
$6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1508970993(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 28(%esp),%esi - rorl $14,%edx - movl (%esp),%edi - xorl %ecx,%edx - movl 56(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2453635748(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 24(%esp),%ecx - rorl $14,%edx - movl 28(%esp),%edi - xorl %esi,%edx - movl 60(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2870763221(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 20(%esp),%esi - rorl $14,%edx - movl 24(%esp),%edi - xorl %ecx,%edx - movl 64(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3624381080(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx 
- addl %ecx,%ebp - movl %edx,%esi - movl 16(%esp),%ecx - rorl $14,%edx - movl 20(%esp),%edi - xorl %esi,%edx - movl 68(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 310598401(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 12(%esp),%esi - rorl $14,%edx - movl 16(%esp),%edi - xorl %ecx,%edx - movl 72(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 607225278(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 8(%esp),%ecx - rorl $14,%edx - movl 12(%esp),%edi - xorl %esi,%edx - movl 76(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1426881987(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 4(%esp),%esi - rorl $14,%edx - movl 8(%esp),%edi - xorl %ecx,%edx - movl 80(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl 
$9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1925078388(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl (%esp),%ecx - rorl $14,%edx - movl 4(%esp),%edi - xorl %esi,%edx - movl 84(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2162078206(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl %edx,%ecx - movl 28(%esp),%esi - rorl $14,%edx - movl (%esp),%edi - xorl %ecx,%edx - movl 88(%esp),%ebx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2614888103(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl %edx,%esi - movl 24(%esp),%ecx - rorl $14,%edx - movl 28(%esp),%edi - xorl %esi,%edx - movl 92(%esp),%ebx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3248222580(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 
88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl $10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3835390401(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 4022224774(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 
12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 264347078(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 604807628(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - 
rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 770255983(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1249150122(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1555081692(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 
20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1996064986(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2554220882(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl 
%edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2821834349(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2952996808(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl 
$6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3210313671(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3336571891(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3584528711(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl 
$2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,88(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 113926993(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,92(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 338241895(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl 
$10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 666307205(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 773529912(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 
20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1294757372(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1396182291(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1695183700(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl 
%edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1986661051(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2177026350(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl 
$17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2456956037(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2730485921(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl 
%esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2820302411(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3259730800(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 
3345764771(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3516065817(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3600352804(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl 
$7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,88(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 4094571909(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl %ebx,92(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 275423344(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 36(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 88(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 32(%esp),%ebx - shrl $10,%edi - addl 68(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,32(%esp) - xorl %edi,%esi 
- rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 430227734(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 40(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 92(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 36(%esp),%ebx - shrl $10,%edi - addl 72(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,36(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 506948616(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 44(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 32(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 40(%esp),%ebx - shrl $10,%edi - addl 76(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,40(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx 
- andl %eax,%ebp - leal 659060556(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 48(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 36(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 44(%esp),%ebx - shrl $10,%edi - addl 80(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,44(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 883997877(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 52(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 40(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 48(%esp),%ebx - shrl $10,%edi - addl 84(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,48(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 958139571(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 56(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 44(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - 
shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 52(%esp),%ebx - shrl $10,%edi - addl 88(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,52(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1322822218(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 60(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 48(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 56(%esp),%ebx - shrl $10,%edi - addl 92(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl (%esp),%edi - xorl %ecx,%edx - movl %ebx,56(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1537002063(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 64(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 52(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 60(%esp),%ebx - shrl $10,%edi - addl 32(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - movl 
%ebx,60(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 1747873779(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 68(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 56(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 64(%esp),%ebx - shrl $10,%edi - addl 36(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 20(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 24(%esp),%edi - xorl %ecx,%edx - movl %ebx,64(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - addl 28(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 4(%esp),%edi - xorl %eax,%ecx - movl %eax,(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 1955562222(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 72(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 12(%esp),%edx - addl %ecx,%ebp - movl 60(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 68(%esp),%ebx - shrl $10,%edi - addl 40(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 16(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 20(%esp),%edi - xorl %esi,%edx - movl %ebx,68(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,12(%esp) - xorl %esi,%edx - addl 24(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl (%esp),%edi - xorl %ebp,%esi - movl %ebp,28(%esp) - 
xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2024104815(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 76(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 8(%esp),%edx - addl %esi,%eax - movl 64(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 72(%esp),%ebx - shrl $10,%edi - addl 44(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 12(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 16(%esp),%edi - xorl %ecx,%edx - movl %ebx,72(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - addl 20(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 28(%esp),%edi - xorl %eax,%ecx - movl %eax,24(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2227730452(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 80(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 4(%esp),%edx - addl %ecx,%ebp - movl 68(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 76(%esp),%ebx - shrl $10,%edi - addl 48(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 8(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 12(%esp),%edi - xorl %esi,%edx - movl %ebx,76(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,4(%esp) - xorl %esi,%edx - addl 16(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 24(%esp),%edi - xorl %ebp,%esi - movl %ebp,20(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2361852424(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 84(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl (%esp),%edx - addl %esi,%eax - movl 72(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl 
%esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 80(%esp),%ebx - shrl $10,%edi - addl 52(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 4(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 8(%esp),%edi - xorl %ecx,%edx - movl %ebx,80(%esp) - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - addl 12(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 20(%esp),%edi - xorl %eax,%ecx - movl %eax,16(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 2428436474(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 88(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 28(%esp),%edx - addl %ecx,%ebp - movl 76(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 84(%esp),%ebx - shrl $10,%edi - addl 56(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl (%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 4(%esp),%edi - xorl %esi,%edx - movl %ebx,84(%esp) - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,28(%esp) - xorl %esi,%edx - addl 8(%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 16(%esp),%edi - xorl %ebp,%esi - movl %ebp,12(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 2756734187(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - movl 92(%esp),%ecx - rorl $2,%esi - addl %edx,%eax - addl 24(%esp),%edx - addl %esi,%eax - movl 80(%esp),%esi - movl %ecx,%ebx - rorl $11,%ecx - movl %esi,%edi - rorl $2,%esi - xorl %ebx,%ecx - shrl $3,%ebx - rorl $7,%ecx - xorl %edi,%esi - xorl %ecx,%ebx - rorl $17,%esi - addl 88(%esp),%ebx - shrl $10,%edi - addl 60(%esp),%ebx - movl %edx,%ecx - xorl %esi,%edi - movl 28(%esp),%esi - rorl $14,%edx - addl %edi,%ebx - movl 
(%esp),%edi - xorl %ecx,%edx - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - addl 4(%esp),%ebx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%ebx - rorl $9,%ecx - movl %eax,%esi - movl 12(%esp),%edi - xorl %eax,%ecx - movl %eax,8(%esp) - xorl %edi,%eax - rorl $11,%ecx - andl %eax,%ebp - leal 3204031479(%ebx,%edx,1),%edx - xorl %esi,%ecx - xorl %edi,%ebp - movl 32(%esp),%esi - rorl $2,%ecx - addl %edx,%ebp - addl 20(%esp),%edx - addl %ecx,%ebp - movl 84(%esp),%ecx - movl %esi,%ebx - rorl $11,%esi - movl %ecx,%edi - rorl $2,%ecx - xorl %ebx,%esi - shrl $3,%ebx - rorl $7,%esi - xorl %edi,%ecx - xorl %esi,%ebx - rorl $17,%ecx - addl 92(%esp),%ebx - shrl $10,%edi - addl 64(%esp),%ebx - movl %edx,%esi - xorl %ecx,%edi - movl 24(%esp),%ecx - rorl $14,%edx - addl %edi,%ebx - movl 28(%esp),%edi - xorl %esi,%edx - xorl %edi,%ecx - rorl $5,%edx - andl %esi,%ecx - movl %esi,20(%esp) - xorl %esi,%edx - addl (%esp),%ebx - xorl %ecx,%edi - rorl $6,%edx - movl %ebp,%esi - addl %edi,%ebx - rorl $9,%esi - movl %ebp,%ecx - movl 8(%esp),%edi - xorl %ebp,%esi - movl %ebp,4(%esp) - xorl %edi,%ebp - rorl $11,%esi - andl %ebp,%eax - leal 3329325298(%ebx,%edx,1),%edx - xorl %ecx,%esi - xorl %edi,%eax - rorl $2,%esi - addl %edx,%eax - addl 16(%esp),%edx - addl %esi,%eax - movl 96(%esp),%esi - xorl %edi,%ebp - movl 12(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebp - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebp,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebp,4(%esp) - xorl %edi,%ebp - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ebx - movl 28(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ebx - addl 28(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %ebx,24(%esi) - movl %ecx,28(%esi) - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ebx,24(%esp) - movl %ecx,28(%esp) - cmpl 104(%esp),%edi - jb L009grand_loop - movl 108(%esp),%esp - 
popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 5,0x90 -L005SSSE3: - leal -96(%esp),%esp - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,4(%esp) - xorl %ecx,%ebx - movl %ecx,8(%esp) - movl %edi,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%edi - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ecx,24(%esp) - movl %esi,28(%esp) - movdqa 256(%ebp),%xmm7 - jmp L010grand_ssse3 -.align 4,0x90 -L010grand_ssse3: - movdqu (%edi),%xmm0 - movdqu 16(%edi),%xmm1 - movdqu 32(%edi),%xmm2 - movdqu 48(%edi),%xmm3 - addl $64,%edi -.byte 102,15,56,0,199 - movl %edi,100(%esp) -.byte 102,15,56,0,207 - movdqa (%ebp),%xmm4 -.byte 102,15,56,0,215 - movdqa 16(%ebp),%xmm5 - paddd %xmm0,%xmm4 -.byte 102,15,56,0,223 - movdqa 32(%ebp),%xmm6 - paddd %xmm1,%xmm5 - movdqa 48(%ebp),%xmm7 - movdqa %xmm4,32(%esp) - paddd %xmm2,%xmm6 - movdqa %xmm5,48(%esp) - paddd %xmm3,%xmm7 - movdqa %xmm6,64(%esp) - movdqa %xmm7,80(%esp) - jmp L011ssse3_00_47 -.align 4,0x90 -L011ssse3_00_47: - addl $64,%ebp - movl %edx,%ecx - movdqa %xmm1,%xmm4 - rorl $14,%edx - movl 20(%esp),%esi - movdqa %xmm3,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi -.byte 102,15,58,15,224,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,250,4 - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 4(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm0 - movl %eax,(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm3,%xmm7 - xorl %esi,%ecx - addl 32(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl 16(%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 
20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,12(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm0 - movl %ebx,28(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 36(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm0 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - pshufd $80,%xmm0,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa (%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,4(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm0 - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - paddd %xmm0,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movdqa 
%xmm6,32(%esp) - movl %edx,%ecx - movdqa %xmm2,%xmm4 - rorl $14,%edx - movl 4(%esp),%esi - movdqa %xmm0,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi -.byte 102,15,58,15,225,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,251,4 - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 20(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm1 - movl %eax,16(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm0,%xmm7 - xorl %esi,%ecx - addl 48(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl (%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,28(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm1 - movl %ebx,12(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 52(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm1 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - pshufd $80,%xmm1,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 
4(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 16(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,20(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm1 - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - paddd %xmm1,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movdqa %xmm6,48(%esp) - movl %edx,%ecx - movdqa %xmm3,%xmm4 - rorl $14,%edx - movl 20(%esp),%esi - movdqa %xmm1,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi -.byte 102,15,58,15,226,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,248,4 - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 4(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm2 - movl %eax,(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm1,%xmm7 - xorl %esi,%ecx - addl 64(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl 16(%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,12(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - 
xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm2 - movl %ebx,28(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 68(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm2 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - pshufd $80,%xmm2,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 32(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,4(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm2 - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - paddd %xmm2,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movdqa %xmm6,64(%esp) - movl %edx,%ecx - movdqa %xmm0,%xmm4 - rorl $14,%edx - movl 4(%esp),%esi - movdqa %xmm2,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi 
-.byte 102,15,58,15,227,4 - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi -.byte 102,15,58,15,249,4 - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - movdqa %xmm4,%xmm5 - rorl $6,%edx - movl %eax,%ecx - movdqa %xmm4,%xmm6 - addl %edi,%edx - movl 20(%esp),%edi - psrld $3,%xmm4 - movl %eax,%esi - rorl $9,%ecx - paddd %xmm7,%xmm3 - movl %eax,16(%esp) - xorl %eax,%ecx - psrld $7,%xmm6 - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - pshufd $250,%xmm2,%xmm7 - xorl %esi,%ecx - addl 80(%esp),%edx - pslld $14,%xmm5 - xorl %edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - psrld $11,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm5,%xmm4 - movl (%esp),%esi - xorl %ecx,%edx - pslld $11,%xmm5 - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - pxor %xmm6,%xmm4 - andl %ecx,%esi - movl %ecx,28(%esp) - movdqa %xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - pxor %xmm5,%xmm4 - movl %ebx,%ecx - addl %edi,%edx - psrld $10,%xmm7 - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm4,%xmm3 - movl %ebx,12(%esp) - xorl %ebx,%ecx - psrlq $17,%xmm6 - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - pxor %xmm6,%xmm7 - andl %ebx,%eax - xorl %esi,%ecx - psrlq $2,%xmm6 - addl 84(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - pshufd $128,%xmm7,%xmm7 - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - psrldq $8,%xmm7 - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - paddd %xmm7,%xmm3 - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - pshufd $80,%xmm3,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - movdqa %xmm7,%xmm6 - rorl $11,%ecx - psrld $10,%xmm7 - andl %eax,%ebx - psrlq $17,%xmm6 - xorl %esi,%ecx - addl 88(%esp),%edx - xorl 
%edi,%ebx - rorl $2,%ecx - pxor %xmm6,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - psrlq $2,%xmm6 - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - pxor %xmm6,%xmm7 - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - pshufd $8,%xmm7,%xmm7 - xorl %edi,%esi - rorl $5,%edx - movdqa 48(%ebp),%xmm6 - andl %ecx,%esi - movl %ecx,20(%esp) - pslldq $8,%xmm7 - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - paddd %xmm7,%xmm3 - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - paddd %xmm3,%xmm6 - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movdqa %xmm6,80(%esp) - cmpl $66051,64(%ebp) - jne L011ssse3_00_47 - movl %edx,%ecx - rorl $14,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 32(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 36(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl 
%edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 48(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 52(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 24(%esp),%edx - 
addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 64(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - rorl $11,%ecx - andl 
%ebx,%eax - xorl %esi,%ecx - addl 68(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 80(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi 
- rorl $9,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 84(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - rorl $14,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - rorl $9,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - rorl $11,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 88(%esp),%edx - xorl %edi,%ebx - rorl $2,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - rorl $14,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - rorl $5,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - rorl $6,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - rorl $9,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - rorl $11,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - rorl $2,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl 96(%esp),%esi - xorl %edi,%ebx - movl 12(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebx,4(%esp) - xorl %edi,%ebx - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %edi,20(%esp) - movl 28(%esp),%edi - movl %ecx,24(%esi) - addl 28(%esi),%edi - movl %ecx,24(%esp) - movl %edi,28(%esi) - movl %edi,28(%esp) - movl 100(%esp),%edi - movdqa 64(%ebp),%xmm7 - subl $192,%ebp - cmpl 104(%esp),%edi - jb 
L010grand_ssse3 - movl 108(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 5,0x90 -L004AVX: - leal -96(%esp),%esp - vzeroall - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edi - movl %ebx,4(%esp) - xorl %ecx,%ebx - movl %ecx,8(%esp) - movl %edi,12(%esp) - movl 16(%esi),%edx - movl 20(%esi),%edi - movl 24(%esi),%ecx - movl 28(%esi),%esi - movl %edi,20(%esp) - movl 100(%esp),%edi - movl %ecx,24(%esp) - movl %esi,28(%esp) - vmovdqa 256(%ebp),%xmm7 - jmp L012grand_avx -.align 5,0x90 -L012grand_avx: - vmovdqu (%edi),%xmm0 - vmovdqu 16(%edi),%xmm1 - vmovdqu 32(%edi),%xmm2 - vmovdqu 48(%edi),%xmm3 - addl $64,%edi - vpshufb %xmm7,%xmm0,%xmm0 - movl %edi,100(%esp) - vpshufb %xmm7,%xmm1,%xmm1 - vpshufb %xmm7,%xmm2,%xmm2 - vpaddd (%ebp),%xmm0,%xmm4 - vpshufb %xmm7,%xmm3,%xmm3 - vpaddd 16(%ebp),%xmm1,%xmm5 - vpaddd 32(%ebp),%xmm2,%xmm6 - vpaddd 48(%ebp),%xmm3,%xmm7 - vmovdqa %xmm4,32(%esp) - vmovdqa %xmm5,48(%esp) - vmovdqa %xmm6,64(%esp) - vmovdqa %xmm7,80(%esp) - jmp L013avx_00_47 -.align 4,0x90 -L013avx_00_47: - addl $64,%ebp - vpalignr $4,%xmm0,%xmm1,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - vpalignr $4,%xmm2,%xmm3,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - vpaddd %xmm7,%xmm0,%xmm0 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - vpshufd $250,%xmm3,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 32(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 
16(%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - vpaddd %xmm4,%xmm0,%xmm0 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 36(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - vpaddd %xmm7,%xmm0,%xmm0 - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm0,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 40(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm0,%xmm0 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - vpaddd (%ebp),%xmm0,%xmm6 - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl 
%edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,32(%esp) - vpalignr $4,%xmm1,%xmm2,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - vpalignr $4,%xmm3,%xmm0,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - vpaddd %xmm7,%xmm1,%xmm1 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - vpshufd $250,%xmm0,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 48(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - vpaddd %xmm4,%xmm1,%xmm1 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 52(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - vpaddd %xmm7,%xmm1,%xmm1 - xorl 
%ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm1,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 56(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm1,%xmm1 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - vpaddd 16(%ebp),%xmm1,%xmm6 - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,48(%esp) - vpalignr $4,%xmm2,%xmm3,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - vpalignr $4,%xmm0,%xmm1,%xmm7 - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - vpaddd %xmm7,%xmm2,%xmm2 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - vpshufd $250,%xmm1,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx 
- xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 64(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - vpaddd %xmm4,%xmm2,%xmm2 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 68(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - vpaddd %xmm7,%xmm2,%xmm2 - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm2,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 72(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm2,%xmm2 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - vpaddd 32(%ebp),%xmm2,%xmm6 - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi 
- shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,64(%esp) - vpalignr $4,%xmm3,%xmm0,%xmm4 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - vpalignr $4,%xmm1,%xmm2,%xmm7 - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - vpaddd %xmm7,%xmm3,%xmm3 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrld $3,%xmm4,%xmm7 - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - vpslld $14,%xmm4,%xmm5 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - vpshufd $250,%xmm2,%xmm7 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpsrld $11,%xmm6,%xmm6 - addl 80(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpxor %xmm5,%xmm4,%xmm4 - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - vpslld $11,%xmm5,%xmm5 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - vpxor %xmm6,%xmm4,%xmm4 - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - vpsrld $10,%xmm7,%xmm6 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - vpxor %xmm5,%xmm4,%xmm4 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - vpaddd %xmm4,%xmm3,%xmm3 - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - vpxor %xmm5,%xmm6,%xmm6 - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - vpsrlq $19,%xmm7,%xmm7 - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - vpxor %xmm7,%xmm6,%xmm6 - addl 
84(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - vpshufd $132,%xmm6,%xmm7 - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - vpsrldq $8,%xmm7,%xmm7 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - vpaddd %xmm7,%xmm3,%xmm3 - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - vpshufd $80,%xmm3,%xmm7 - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - vpsrld $10,%xmm7,%xmm6 - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - vpsrlq $17,%xmm7,%xmm5 - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - vpxor %xmm5,%xmm6,%xmm6 - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - vpsrlq $19,%xmm7,%xmm7 - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - vpxor %xmm7,%xmm6,%xmm6 - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - vpshufd $232,%xmm6,%xmm7 - addl 88(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - vpslldq $8,%xmm7,%xmm7 - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - vpaddd %xmm7,%xmm3,%xmm3 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - vpaddd 48(%ebp),%xmm3,%xmm6 - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - vmovdqa %xmm6,80(%esp) - cmpl $66051,64(%ebp) - jne L013avx_00_47 - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - xorl 
%eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 32(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 36(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 40(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 44(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 
8(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 48(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 52(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 56(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 
(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 60(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 20(%esp),%esi - xorl %ecx,%edx - movl 24(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,16(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 4(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 28(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 64(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 12(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 16(%esp),%esi - xorl %ecx,%edx - movl 20(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,12(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl (%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,28(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 24(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 68(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 8(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 12(%esp),%esi - xorl %ecx,%edx - movl 16(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,8(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 28(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,24(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 20(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 72(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 4(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 8(%esp),%esi - xorl %ecx,%edx - movl 12(%esp),%edi - xorl %edi,%esi - shrdl 
$5,%edx,%edx - andl %ecx,%esi - movl %ecx,4(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 24(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,20(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 16(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 76(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl (%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 4(%esp),%esi - xorl %ecx,%edx - movl 8(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 20(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,16(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 12(%esp),%edx - shrdl $11,%ecx,%ecx - andl %eax,%ebx - xorl %esi,%ecx - addl 80(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 28(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl (%esp),%esi - xorl %ecx,%edx - movl 4(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,28(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 16(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,12(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl 8(%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 84(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 24(%esp),%edx - addl %ecx,%eax - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 28(%esp),%esi - xorl %ecx,%edx - movl (%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,24(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %eax,%ecx - addl %edi,%edx - movl 12(%esp),%edi - movl %eax,%esi - shrdl $9,%ecx,%ecx - movl %eax,8(%esp) - xorl %eax,%ecx - xorl %edi,%eax - addl 4(%esp),%edx - shrdl $11,%ecx,%ecx - andl 
%eax,%ebx - xorl %esi,%ecx - addl 88(%esp),%edx - xorl %edi,%ebx - shrdl $2,%ecx,%ecx - addl %edx,%ebx - addl 20(%esp),%edx - addl %ecx,%ebx - movl %edx,%ecx - shrdl $14,%edx,%edx - movl 24(%esp),%esi - xorl %ecx,%edx - movl 28(%esp),%edi - xorl %edi,%esi - shrdl $5,%edx,%edx - andl %ecx,%esi - movl %ecx,20(%esp) - xorl %ecx,%edx - xorl %esi,%edi - shrdl $6,%edx,%edx - movl %ebx,%ecx - addl %edi,%edx - movl 8(%esp),%edi - movl %ebx,%esi - shrdl $9,%ecx,%ecx - movl %ebx,4(%esp) - xorl %ebx,%ecx - xorl %edi,%ebx - addl (%esp),%edx - shrdl $11,%ecx,%ecx - andl %ebx,%eax - xorl %esi,%ecx - addl 92(%esp),%edx - xorl %edi,%eax - shrdl $2,%ecx,%ecx - addl %edx,%eax - addl 16(%esp),%edx - addl %ecx,%eax - movl 96(%esp),%esi - xorl %edi,%ebx - movl 12(%esp),%ecx - addl (%esi),%eax - addl 4(%esi),%ebx - addl 8(%esi),%edi - addl 12(%esi),%ecx - movl %eax,(%esi) - movl %ebx,4(%esi) - movl %edi,8(%esi) - movl %ecx,12(%esi) - movl %ebx,4(%esp) - xorl %edi,%ebx - movl %edi,8(%esp) - movl %ecx,12(%esp) - movl 20(%esp),%edi - movl 24(%esp),%ecx - addl 16(%esi),%edx - addl 20(%esi),%edi - addl 24(%esi),%ecx - movl %edx,16(%esi) - movl %edi,20(%esi) - movl %edi,20(%esp) - movl 28(%esp),%edi - movl %ecx,24(%esi) - addl 28(%esi),%edi - movl %ecx,24(%esp) - movl %edi,28(%esi) - movl %edi,28(%esp) - movl 100(%esp),%edi - vmovdqa 64(%ebp),%xmm7 - subl $192,%ebp - cmpl 104(%esp),%edi - jb L012grand_avx - movl 108(%esp),%esp - vzeroall - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha512-586.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha512-586.S deleted file mode 100644 index a08e6ef5d7..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/sha512-586.S +++ /dev/null @@ -1,2838 +0,0 @@ -# This file is generated from a similarly-named 
Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _sha512_block_data_order -.private_extern _sha512_block_data_order -.align 4 -_sha512_block_data_order: -L_sha512_block_data_order_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl %esp,%ebx - call L000pic_point -L000pic_point: - popl %ebp - leal L001K512-L000pic_point(%ebp),%ebp - subl $16,%esp - andl $-64,%esp - shll $7,%eax - addl %edi,%eax - movl %esi,(%esp) - movl %edi,4(%esp) - movl %eax,8(%esp) - movl %ebx,12(%esp) - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L001K512(%ebp),%edx - movl (%edx),%ecx - testl $67108864,%ecx - jz L002loop_x86 - movl 4(%edx),%edx - movq (%esi),%mm0 - andl $16777216,%ecx - movq 8(%esi),%mm1 - andl $512,%edx - movq 16(%esi),%mm2 - orl %edx,%ecx - movq 24(%esi),%mm3 - movq 32(%esi),%mm4 - movq 40(%esi),%mm5 - movq 48(%esi),%mm6 - movq 56(%esi),%mm7 - cmpl $16777728,%ecx - je L003SSSE3 - subl $80,%esp - jmp L004loop_sse2 -.align 4,0x90 -L004loop_sse2: - movq %mm1,8(%esp) - movq %mm2,16(%esp) - movq %mm3,24(%esp) - movq %mm5,40(%esp) - movq %mm6,48(%esp) - pxor %mm1,%mm2 - movq %mm7,56(%esp) - movq %mm0,%mm3 - movl (%edi),%eax - movl 4(%edi),%ebx - addl $8,%edi - movl $15,%edx - bswap %eax - bswap %ebx - jmp L00500_14_sse2 -.align 4,0x90 -L00500_14_sse2: - movd %eax,%mm1 - movl (%edi),%eax - movd %ebx,%mm7 - movl 4(%edi),%ebx - addl $8,%edi - bswap %eax - bswap %ebx - punpckldq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm3,%mm0 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - 
paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm2,%mm3 - movq %mm0,%mm2 - addl $8,%ebp - paddq %mm6,%mm3 - movq 48(%esp),%mm6 - decl %edx - jnz L00500_14_sse2 - movd %eax,%mm1 - movd %ebx,%mm7 - punpckldq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm3,%mm0 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm2,%mm3 - movq %mm0,%mm2 - addl $8,%ebp - paddq %mm6,%mm3 - pxor %mm0,%mm0 - movl $32,%edx - jmp L00616_79_sse2 -.align 4,0x90 -L00616_79_sse2: - movq 88(%esp),%mm5 - movq %mm7,%mm1 - psrlq $1,%mm7 - movq %mm5,%mm6 - psrlq $6,%mm5 - psllq $56,%mm1 - paddq %mm3,%mm0 - movq %mm7,%mm3 - psrlq $6,%mm7 - pxor %mm1,%mm3 - psllq $7,%mm1 - pxor %mm7,%mm3 - psrlq $1,%mm7 - pxor %mm1,%mm3 - movq %mm5,%mm1 - psrlq $13,%mm5 - pxor %mm3,%mm7 - psllq $3,%mm6 - pxor %mm5,%mm1 - paddq 200(%esp),%mm7 - pxor %mm6,%mm1 - psrlq $42,%mm5 - paddq 128(%esp),%mm7 - pxor %mm5,%mm1 - psllq $42,%mm6 - movq 40(%esp),%mm5 - pxor %mm6,%mm1 - movq 
48(%esp),%mm6 - paddq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm6,%mm2 - addl $8,%ebp - movq 88(%esp),%mm5 - movq %mm7,%mm1 - psrlq $1,%mm7 - movq %mm5,%mm6 - psrlq $6,%mm5 - psllq $56,%mm1 - paddq %mm3,%mm2 - movq %mm7,%mm3 - psrlq $6,%mm7 - pxor %mm1,%mm3 - psllq $7,%mm1 - pxor %mm7,%mm3 - psrlq $1,%mm7 - pxor %mm1,%mm3 - movq %mm5,%mm1 - psrlq $13,%mm5 - pxor %mm3,%mm7 - psllq $3,%mm6 - pxor %mm5,%mm1 - paddq 200(%esp),%mm7 - pxor %mm6,%mm1 - psrlq $42,%mm5 - paddq 128(%esp),%mm7 - pxor %mm5,%mm1 - psllq $42,%mm6 - movq 40(%esp),%mm5 - pxor %mm6,%mm1 - movq 48(%esp),%mm6 - paddq %mm1,%mm7 - movq %mm4,%mm1 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - movq %mm7,72(%esp) - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - paddq (%ebp),%mm7 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - subl $8,%esp - psllq $5,%mm6 - pxor 
%mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 192(%esp),%mm7 - paddq %mm6,%mm0 - addl $8,%ebp - decl %edx - jnz L00616_79_sse2 - paddq %mm3,%mm0 - movq 8(%esp),%mm1 - movq 24(%esp),%mm3 - movq 40(%esp),%mm5 - movq 48(%esp),%mm6 - movq 56(%esp),%mm7 - pxor %mm1,%mm2 - paddq (%esi),%mm0 - paddq 8(%esi),%mm1 - paddq 16(%esi),%mm2 - paddq 24(%esi),%mm3 - paddq 32(%esi),%mm4 - paddq 40(%esi),%mm5 - paddq 48(%esi),%mm6 - paddq 56(%esi),%mm7 - movl $640,%eax - movq %mm0,(%esi) - movq %mm1,8(%esi) - movq %mm2,16(%esi) - movq %mm3,24(%esi) - movq %mm4,32(%esi) - movq %mm5,40(%esi) - movq %mm6,48(%esi) - movq %mm7,56(%esi) - leal (%esp,%eax,1),%esp - subl %eax,%ebp - cmpl 88(%esp),%edi - jb L004loop_sse2 - movl 92(%esp),%esp - emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 5,0x90 -L003SSSE3: - leal -64(%esp),%edx - subl $256,%esp - movdqa 640(%ebp),%xmm1 - movdqu (%edi),%xmm0 -.byte 102,15,56,0,193 - movdqa (%ebp),%xmm3 - movdqa %xmm1,%xmm2 - movdqu 16(%edi),%xmm1 - paddq %xmm0,%xmm3 -.byte 102,15,56,0,202 - movdqa %xmm3,-128(%edx) - movdqa 16(%ebp),%xmm4 - movdqa %xmm2,%xmm3 - movdqu 32(%edi),%xmm2 - paddq %xmm1,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm4,-112(%edx) - movdqa 32(%ebp),%xmm5 - movdqa %xmm3,%xmm4 - movdqu 48(%edi),%xmm3 - paddq %xmm2,%xmm5 -.byte 102,15,56,0,220 - movdqa %xmm5,-96(%edx) - movdqa 48(%ebp),%xmm6 - movdqa %xmm4,%xmm5 - movdqu 64(%edi),%xmm4 - paddq %xmm3,%xmm6 -.byte 102,15,56,0,229 - movdqa %xmm6,-80(%edx) - movdqa 64(%ebp),%xmm7 - movdqa %xmm5,%xmm6 - movdqu 80(%edi),%xmm5 - paddq %xmm4,%xmm7 -.byte 102,15,56,0,238 - movdqa %xmm7,-64(%edx) - movdqa %xmm0,(%edx) - movdqa 80(%ebp),%xmm0 - movdqa %xmm6,%xmm7 - movdqu 96(%edi),%xmm6 - paddq %xmm5,%xmm0 -.byte 102,15,56,0,247 - movdqa %xmm0,-48(%edx) - movdqa %xmm1,16(%edx) - movdqa 96(%ebp),%xmm1 - movdqa %xmm7,%xmm0 - movdqu 112(%edi),%xmm7 - paddq %xmm6,%xmm1 -.byte 
102,15,56,0,248 - movdqa %xmm1,-32(%edx) - movdqa %xmm2,32(%edx) - movdqa 112(%ebp),%xmm2 - movdqa (%edx),%xmm0 - paddq %xmm7,%xmm2 - movdqa %xmm2,-16(%edx) - nop -.align 5,0x90 -L007loop_ssse3: - movdqa 16(%edx),%xmm2 - movdqa %xmm3,48(%edx) - leal 128(%ebp),%ebp - movq %mm1,8(%esp) - movl %edi,%ebx - movq %mm2,16(%esp) - leal 128(%edi),%edi - movq %mm3,24(%esp) - cmpl %eax,%edi - movq %mm5,40(%esp) - cmovbl %edi,%ebx - movq %mm6,48(%esp) - movl $4,%ecx - pxor %mm1,%mm2 - movq %mm7,56(%esp) - pxor %mm3,%mm3 - jmp L00800_47_ssse3 -.align 5,0x90 -L00800_47_ssse3: - movdqa %xmm5,%xmm3 - movdqa %xmm2,%xmm1 -.byte 102,15,58,15,208,8 - movdqa %xmm4,(%edx) -.byte 102,15,58,15,220,8 - movdqa %xmm2,%xmm4 - psrlq $7,%xmm2 - paddq %xmm3,%xmm0 - movdqa %xmm4,%xmm3 - psrlq $1,%xmm4 - psllq $56,%xmm3 - pxor %xmm4,%xmm2 - psrlq $7,%xmm4 - pxor %xmm3,%xmm2 - psllq $7,%xmm3 - pxor %xmm4,%xmm2 - movdqa %xmm7,%xmm4 - pxor %xmm3,%xmm2 - movdqa %xmm7,%xmm3 - psrlq $6,%xmm4 - paddq %xmm2,%xmm0 - movdqa %xmm7,%xmm2 - psrlq $19,%xmm3 - psllq $3,%xmm2 - pxor %xmm3,%xmm4 - psrlq $42,%xmm3 - pxor %xmm2,%xmm4 - psllq $42,%xmm2 - pxor %xmm3,%xmm4 - movdqa 32(%edx),%xmm3 - pxor %xmm2,%xmm4 - movdqa (%ebp),%xmm2 - movq %mm4,%mm1 - paddq %xmm4,%xmm0 - movq -128(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - paddq %xmm0,%xmm2 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - 
movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -120(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm2,-128(%edx) - movdqa %xmm6,%xmm4 - movdqa %xmm3,%xmm2 -.byte 102,15,58,15,217,8 - movdqa %xmm5,16(%edx) -.byte 102,15,58,15,229,8 - movdqa %xmm3,%xmm5 - psrlq $7,%xmm3 - paddq %xmm4,%xmm1 - movdqa %xmm5,%xmm4 - psrlq $1,%xmm5 - psllq $56,%xmm4 - pxor %xmm5,%xmm3 - psrlq $7,%xmm5 - pxor %xmm4,%xmm3 - psllq $7,%xmm4 - pxor %xmm5,%xmm3 - movdqa %xmm0,%xmm5 - pxor %xmm4,%xmm3 - movdqa %xmm0,%xmm4 - psrlq $6,%xmm5 - paddq %xmm3,%xmm1 - movdqa %xmm0,%xmm3 - psrlq $19,%xmm4 - psllq $3,%xmm3 - pxor %xmm4,%xmm5 - psrlq $42,%xmm4 - pxor %xmm3,%xmm5 - psllq $42,%xmm3 - pxor %xmm4,%xmm5 - movdqa 48(%edx),%xmm4 - pxor %xmm3,%xmm5 - movdqa 16(%ebp),%xmm3 - movq %mm4,%mm1 - paddq %xmm5,%xmm1 - movq -112(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - paddq %xmm1,%xmm3 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq 
%mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -104(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm3,-112(%edx) - movdqa %xmm7,%xmm5 - movdqa %xmm4,%xmm3 -.byte 102,15,58,15,226,8 - movdqa %xmm6,32(%edx) -.byte 102,15,58,15,238,8 - movdqa %xmm4,%xmm6 - psrlq $7,%xmm4 - paddq %xmm5,%xmm2 - movdqa %xmm6,%xmm5 - psrlq $1,%xmm6 - psllq $56,%xmm5 - pxor %xmm6,%xmm4 - psrlq $7,%xmm6 - pxor %xmm5,%xmm4 - psllq $7,%xmm5 - pxor %xmm6,%xmm4 - movdqa %xmm1,%xmm6 - pxor %xmm5,%xmm4 - movdqa %xmm1,%xmm5 - psrlq $6,%xmm6 - paddq %xmm4,%xmm2 - movdqa %xmm1,%xmm4 - psrlq $19,%xmm5 - psllq $3,%xmm4 - pxor %xmm5,%xmm6 - psrlq $42,%xmm5 - pxor %xmm4,%xmm6 - psllq $42,%xmm4 - pxor %xmm5,%xmm6 - movdqa (%edx),%xmm5 - pxor %xmm4,%xmm6 - movdqa 32(%ebp),%xmm4 - movq %mm4,%mm1 - paddq %xmm6,%xmm2 - movq -96(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - paddq %xmm2,%xmm4 
- pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -88(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm4,-96(%edx) - movdqa %xmm0,%xmm6 - movdqa %xmm5,%xmm4 -.byte 102,15,58,15,235,8 - movdqa %xmm7,48(%edx) -.byte 102,15,58,15,247,8 - movdqa %xmm5,%xmm7 - psrlq $7,%xmm5 - paddq %xmm6,%xmm3 - movdqa %xmm7,%xmm6 - psrlq $1,%xmm7 - psllq $56,%xmm6 - pxor %xmm7,%xmm5 - psrlq $7,%xmm7 - pxor %xmm6,%xmm5 - psllq $7,%xmm6 - pxor %xmm7,%xmm5 - movdqa %xmm2,%xmm7 - pxor %xmm6,%xmm5 - movdqa %xmm2,%xmm6 - psrlq $6,%xmm7 - paddq %xmm5,%xmm3 - movdqa %xmm2,%xmm5 - 
psrlq $19,%xmm6 - psllq $3,%xmm5 - pxor %xmm6,%xmm7 - psrlq $42,%xmm6 - pxor %xmm5,%xmm7 - psllq $42,%xmm5 - pxor %xmm6,%xmm7 - movdqa 16(%edx),%xmm6 - pxor %xmm5,%xmm7 - movdqa 48(%ebp),%xmm5 - movq %mm4,%mm1 - paddq %xmm7,%xmm3 - movq -80(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - paddq %xmm3,%xmm5 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -72(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm5,-80(%edx) - movdqa %xmm1,%xmm7 - movdqa %xmm6,%xmm5 -.byte 102,15,58,15,244,8 - movdqa %xmm0,(%edx) -.byte 102,15,58,15,248,8 
- movdqa %xmm6,%xmm0 - psrlq $7,%xmm6 - paddq %xmm7,%xmm4 - movdqa %xmm0,%xmm7 - psrlq $1,%xmm0 - psllq $56,%xmm7 - pxor %xmm0,%xmm6 - psrlq $7,%xmm0 - pxor %xmm7,%xmm6 - psllq $7,%xmm7 - pxor %xmm0,%xmm6 - movdqa %xmm3,%xmm0 - pxor %xmm7,%xmm6 - movdqa %xmm3,%xmm7 - psrlq $6,%xmm0 - paddq %xmm6,%xmm4 - movdqa %xmm3,%xmm6 - psrlq $19,%xmm7 - psllq $3,%xmm6 - pxor %xmm7,%xmm0 - psrlq $42,%xmm7 - pxor %xmm6,%xmm0 - psllq $42,%xmm6 - pxor %xmm7,%xmm0 - movdqa 32(%edx),%xmm7 - pxor %xmm6,%xmm0 - movdqa 64(%ebp),%xmm6 - movq %mm4,%mm1 - paddq %xmm0,%xmm4 - movq -64(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - paddq %xmm4,%xmm6 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -56(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor 
%mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm6,-64(%edx) - movdqa %xmm2,%xmm0 - movdqa %xmm7,%xmm6 -.byte 102,15,58,15,253,8 - movdqa %xmm1,16(%edx) -.byte 102,15,58,15,193,8 - movdqa %xmm7,%xmm1 - psrlq $7,%xmm7 - paddq %xmm0,%xmm5 - movdqa %xmm1,%xmm0 - psrlq $1,%xmm1 - psllq $56,%xmm0 - pxor %xmm1,%xmm7 - psrlq $7,%xmm1 - pxor %xmm0,%xmm7 - psllq $7,%xmm0 - pxor %xmm1,%xmm7 - movdqa %xmm4,%xmm1 - pxor %xmm0,%xmm7 - movdqa %xmm4,%xmm0 - psrlq $6,%xmm1 - paddq %xmm7,%xmm5 - movdqa %xmm4,%xmm7 - psrlq $19,%xmm0 - psllq $3,%xmm7 - pxor %xmm0,%xmm1 - psrlq $42,%xmm0 - pxor %xmm7,%xmm1 - psllq $42,%xmm7 - pxor %xmm0,%xmm1 - movdqa 48(%edx),%xmm0 - pxor %xmm7,%xmm1 - movdqa 80(%ebp),%xmm7 - movq %mm4,%mm1 - paddq %xmm1,%xmm5 - movq -48(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - paddq %xmm5,%xmm7 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -40(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor 
%mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm7,-48(%edx) - movdqa %xmm3,%xmm1 - movdqa %xmm0,%xmm7 -.byte 102,15,58,15,198,8 - movdqa %xmm2,32(%edx) -.byte 102,15,58,15,202,8 - movdqa %xmm0,%xmm2 - psrlq $7,%xmm0 - paddq %xmm1,%xmm6 - movdqa %xmm2,%xmm1 - psrlq $1,%xmm2 - psllq $56,%xmm1 - pxor %xmm2,%xmm0 - psrlq $7,%xmm2 - pxor %xmm1,%xmm0 - psllq $7,%xmm1 - pxor %xmm2,%xmm0 - movdqa %xmm5,%xmm2 - pxor %xmm1,%xmm0 - movdqa %xmm5,%xmm1 - psrlq $6,%xmm2 - paddq %xmm0,%xmm6 - movdqa %xmm5,%xmm0 - psrlq $19,%xmm1 - psllq $3,%xmm0 - pxor %xmm1,%xmm2 - psrlq $42,%xmm1 - pxor %xmm0,%xmm2 - psllq $42,%xmm0 - pxor %xmm1,%xmm2 - movdqa (%edx),%xmm1 - pxor %xmm0,%xmm2 - movdqa 96(%ebp),%xmm0 - movq %mm4,%mm1 - paddq %xmm2,%xmm6 - movq -32(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - paddq %xmm6,%xmm0 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq 
%mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -24(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm0,-32(%edx) - movdqa %xmm4,%xmm2 - movdqa %xmm1,%xmm0 -.byte 102,15,58,15,207,8 - movdqa %xmm3,48(%edx) -.byte 102,15,58,15,211,8 - movdqa %xmm1,%xmm3 - psrlq $7,%xmm1 - paddq %xmm2,%xmm7 - movdqa %xmm3,%xmm2 - psrlq $1,%xmm3 - psllq $56,%xmm2 - pxor %xmm3,%xmm1 - psrlq $7,%xmm3 - pxor %xmm2,%xmm1 - psllq $7,%xmm2 - pxor %xmm3,%xmm1 - movdqa %xmm6,%xmm3 - pxor %xmm2,%xmm1 - movdqa %xmm6,%xmm2 - psrlq $6,%xmm3 - paddq %xmm1,%xmm7 - movdqa %xmm6,%xmm1 - psrlq $19,%xmm2 - psllq $3,%xmm1 - pxor %xmm2,%xmm3 - psrlq $42,%xmm2 - pxor %xmm1,%xmm3 - psllq $42,%xmm1 - pxor %xmm2,%xmm3 - movdqa 16(%edx),%xmm2 - pxor %xmm1,%xmm3 - movdqa 112(%ebp),%xmm1 - movq %mm4,%mm1 - paddq %xmm3,%xmm7 - movq -16(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - paddq %xmm7,%xmm1 - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - 
psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -8(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm1,-16(%edx) - leal 128(%ebp),%ebp - decl %ecx - jnz L00800_47_ssse3 - movdqa (%ebp),%xmm1 - leal -640(%ebp),%ebp - movdqu (%ebx),%xmm0 -.byte 102,15,56,0,193 - movdqa (%ebp),%xmm3 - movdqa %xmm1,%xmm2 - movdqu 16(%ebx),%xmm1 - paddq %xmm0,%xmm3 -.byte 102,15,56,0,202 - movq %mm4,%mm1 - movq -128(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 
8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -120(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm3,-128(%edx) - movdqa 16(%ebp),%xmm4 - movdqa %xmm2,%xmm3 - movdqu 32(%ebx),%xmm2 - paddq %xmm1,%xmm4 -.byte 102,15,56,0,211 - movq %mm4,%mm1 - movq -112(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 
16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -104(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm4,-112(%edx) - movdqa 32(%ebp),%xmm5 - movdqa %xmm3,%xmm4 - movdqu 48(%ebx),%xmm3 - paddq %xmm2,%xmm5 -.byte 102,15,56,0,220 - movq %mm4,%mm1 - movq -96(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -88(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor 
%mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm5,-96(%edx) - movdqa 48(%ebp),%xmm6 - movdqa %xmm4,%xmm5 - movdqu 64(%ebx),%xmm4 - paddq %xmm3,%xmm6 -.byte 102,15,56,0,229 - movq %mm4,%mm1 - movq -80(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -72(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq 
%mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm6,-80(%edx) - movdqa 64(%ebp),%xmm7 - movdqa %xmm5,%xmm6 - movdqu 80(%ebx),%xmm5 - paddq %xmm4,%xmm7 -.byte 102,15,56,0,238 - movq %mm4,%mm1 - movq -64(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,32(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 56(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 24(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 8(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 32(%esp),%mm5 - paddq %mm6,%mm2 - movq 40(%esp),%mm6 - movq %mm4,%mm1 - movq -56(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,24(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,56(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 48(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 16(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq (%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - 
pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 24(%esp),%mm5 - paddq %mm6,%mm0 - movq 32(%esp),%mm6 - movdqa %xmm7,-64(%edx) - movdqa %xmm0,(%edx) - movdqa 80(%ebp),%xmm0 - movdqa %xmm6,%xmm7 - movdqu 96(%ebx),%xmm6 - paddq %xmm5,%xmm0 -.byte 102,15,56,0,247 - movq %mm4,%mm1 - movq -48(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,16(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,48(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 40(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 8(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 56(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 16(%esp),%mm5 - paddq %mm6,%mm2 - movq 24(%esp),%mm6 - movq %mm4,%mm1 - movq -40(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,8(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,40(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 32(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq (%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 48(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 8(%esp),%mm5 - paddq %mm6,%mm0 - movq 16(%esp),%mm6 - movdqa %xmm0,-48(%edx) - movdqa %xmm1,16(%edx) - 
movdqa 96(%ebp),%xmm1 - movdqa %xmm7,%xmm0 - movdqu 112(%ebx),%xmm7 - paddq %xmm6,%xmm1 -.byte 102,15,56,0,248 - movq %mm4,%mm1 - movq -32(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,32(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 24(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 56(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 40(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq (%esp),%mm5 - paddq %mm6,%mm2 - movq 8(%esp),%mm6 - movq %mm4,%mm1 - movq -24(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,56(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,24(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 16(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 48(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 32(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 56(%esp),%mm5 - paddq %mm6,%mm0 - movq (%esp),%mm6 - movdqa %xmm1,-32(%edx) - movdqa %xmm2,32(%edx) - movdqa 112(%ebp),%xmm2 - movdqa (%edx),%xmm0 - paddq %xmm7,%xmm2 - movq %mm4,%mm1 - movq -16(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,48(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm0 - 
movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm0,16(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq 8(%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 40(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm0,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm0,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 24(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm0,%mm2 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - pxor %mm7,%mm6 - movq 48(%esp),%mm5 - paddq %mm6,%mm2 - movq 56(%esp),%mm6 - movq %mm4,%mm1 - movq -8(%edx),%mm7 - pxor %mm6,%mm5 - psrlq $14,%mm1 - movq %mm4,40(%esp) - pand %mm4,%mm5 - psllq $23,%mm4 - paddq %mm3,%mm2 - movq %mm1,%mm3 - psrlq $4,%mm1 - pxor %mm6,%mm5 - pxor %mm4,%mm3 - psllq $23,%mm4 - pxor %mm1,%mm3 - movq %mm2,8(%esp) - paddq %mm5,%mm7 - pxor %mm4,%mm3 - psrlq $23,%mm1 - paddq (%esp),%mm7 - pxor %mm1,%mm3 - psllq $4,%mm4 - pxor %mm4,%mm3 - movq 32(%esp),%mm4 - paddq %mm7,%mm3 - movq %mm2,%mm5 - psrlq $28,%mm5 - paddq %mm3,%mm4 - movq %mm2,%mm6 - movq %mm5,%mm7 - psllq $25,%mm6 - movq 16(%esp),%mm1 - psrlq $6,%mm5 - pxor %mm6,%mm7 - psllq $5,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm2 - psrlq $5,%mm5 - pxor %mm6,%mm7 - pand %mm2,%mm0 - psllq $6,%mm6 - pxor %mm5,%mm7 - pxor %mm1,%mm0 - pxor %mm7,%mm6 - movq 40(%esp),%mm5 - paddq %mm6,%mm0 - movq 48(%esp),%mm6 - movdqa %xmm2,-16(%edx) - movq 8(%esp),%mm1 - paddq %mm3,%mm0 - movq 24(%esp),%mm3 - movq 56(%esp),%mm7 - pxor %mm1,%mm2 - paddq (%esi),%mm0 - paddq 8(%esi),%mm1 - paddq 16(%esi),%mm2 - paddq 24(%esi),%mm3 - paddq 32(%esi),%mm4 - paddq 40(%esi),%mm5 - paddq 48(%esi),%mm6 - paddq 56(%esi),%mm7 - movq %mm0,(%esi) - movq %mm1,8(%esi) - movq %mm2,16(%esi) - movq %mm3,24(%esi) - movq %mm4,32(%esi) - movq %mm5,40(%esi) - movq %mm6,48(%esi) - movq %mm7,56(%esi) - cmpl %eax,%edi - jb L007loop_ssse3 - movl 76(%edx),%esp - 
emms - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 4,0x90 -L002loop_x86: - movl (%edi),%eax - movl 4(%edi),%ebx - movl 8(%edi),%ecx - movl 12(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 16(%edi),%eax - movl 20(%edi),%ebx - movl 24(%edi),%ecx - movl 28(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 32(%edi),%eax - movl 36(%edi),%ebx - movl 40(%edi),%ecx - movl 44(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 48(%edi),%eax - movl 52(%edi),%ebx - movl 56(%edi),%ecx - movl 60(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 64(%edi),%eax - movl 68(%edi),%ebx - movl 72(%edi),%ecx - movl 76(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 80(%edi),%eax - movl 84(%edi),%ebx - movl 88(%edi),%ecx - movl 92(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 96(%edi),%eax - movl 100(%edi),%ebx - movl 104(%edi),%ecx - movl 108(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - movl 112(%edi),%eax - movl 116(%edi),%ebx - movl 120(%edi),%ecx - movl 124(%edi),%edx - bswap %eax - bswap %ebx - bswap %ecx - bswap %edx - pushl %eax - pushl %ebx - pushl %ecx - pushl %edx - addl $128,%edi - subl $72,%esp - movl %edi,204(%esp) - leal 8(%esp),%edi - movl $16,%ecx -.long 2784229001 -.align 4,0x90 -L00900_15_x86: - movl 40(%esp),%ecx - movl 44(%esp),%edx - movl %ecx,%esi - shrl $9,%ecx - movl %edx,%edi - shrl $9,%edx - movl %ecx,%ebx - shll $14,%esi - movl %edx,%eax - shll $14,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%eax - shll 
$4,%esi - xorl %edx,%ebx - shll $4,%edi - xorl %esi,%ebx - shrl $4,%ecx - xorl %edi,%eax - shrl $4,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 48(%esp),%ecx - movl 52(%esp),%edx - movl 56(%esp),%esi - movl 60(%esp),%edi - addl 64(%esp),%eax - adcl 68(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - andl 40(%esp),%ecx - andl 44(%esp),%edx - addl 192(%esp),%eax - adcl 196(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - movl (%ebp),%esi - movl 4(%ebp),%edi - addl %ecx,%eax - adcl %edx,%ebx - movl 32(%esp),%ecx - movl 36(%esp),%edx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - addl %ecx,%eax - adcl %edx,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,%esi - shrl $2,%ecx - movl %edx,%edi - shrl $2,%edx - movl %ecx,%ebx - shll $4,%esi - movl %edx,%eax - shll $4,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%ebx - shll $21,%esi - xorl %edx,%eax - shll $21,%edi - xorl %esi,%eax - shrl $21,%ecx - xorl %edi,%ebx - shrl $21,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl 16(%esp),%esi - movl 20(%esp),%edi - addl (%esp),%eax - adcl 4(%esp),%ebx - orl %esi,%ecx - orl %edi,%edx - andl 24(%esp),%ecx - andl 28(%esp),%edx - andl 8(%esp),%esi - andl 12(%esp),%edi - orl %esi,%ecx - orl %edi,%edx - addl %ecx,%eax - adcl %edx,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - movb (%ebp),%dl - subl $8,%esp - leal 8(%ebp),%ebp - cmpb $148,%dl - jne L00900_15_x86 -.align 4,0x90 -L01016_79_x86: - movl 312(%esp),%ecx - movl 316(%esp),%edx - movl %ecx,%esi - shrl $1,%ecx - movl %edx,%edi - shrl $1,%edx - movl %ecx,%eax - shll $24,%esi - movl %edx,%ebx - shll $24,%edi - xorl %esi,%ebx - shrl $6,%ecx - xorl %edi,%eax - shrl $6,%edx - xorl %ecx,%eax - shll $7,%esi - xorl %edx,%ebx - shll $1,%edi - xorl %esi,%ebx - shrl 
$1,%ecx - xorl %edi,%eax - shrl $1,%edx - xorl %ecx,%eax - shll $6,%edi - xorl %edx,%ebx - xorl %edi,%eax - movl %eax,(%esp) - movl %ebx,4(%esp) - movl 208(%esp),%ecx - movl 212(%esp),%edx - movl %ecx,%esi - shrl $6,%ecx - movl %edx,%edi - shrl $6,%edx - movl %ecx,%eax - shll $3,%esi - movl %edx,%ebx - shll $3,%edi - xorl %esi,%eax - shrl $13,%ecx - xorl %edi,%ebx - shrl $13,%edx - xorl %ecx,%eax - shll $10,%esi - xorl %edx,%ebx - shll $10,%edi - xorl %esi,%ebx - shrl $10,%ecx - xorl %edi,%eax - shrl $10,%edx - xorl %ecx,%ebx - shll $13,%edi - xorl %edx,%eax - xorl %edi,%eax - movl 320(%esp),%ecx - movl 324(%esp),%edx - addl (%esp),%eax - adcl 4(%esp),%ebx - movl 248(%esp),%esi - movl 252(%esp),%edi - addl %ecx,%eax - adcl %edx,%ebx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,192(%esp) - movl %ebx,196(%esp) - movl 40(%esp),%ecx - movl 44(%esp),%edx - movl %ecx,%esi - shrl $9,%ecx - movl %edx,%edi - shrl $9,%edx - movl %ecx,%ebx - shll $14,%esi - movl %edx,%eax - shll $14,%edi - xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%eax - shll $4,%esi - xorl %edx,%ebx - shll $4,%edi - xorl %esi,%ebx - shrl $4,%ecx - xorl %edi,%eax - shrl $4,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 48(%esp),%ecx - movl 52(%esp),%edx - movl 56(%esp),%esi - movl 60(%esp),%edi - addl 64(%esp),%eax - adcl 68(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - andl 40(%esp),%ecx - andl 44(%esp),%edx - addl 192(%esp),%eax - adcl 196(%esp),%ebx - xorl %esi,%ecx - xorl %edi,%edx - movl (%ebp),%esi - movl 4(%ebp),%edi - addl %ecx,%eax - adcl %edx,%ebx - movl 32(%esp),%ecx - movl 36(%esp),%edx - addl %esi,%eax - adcl %edi,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - addl %ecx,%eax - adcl %edx,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl %eax,32(%esp) - movl %ebx,36(%esp) - movl %ecx,%esi - shrl $2,%ecx - movl %edx,%edi - shrl $2,%edx - movl %ecx,%ebx - shll $4,%esi - movl %edx,%eax - shll $4,%edi - 
xorl %esi,%ebx - shrl $5,%ecx - xorl %edi,%eax - shrl $5,%edx - xorl %ecx,%ebx - shll $21,%esi - xorl %edx,%eax - shll $21,%edi - xorl %esi,%eax - shrl $21,%ecx - xorl %edi,%ebx - shrl $21,%edx - xorl %ecx,%eax - shll $5,%esi - xorl %edx,%ebx - shll $5,%edi - xorl %esi,%eax - xorl %edi,%ebx - movl 8(%esp),%ecx - movl 12(%esp),%edx - movl 16(%esp),%esi - movl 20(%esp),%edi - addl (%esp),%eax - adcl 4(%esp),%ebx - orl %esi,%ecx - orl %edi,%edx - andl 24(%esp),%ecx - andl 28(%esp),%edx - andl 8(%esp),%esi - andl 12(%esp),%edi - orl %esi,%ecx - orl %edi,%edx - addl %ecx,%eax - adcl %edx,%ebx - movl %eax,(%esp) - movl %ebx,4(%esp) - movb (%ebp),%dl - subl $8,%esp - leal 8(%ebp),%ebp - cmpb $23,%dl - jne L01016_79_x86 - movl 840(%esp),%esi - movl 844(%esp),%edi - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%edx - addl 8(%esp),%eax - adcl 12(%esp),%ebx - movl %eax,(%esi) - movl %ebx,4(%esi) - addl 16(%esp),%ecx - adcl 20(%esp),%edx - movl %ecx,8(%esi) - movl %edx,12(%esi) - movl 16(%esi),%eax - movl 20(%esi),%ebx - movl 24(%esi),%ecx - movl 28(%esi),%edx - addl 24(%esp),%eax - adcl 28(%esp),%ebx - movl %eax,16(%esi) - movl %ebx,20(%esi) - addl 32(%esp),%ecx - adcl 36(%esp),%edx - movl %ecx,24(%esi) - movl %edx,28(%esi) - movl 32(%esi),%eax - movl 36(%esi),%ebx - movl 40(%esi),%ecx - movl 44(%esi),%edx - addl 40(%esp),%eax - adcl 44(%esp),%ebx - movl %eax,32(%esi) - movl %ebx,36(%esi) - addl 48(%esp),%ecx - adcl 52(%esp),%edx - movl %ecx,40(%esi) - movl %edx,44(%esi) - movl 48(%esi),%eax - movl 52(%esi),%ebx - movl 56(%esi),%ecx - movl 60(%esi),%edx - addl 56(%esp),%eax - adcl 60(%esp),%ebx - movl %eax,48(%esi) - movl %ebx,52(%esi) - addl 64(%esp),%ecx - adcl 68(%esp),%edx - movl %ecx,56(%esi) - movl %edx,60(%esi) - addl $840,%esp - subl $640,%ebp - cmpl 8(%esp),%edi - jb L002loop_x86 - movl 12(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.align 6,0x90 -L001K512: -.long 3609767458,1116352408 -.long 602891725,1899447441 
-.long 3964484399,3049323471 -.long 2173295548,3921009573 -.long 4081628472,961987163 -.long 3053834265,1508970993 -.long 2937671579,2453635748 -.long 3664609560,2870763221 -.long 2734883394,3624381080 -.long 1164996542,310598401 -.long 1323610764,607225278 -.long 3590304994,1426881987 -.long 4068182383,1925078388 -.long 991336113,2162078206 -.long 633803317,2614888103 -.long 3479774868,3248222580 -.long 2666613458,3835390401 -.long 944711139,4022224774 -.long 2341262773,264347078 -.long 2007800933,604807628 -.long 1495990901,770255983 -.long 1856431235,1249150122 -.long 3175218132,1555081692 -.long 2198950837,1996064986 -.long 3999719339,2554220882 -.long 766784016,2821834349 -.long 2566594879,2952996808 -.long 3203337956,3210313671 -.long 1034457026,3336571891 -.long 2466948901,3584528711 -.long 3758326383,113926993 -.long 168717936,338241895 -.long 1188179964,666307205 -.long 1546045734,773529912 -.long 1522805485,1294757372 -.long 2643833823,1396182291 -.long 2343527390,1695183700 -.long 1014477480,1986661051 -.long 1206759142,2177026350 -.long 344077627,2456956037 -.long 1290863460,2730485921 -.long 3158454273,2820302411 -.long 3505952657,3259730800 -.long 106217008,3345764771 -.long 3606008344,3516065817 -.long 1432725776,3600352804 -.long 1467031594,4094571909 -.long 851169720,275423344 -.long 3100823752,430227734 -.long 1363258195,506948616 -.long 3750685593,659060556 -.long 3785050280,883997877 -.long 3318307427,958139571 -.long 3812723403,1322822218 -.long 2003034995,1537002063 -.long 3602036899,1747873779 -.long 1575990012,1955562222 -.long 1125592928,2024104815 -.long 2716904306,2227730452 -.long 442776044,2361852424 -.long 593698344,2428436474 -.long 3733110249,2756734187 -.long 2999351573,3204031479 -.long 3815920427,3329325298 -.long 3928383900,3391569614 -.long 566280711,3515267271 -.long 3454069534,3940187606 -.long 4000239992,4118630271 -.long 1914138554,116418474 -.long 2731055270,174292421 -.long 3203993006,289380356 -.long 320620315,460393269 
-.long 587496836,685471733 -.long 1086792851,852142971 -.long 365543100,1017036298 -.long 2618297676,1126000580 -.long 3409855158,1288033470 -.long 4234509866,1501505948 -.long 987167468,1607167915 -.long 1246189591,1816402316 -.long 67438087,66051 -.long 202182159,134810123 -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 -.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -.byte 62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/vpaes-x86.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/vpaes-x86.S deleted file mode 100644 index 6b5a88b304..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/vpaes-x86.S +++ /dev/null @@ -1,681 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -#ifdef BORINGSSL_DISPATCH_TEST -#endif -.align 6,0x90 -L_vpaes_consts: -.long 218628480,235210255,168496130,67568393 -.long 252381056,17041926,33884169,51187212 -.long 252645135,252645135,252645135,252645135 -.long 1512730624,3266504856,1377990664,3401244816 -.long 830229760,1275146365,2969422977,3447763452 -.long 3411033600,2979783055,338359620,2782886510 -.long 4209124096,907596821,221174255,1006095553 -.long 191964160,3799684038,3164090317,1589111125 -.long 182528256,1777043520,2877432650,3265356744 -.long 1874708224,3503451415,3305285752,363511674 -.long 1606117888,3487855781,1093350906,2384367825 -.long 197121,67569157,134941193,202313229 -.long 67569157,134941193,202313229,197121 -.long 134941193,202313229,197121,67569157 -.long 202313229,197121,67569157,134941193 -.long 33619971,100992007,168364043,235736079 -.long 235736079,33619971,100992007,168364043 -.long 168364043,235736079,33619971,100992007 -.long 100992007,168364043,235736079,33619971 -.long 50462976,117835012,185207048,252579084 -.long 252314880,51251460,117574920,184942860 -.long 184682752,252054788,50987272,118359308 -.long 118099200,185467140,251790600,50727180 -.long 2946363062,528716217,1300004225,1881839624 -.long 1532713819,1532713819,1532713819,1532713819 -.long 3602276352,4288629033,3737020424,4153884961 -.long 1354558464,32357713,2958822624,3775749553 -.long 1201988352,132424512,1572796698,503232858 -.long 2213177600,1597421020,4103937655,675398315 -.long 2749646592,4273543773,1511898873,121693092 -.long 3040248576,1103263732,2871565598,1608280554 -.long 2236667136,2588920351,482954393,64377734 -.long 3069987328,291237287,2117370568,3650299247 -.long 533321216,3573750986,2572112006,1401264716 -.long 1339849704,2721158661,548607111,3445553514 -.long 2128193280,3054596040,2183486460,1257083700 -.long 655635200,1165381986,3923443150,2344132524 -.long 190078720,256924420,290342170,357187870 -.long 
1610966272,2263057382,4103205268,309794674 -.long 2592527872,2233205587,1335446729,3402964816 -.long 3973531904,3225098121,3002836325,1918774430 -.long 3870401024,2102906079,2284471353,4117666579 -.long 617007872,1021508343,366931923,691083277 -.long 2528395776,3491914898,2968704004,1613121270 -.long 3445188352,3247741094,844474987,4093578302 -.long 651481088,1190302358,1689581232,574775300 -.long 4289380608,206939853,2555985458,2489840491 -.long 2130264064,327674451,3566485037,3349835193 -.long 2470714624,316102159,3636825756,3393945945 -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 -.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 -.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 -.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 -.byte 118,101,114,115,105,116,121,41,0 -.align 6,0x90 -.private_extern __vpaes_preheat -.align 4 -__vpaes_preheat: - addl (%esp),%ebp - movdqa -48(%ebp),%xmm7 - movdqa -16(%ebp),%xmm6 - ret -.private_extern __vpaes_encrypt_core -.align 4 -__vpaes_encrypt_core: - movl $16,%ecx - movl 240(%edx),%eax - movdqa %xmm6,%xmm1 - movdqa (%ebp),%xmm2 - pandn %xmm0,%xmm1 - pand %xmm6,%xmm0 - movdqu (%edx),%xmm5 -.byte 102,15,56,0,208 - movdqa 16(%ebp),%xmm0 - pxor %xmm5,%xmm2 - psrld $4,%xmm1 - addl $16,%edx -.byte 102,15,56,0,193 - leal 192(%ebp),%ebx - pxor %xmm2,%xmm0 - jmp L000enc_entry -.align 4,0x90 -L001enc_loop: - movdqa 32(%ebp),%xmm4 - movdqa 48(%ebp),%xmm0 -.byte 102,15,56,0,226 -.byte 102,15,56,0,195 - pxor %xmm5,%xmm4 - movdqa 64(%ebp),%xmm5 - pxor %xmm4,%xmm0 - movdqa -64(%ebx,%ecx,1),%xmm1 -.byte 102,15,56,0,234 - movdqa 80(%ebp),%xmm2 - movdqa (%ebx,%ecx,1),%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm0,%xmm3 - pxor %xmm5,%xmm2 -.byte 102,15,56,0,193 - addl $16,%edx - pxor %xmm2,%xmm0 -.byte 102,15,56,0,220 - addl $16,%ecx - pxor %xmm0,%xmm3 -.byte 102,15,56,0,193 - andl $48,%ecx - subl $1,%eax - pxor %xmm3,%xmm0 -L000enc_entry: - movdqa %xmm6,%xmm1 - movdqa -32(%ebp),%xmm5 - pandn 
%xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm6,%xmm0 -.byte 102,15,56,0,232 - movdqa %xmm7,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm7,%xmm4 - pxor %xmm5,%xmm3 -.byte 102,15,56,0,224 - movdqa %xmm7,%xmm2 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm7,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%edx),%xmm5 - pxor %xmm1,%xmm3 - jnz L001enc_loop - movdqa 96(%ebp),%xmm4 - movdqa 112(%ebp),%xmm0 -.byte 102,15,56,0,226 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,195 - movdqa 64(%ebx,%ecx,1),%xmm1 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,193 - ret -.private_extern __vpaes_decrypt_core -.align 4 -__vpaes_decrypt_core: - leal 608(%ebp),%ebx - movl 240(%edx),%eax - movdqa %xmm6,%xmm1 - movdqa -64(%ebx),%xmm2 - pandn %xmm0,%xmm1 - movl %eax,%ecx - psrld $4,%xmm1 - movdqu (%edx),%xmm5 - shll $4,%ecx - pand %xmm6,%xmm0 -.byte 102,15,56,0,208 - movdqa -48(%ebx),%xmm0 - xorl $48,%ecx -.byte 102,15,56,0,193 - andl $48,%ecx - pxor %xmm5,%xmm2 - movdqa 176(%ebp),%xmm5 - pxor %xmm2,%xmm0 - addl $16,%edx - leal -352(%ebx,%ecx,1),%ecx - jmp L002dec_entry -.align 4,0x90 -L003dec_loop: - movdqa -32(%ebx),%xmm4 - movdqa -16(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa (%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 16(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 32(%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 48(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 64(%ebx),%xmm4 - pxor %xmm1,%xmm0 - movdqa 80(%ebx),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - addl $16,%edx -.byte 102,15,58,15,237,12 - pxor %xmm1,%xmm0 - subl $1,%eax -L002dec_entry: - movdqa %xmm6,%xmm1 - movdqa -32(%ebp),%xmm2 - pandn %xmm0,%xmm1 - pand %xmm6,%xmm0 - psrld $4,%xmm1 -.byte 102,15,56,0,208 - movdqa %xmm7,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm7,%xmm4 - pxor 
%xmm2,%xmm3 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm7,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm7,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%edx),%xmm0 - pxor %xmm1,%xmm3 - jnz L003dec_loop - movdqa 96(%ebx),%xmm4 -.byte 102,15,56,0,226 - pxor %xmm0,%xmm4 - movdqa 112(%ebx),%xmm0 - movdqa (%ecx),%xmm2 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,194 - ret -.private_extern __vpaes_schedule_core -.align 4 -__vpaes_schedule_core: - addl (%esp),%ebp - movdqu (%esi),%xmm0 - movdqa 320(%ebp),%xmm2 - movdqa %xmm0,%xmm3 - leal (%ebp),%ebx - movdqa %xmm2,4(%esp) - call __vpaes_schedule_transform - movdqa %xmm0,%xmm7 - testl %edi,%edi - jnz L004schedule_am_decrypting - movdqu %xmm0,(%edx) - jmp L005schedule_go -L004schedule_am_decrypting: - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,217 - movdqu %xmm3,(%edx) - xorl $48,%ecx -L005schedule_go: - cmpl $192,%eax - ja L006schedule_256 - je L007schedule_192 -L008schedule_128: - movl $10,%eax -L009loop_schedule_128: - call __vpaes_schedule_round - decl %eax - jz L010schedule_mangle_last - call __vpaes_schedule_mangle - jmp L009loop_schedule_128 -.align 4,0x90 -L007schedule_192: - movdqu 8(%esi),%xmm0 - call __vpaes_schedule_transform - movdqa %xmm0,%xmm6 - pxor %xmm4,%xmm4 - movhlps %xmm4,%xmm6 - movl $4,%eax -L011loop_schedule_192: - call __vpaes_schedule_round -.byte 102,15,58,15,198,8 - call __vpaes_schedule_mangle - call __vpaes_schedule_192_smear - call __vpaes_schedule_mangle - call __vpaes_schedule_round - decl %eax - jz L010schedule_mangle_last - call __vpaes_schedule_mangle - call __vpaes_schedule_192_smear - jmp L011loop_schedule_192 -.align 4,0x90 -L006schedule_256: - movdqu 16(%esi),%xmm0 - call __vpaes_schedule_transform - movl $7,%eax -L012loop_schedule_256: - call __vpaes_schedule_mangle - movdqa %xmm0,%xmm6 - call __vpaes_schedule_round - decl %eax - jz L010schedule_mangle_last - call __vpaes_schedule_mangle - pshufd $255,%xmm0,%xmm0 - movdqa %xmm7,20(%esp) - 
movdqa %xmm6,%xmm7 - call L_vpaes_schedule_low_round - movdqa 20(%esp),%xmm7 - jmp L012loop_schedule_256 -.align 4,0x90 -L010schedule_mangle_last: - leal 384(%ebp),%ebx - testl %edi,%edi - jnz L013schedule_mangle_last_dec - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,193 - leal 352(%ebp),%ebx - addl $32,%edx -L013schedule_mangle_last_dec: - addl $-16,%edx - pxor 336(%ebp),%xmm0 - call __vpaes_schedule_transform - movdqu %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - ret -.private_extern __vpaes_schedule_192_smear -.align 4 -__vpaes_schedule_192_smear: - pshufd $128,%xmm6,%xmm1 - pshufd $254,%xmm7,%xmm0 - pxor %xmm1,%xmm6 - pxor %xmm1,%xmm1 - pxor %xmm0,%xmm6 - movdqa %xmm6,%xmm0 - movhlps %xmm1,%xmm6 - ret -.private_extern __vpaes_schedule_round -.align 4 -__vpaes_schedule_round: - movdqa 8(%esp),%xmm2 - pxor %xmm1,%xmm1 -.byte 102,15,58,15,202,15 -.byte 102,15,58,15,210,15 - pxor %xmm1,%xmm7 - pshufd $255,%xmm0,%xmm0 -.byte 102,15,58,15,192,1 - movdqa %xmm2,8(%esp) -L_vpaes_schedule_low_round: - movdqa %xmm7,%xmm1 - pslldq $4,%xmm7 - pxor %xmm1,%xmm7 - movdqa %xmm7,%xmm1 - pslldq $8,%xmm7 - pxor %xmm1,%xmm7 - pxor 336(%ebp),%xmm7 - movdqa -16(%ebp),%xmm4 - movdqa -48(%ebp),%xmm5 - movdqa %xmm4,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm4,%xmm0 - movdqa -32(%ebp),%xmm2 -.byte 102,15,56,0,208 - pxor %xmm1,%xmm0 - movdqa %xmm5,%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - movdqa %xmm5,%xmm4 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm5,%xmm2 -.byte 102,15,56,0,211 - pxor %xmm0,%xmm2 - movdqa %xmm5,%xmm3 -.byte 102,15,56,0,220 - pxor %xmm1,%xmm3 - movdqa 32(%ebp),%xmm4 -.byte 102,15,56,0,226 - movdqa 48(%ebp),%xmm0 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 - pxor %xmm7,%xmm0 - movdqa %xmm0,%xmm7 - ret -.private_extern __vpaes_schedule_transform -.align 4 -__vpaes_schedule_transform: - movdqa -16(%ebp),%xmm2 - movdqa 
%xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - movdqa (%ebx),%xmm2 -.byte 102,15,56,0,208 - movdqa 16(%ebx),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm2,%xmm0 - ret -.private_extern __vpaes_schedule_mangle -.align 4 -__vpaes_schedule_mangle: - movdqa %xmm0,%xmm4 - movdqa 128(%ebp),%xmm5 - testl %edi,%edi - jnz L014schedule_mangle_dec - addl $16,%edx - pxor 336(%ebp),%xmm4 -.byte 102,15,56,0,229 - movdqa %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 - jmp L015schedule_mangle_both -.align 4,0x90 -L014schedule_mangle_dec: - movdqa -16(%ebp),%xmm2 - leal 416(%ebp),%esi - movdqa %xmm2,%xmm1 - pandn %xmm4,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm4 - movdqa (%esi),%xmm2 -.byte 102,15,56,0,212 - movdqa 16(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 32(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 48(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 64(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 80(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - movdqa 96(%esi),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 112(%esi),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - addl $-16,%edx -L015schedule_mangle_both: - movdqa 256(%ebp,%ecx,1),%xmm1 -.byte 102,15,56,0,217 - addl $-16,%ecx - andl $48,%ecx - movdqu %xmm3,(%edx) - ret -.globl _vpaes_set_encrypt_key -.private_extern _vpaes_set_encrypt_key -.align 4 -_vpaes_set_encrypt_key: -L_vpaes_set_encrypt_key_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call L016pic -L016pic: - popl %ebx - leal _BORINGSSL_function_hit+5-L016pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%eax - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) 
- movl %eax,%ebx - shrl $5,%ebx - addl $5,%ebx - movl %ebx,240(%edx) - movl $48,%ecx - movl $0,%edi - leal L_vpaes_consts+0x30-L017pic_point,%ebp - call __vpaes_schedule_core -L017pic_point: - movl 48(%esp),%esp - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _vpaes_set_decrypt_key -.private_extern _vpaes_set_decrypt_key -.align 4 -_vpaes_set_decrypt_key: -L_vpaes_set_decrypt_key_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%eax - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movl %eax,%ebx - shrl $5,%ebx - addl $5,%ebx - movl %ebx,240(%edx) - shll $4,%ebx - leal 16(%edx,%ebx,1),%edx - movl $1,%edi - movl %eax,%ecx - shrl $1,%ecx - andl $32,%ecx - xorl $32,%ecx - leal L_vpaes_consts+0x30-L018pic_point,%ebp - call __vpaes_schedule_core -L018pic_point: - movl 48(%esp),%esp - xorl %eax,%eax - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _vpaes_encrypt -.private_extern _vpaes_encrypt -.align 4 -_vpaes_encrypt: -L_vpaes_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi -#ifdef BORINGSSL_DISPATCH_TEST - pushl %ebx - pushl %edx - call L019pic -L019pic: - popl %ebx - leal _BORINGSSL_function_hit+4-L019pic(%ebx),%ebx - movl $1,%edx - movb %dl,(%ebx) - popl %edx - popl %ebx -#endif - leal L_vpaes_consts+0x30-L020pic_point,%ebp - call __vpaes_preheat -L020pic_point: - movl 20(%esp),%esi - leal -56(%esp),%ebx - movl 24(%esp),%edi - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movdqu (%esi),%xmm0 - call __vpaes_encrypt_core - movdqu %xmm0,(%edi) - movl 48(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _vpaes_decrypt -.private_extern _vpaes_decrypt -.align 4 -_vpaes_decrypt: -L_vpaes_decrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - leal L_vpaes_consts+0x30-L021pic_point,%ebp - call __vpaes_preheat -L021pic_point: - movl 20(%esp),%esi - 
leal -56(%esp),%ebx - movl 24(%esp),%edi - andl $-16,%ebx - movl 28(%esp),%edx - xchgl %esp,%ebx - movl %ebx,48(%esp) - movdqu (%esi),%xmm0 - call __vpaes_decrypt_core - movdqu %xmm0,(%edi) - movl 48(%esp),%esp - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _vpaes_cbc_encrypt -.private_extern _vpaes_cbc_encrypt -.align 4 -_vpaes_cbc_encrypt: -L_vpaes_cbc_encrypt_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 20(%esp),%esi - movl 24(%esp),%edi - movl 28(%esp),%eax - movl 32(%esp),%edx - subl $16,%eax - jc L022cbc_abort - leal -56(%esp),%ebx - movl 36(%esp),%ebp - andl $-16,%ebx - movl 40(%esp),%ecx - xchgl %esp,%ebx - movdqu (%ebp),%xmm1 - subl %esi,%edi - movl %ebx,48(%esp) - movl %edi,(%esp) - movl %edx,4(%esp) - movl %ebp,8(%esp) - movl %eax,%edi - leal L_vpaes_consts+0x30-L023pic_point,%ebp - call __vpaes_preheat -L023pic_point: - cmpl $0,%ecx - je L024cbc_dec_loop - jmp L025cbc_enc_loop -.align 4,0x90 -L025cbc_enc_loop: - movdqu (%esi),%xmm0 - pxor %xmm1,%xmm0 - call __vpaes_encrypt_core - movl (%esp),%ebx - movl 4(%esp),%edx - movdqa %xmm0,%xmm1 - movdqu %xmm0,(%ebx,%esi,1) - leal 16(%esi),%esi - subl $16,%edi - jnc L025cbc_enc_loop - jmp L026cbc_done -.align 4,0x90 -L024cbc_dec_loop: - movdqu (%esi),%xmm0 - movdqa %xmm1,16(%esp) - movdqa %xmm0,32(%esp) - call __vpaes_decrypt_core - movl (%esp),%ebx - movl 4(%esp),%edx - pxor 16(%esp),%xmm0 - movdqa 32(%esp),%xmm1 - movdqu %xmm0,(%ebx,%esi,1) - leal 16(%esi),%esi - subl $16,%edi - jnc L024cbc_dec_loop -L026cbc_done: - movl 8(%esp),%ebx - movl 48(%esp),%esp - movdqu %xmm1,(%ebx) -L022cbc_abort: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/x86-mont.S b/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/x86-mont.S deleted file mode 100644 index 3ef8774ed5..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/fipsmodule/x86-mont.S +++ /dev/null @@ -1,485 +0,0 @@ -# This file is 
generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _bn_mul_mont -.private_extern _bn_mul_mont -.align 4 -_bn_mul_mont: -L_bn_mul_mont_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - xorl %eax,%eax - movl 40(%esp),%edi - cmpl $4,%edi - jl L000just_leave - leal 20(%esp),%esi - leal 24(%esp),%edx - addl $2,%edi - negl %edi - leal -32(%esp,%edi,4),%ebp - negl %edi - movl %ebp,%eax - subl %edx,%eax - andl $2047,%eax - subl %eax,%ebp - xorl %ebp,%edx - andl $2048,%edx - xorl $2048,%edx - subl %edx,%ebp - andl $-64,%ebp - movl %esp,%eax - subl %ebp,%eax - andl $-4096,%eax - movl %esp,%edx - leal (%ebp,%eax,1),%esp - movl (%esp),%eax - cmpl %ebp,%esp - ja L001page_walk - jmp L002page_walk_done -.align 4,0x90 -L001page_walk: - leal -4096(%esp),%esp - movl (%esp),%eax - cmpl %ebp,%esp - ja L001page_walk -L002page_walk_done: - movl (%esi),%eax - movl 4(%esi),%ebx - movl 8(%esi),%ecx - movl 12(%esi),%ebp - movl 16(%esi),%esi - movl (%esi),%esi - movl %eax,4(%esp) - movl %ebx,8(%esp) - movl %ecx,12(%esp) - movl %ebp,16(%esp) - movl %esi,20(%esp) - leal -3(%edi),%ebx - movl %edx,24(%esp) - call L003PIC_me_up -L003PIC_me_up: - popl %eax - movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L003PIC_me_up(%eax),%eax - btl $26,(%eax) - jnc L004non_sse2 - movl $-1,%eax - movd %eax,%mm7 - movl 8(%esp),%esi - movl 12(%esp),%edi - movl 16(%esp),%ebp - xorl %edx,%edx - xorl %ecx,%ecx - movd (%edi),%mm4 - movd (%esi),%mm5 - movd (%ebp),%mm3 - pmuludq %mm4,%mm5 - movq %mm5,%mm2 - movq %mm5,%mm0 - pand %mm7,%mm0 - pmuludq 20(%esp),%mm5 - pmuludq %mm5,%mm3 - paddq %mm0,%mm3 - movd 4(%ebp),%mm1 - movd 4(%esi),%mm0 - psrlq $32,%mm2 - psrlq $32,%mm3 - incl %ecx -.align 4,0x90 -L0051st: - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - movd 4(%ebp,%ecx,4),%mm1 - paddq %mm0,%mm3 - movd 
4(%esi,%ecx,4),%mm0 - psrlq $32,%mm2 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm3 - leal 1(%ecx),%ecx - cmpl %ebx,%ecx - jl L0051st - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - paddq %mm0,%mm3 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm2 - psrlq $32,%mm3 - paddq %mm2,%mm3 - movq %mm3,32(%esp,%ebx,4) - incl %edx -L006outer: - xorl %ecx,%ecx - movd (%edi,%edx,4),%mm4 - movd (%esi),%mm5 - movd 32(%esp),%mm6 - movd (%ebp),%mm3 - pmuludq %mm4,%mm5 - paddq %mm6,%mm5 - movq %mm5,%mm0 - movq %mm5,%mm2 - pand %mm7,%mm0 - pmuludq 20(%esp),%mm5 - pmuludq %mm5,%mm3 - paddq %mm0,%mm3 - movd 36(%esp),%mm6 - movd 4(%ebp),%mm1 - movd 4(%esi),%mm0 - psrlq $32,%mm2 - psrlq $32,%mm3 - paddq %mm6,%mm2 - incl %ecx - decl %ebx -L007inner: - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - movd 36(%esp,%ecx,4),%mm6 - pand %mm7,%mm0 - movd 4(%ebp,%ecx,4),%mm1 - paddq %mm0,%mm3 - movd 4(%esi,%ecx,4),%mm0 - psrlq $32,%mm2 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm3 - paddq %mm6,%mm2 - decl %ebx - leal 1(%ecx),%ecx - jnz L007inner - movl %ecx,%ebx - pmuludq %mm4,%mm0 - pmuludq %mm5,%mm1 - paddq %mm0,%mm2 - paddq %mm1,%mm3 - movq %mm2,%mm0 - pand %mm7,%mm0 - paddq %mm0,%mm3 - movd %mm3,28(%esp,%ecx,4) - psrlq $32,%mm2 - psrlq $32,%mm3 - movd 36(%esp,%ebx,4),%mm6 - paddq %mm2,%mm3 - paddq %mm6,%mm3 - movq %mm3,32(%esp,%ebx,4) - leal 1(%edx),%edx - cmpl %ebx,%edx - jle L006outer - emms - jmp L008common_tail -.align 4,0x90 -L004non_sse2: - movl 8(%esp),%esi - leal 1(%ebx),%ebp - movl 12(%esp),%edi - xorl %ecx,%ecx - movl %esi,%edx - andl $1,%ebp - subl %edi,%edx - leal 4(%edi,%ebx,4),%eax - orl %edx,%ebp - movl (%edi),%edi - jz L009bn_sqr_mont - movl %eax,28(%esp) - movl (%esi),%eax - xorl %edx,%edx -.align 4,0x90 -L010mull: - movl %edx,%ebp - mull %edi - addl %eax,%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - movl (%esi,%ecx,4),%eax - cmpl %ebx,%ecx - movl %ebp,28(%esp,%ecx,4) - jl 
L010mull - movl %edx,%ebp - mull %edi - movl 20(%esp),%edi - addl %ebp,%eax - movl 16(%esp),%esi - adcl $0,%edx - imull 32(%esp),%edi - movl %eax,32(%esp,%ebx,4) - xorl %ecx,%ecx - movl %edx,36(%esp,%ebx,4) - movl %ecx,40(%esp,%ebx,4) - movl (%esi),%eax - mull %edi - addl 32(%esp),%eax - movl 4(%esi),%eax - adcl $0,%edx - incl %ecx - jmp L0112ndmadd -.align 4,0x90 -L0121stmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,28(%esp,%ecx,4) - jl L0121stmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%eax - movl 20(%esp),%edi - adcl $0,%edx - movl 16(%esp),%esi - addl %eax,%ebp - adcl $0,%edx - imull 32(%esp),%edi - xorl %ecx,%ecx - addl 36(%esp,%ebx,4),%edx - movl %ebp,32(%esp,%ebx,4) - adcl $0,%ecx - movl (%esi),%eax - movl %edx,36(%esp,%ebx,4) - movl %ecx,40(%esp,%ebx,4) - mull %edi - addl 32(%esp),%eax - movl 4(%esi),%eax - adcl $0,%edx - movl $1,%ecx -.align 4,0x90 -L0112ndmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,24(%esp,%ecx,4) - jl L0112ndmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - adcl $0,%edx - movl %ebp,28(%esp,%ebx,4) - xorl %eax,%eax - movl 12(%esp),%ecx - addl 36(%esp,%ebx,4),%edx - adcl 40(%esp,%ebx,4),%eax - leal 4(%ecx),%ecx - movl %edx,32(%esp,%ebx,4) - cmpl 28(%esp),%ecx - movl %eax,36(%esp,%ebx,4) - je L008common_tail - movl (%ecx),%edi - movl 8(%esp),%esi - movl %ecx,12(%esp) - xorl %ecx,%ecx - xorl %edx,%edx - movl (%esi),%eax - jmp L0121stmadd -.align 4,0x90 -L009bn_sqr_mont: - movl %ebx,(%esp) - movl %ecx,12(%esp) - movl %edi,%eax - mull %edi - movl %eax,32(%esp) - movl %edx,%ebx - shrl $1,%edx - andl $1,%ebx - incl %ecx -.align 4,0x90 -L013sqr: - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl 
%ebp,%eax - leal 1(%ecx),%ecx - adcl $0,%edx - leal (%ebx,%eax,2),%ebp - shrl $31,%eax - cmpl (%esp),%ecx - movl %eax,%ebx - movl %ebp,28(%esp,%ecx,4) - jl L013sqr - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl %ebp,%eax - movl 20(%esp),%edi - adcl $0,%edx - movl 16(%esp),%esi - leal (%ebx,%eax,2),%ebp - imull 32(%esp),%edi - shrl $31,%eax - movl %ebp,32(%esp,%ecx,4) - leal (%eax,%edx,2),%ebp - movl (%esi),%eax - shrl $31,%edx - movl %ebp,36(%esp,%ecx,4) - movl %edx,40(%esp,%ecx,4) - mull %edi - addl 32(%esp),%eax - movl %ecx,%ebx - adcl $0,%edx - movl 4(%esi),%eax - movl $1,%ecx -.align 4,0x90 -L0143rdmadd: - movl %edx,%ebp - mull %edi - addl 32(%esp,%ecx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - movl 4(%esi,%ecx,4),%eax - adcl $0,%edx - movl %ebp,28(%esp,%ecx,4) - movl %edx,%ebp - mull %edi - addl 36(%esp,%ecx,4),%ebp - leal 2(%ecx),%ecx - adcl $0,%edx - addl %eax,%ebp - movl (%esi,%ecx,4),%eax - adcl $0,%edx - cmpl %ebx,%ecx - movl %ebp,24(%esp,%ecx,4) - jl L0143rdmadd - movl %edx,%ebp - mull %edi - addl 32(%esp,%ebx,4),%ebp - adcl $0,%edx - addl %eax,%ebp - adcl $0,%edx - movl %ebp,28(%esp,%ebx,4) - movl 12(%esp),%ecx - xorl %eax,%eax - movl 8(%esp),%esi - addl 36(%esp,%ebx,4),%edx - adcl 40(%esp,%ebx,4),%eax - movl %edx,32(%esp,%ebx,4) - cmpl %ebx,%ecx - movl %eax,36(%esp,%ebx,4) - je L008common_tail - movl 4(%esi,%ecx,4),%edi - leal 1(%ecx),%ecx - movl %edi,%eax - movl %ecx,12(%esp) - mull %edi - addl 32(%esp,%ecx,4),%eax - adcl $0,%edx - movl %eax,32(%esp,%ecx,4) - xorl %ebp,%ebp - cmpl %ebx,%ecx - leal 1(%ecx),%ecx - je L015sqrlast - movl %edx,%ebx - shrl $1,%edx - andl $1,%ebx -.align 4,0x90 -L016sqradd: - movl (%esi,%ecx,4),%eax - movl %edx,%ebp - mull %edi - addl %ebp,%eax - leal (%eax,%eax,1),%ebp - adcl $0,%edx - shrl $31,%eax - addl 32(%esp,%ecx,4),%ebp - leal 1(%ecx),%ecx - adcl $0,%eax - addl %ebx,%ebp - adcl $0,%eax - cmpl (%esp),%ecx - movl %ebp,28(%esp,%ecx,4) - movl %eax,%ebx - jle L016sqradd - movl %edx,%ebp - addl %edx,%edx - 
shrl $31,%ebp - addl %ebx,%edx - adcl $0,%ebp -L015sqrlast: - movl 20(%esp),%edi - movl 16(%esp),%esi - imull 32(%esp),%edi - addl 32(%esp,%ecx,4),%edx - movl (%esi),%eax - adcl $0,%ebp - movl %edx,32(%esp,%ecx,4) - movl %ebp,36(%esp,%ecx,4) - mull %edi - addl 32(%esp),%eax - leal -1(%ecx),%ebx - adcl $0,%edx - movl $1,%ecx - movl 4(%esi),%eax - jmp L0143rdmadd -.align 4,0x90 -L008common_tail: - movl 16(%esp),%ebp - movl 4(%esp),%edi - leal 32(%esp),%esi - movl (%esi),%eax - movl %ebx,%ecx - xorl %edx,%edx -.align 4,0x90 -L017sub: - sbbl (%ebp,%edx,4),%eax - movl %eax,(%edi,%edx,4) - decl %ecx - movl 4(%esi,%edx,4),%eax - leal 1(%edx),%edx - jge L017sub - sbbl $0,%eax - movl $-1,%edx - xorl %eax,%edx - jmp L018copy -.align 4,0x90 -L018copy: - movl 32(%esp,%ebx,4),%esi - movl (%edi,%ebx,4),%ebp - movl %ecx,32(%esp,%ebx,4) - andl %eax,%esi - andl %edx,%ebp - orl %esi,%ebp - movl %ebp,(%edi,%ebx,4) - decl %ebx - jge L018copy - movl 24(%esp),%esp - movl $1,%eax -L000just_leave: - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 -.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 -.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 -.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 -.byte 111,114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L_OPENSSL_ia32cap_P$non_lazy_ptr: -.indirect_symbol _OPENSSL_ia32cap_P -.long 0 -#endif diff --git a/packager/third_party/boringssl/mac-x86/crypto/test/trampoline-x86.S b/packager/third_party/boringssl/mac-x86/crypto/test/trampoline-x86.S deleted file mode 100644 index 601f2f0151..0000000000 --- a/packager/third_party/boringssl/mac-x86/crypto/test/trampoline-x86.S +++ /dev/null @@ -1,169 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__i386__) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.globl _abi_test_trampoline -.private_extern _abi_test_trampoline -.align 4 -_abi_test_trampoline: -L_abi_test_trampoline_begin: - pushl %ebp - pushl %ebx - pushl %esi - pushl %edi - movl 24(%esp),%ecx - movl (%ecx),%esi - movl 4(%ecx),%edi - movl 8(%ecx),%ebx - movl 12(%ecx),%ebp - subl $44,%esp - movl 72(%esp),%eax - xorl %ecx,%ecx -L000loop: - cmpl 76(%esp),%ecx - jae L001loop_done - movl (%eax,%ecx,4),%edx - movl %edx,(%esp,%ecx,4) - addl $1,%ecx - jmp L000loop -L001loop_done: - call *64(%esp) - addl $44,%esp - movl 24(%esp),%ecx - movl %esi,(%ecx) - movl %edi,4(%ecx) - movl %ebx,8(%ecx) - movl %ebp,12(%ecx) - popl %edi - popl %esi - popl %ebx - popl %ebp - ret -.globl _abi_test_get_and_clear_direction_flag -.private_extern _abi_test_get_and_clear_direction_flag -.align 4 -_abi_test_get_and_clear_direction_flag: -L_abi_test_get_and_clear_direction_flag_begin: - pushfl - popl %eax - andl $1024,%eax - shrl $10,%eax - cld - ret -.globl _abi_test_set_direction_flag -.private_extern _abi_test_set_direction_flag -.align 4 -_abi_test_set_direction_flag: -L_abi_test_set_direction_flag_begin: - std - ret -.globl _abi_test_clobber_eax -.private_extern _abi_test_clobber_eax -.align 4 -_abi_test_clobber_eax: -L_abi_test_clobber_eax_begin: - xorl %eax,%eax - ret -.globl _abi_test_clobber_ebx -.private_extern _abi_test_clobber_ebx -.align 4 -_abi_test_clobber_ebx: -L_abi_test_clobber_ebx_begin: - xorl %ebx,%ebx - ret -.globl _abi_test_clobber_ecx -.private_extern _abi_test_clobber_ecx -.align 4 -_abi_test_clobber_ecx: -L_abi_test_clobber_ecx_begin: - xorl %ecx,%ecx - ret -.globl _abi_test_clobber_edx -.private_extern _abi_test_clobber_edx -.align 4 -_abi_test_clobber_edx: -L_abi_test_clobber_edx_begin: - xorl %edx,%edx - ret -.globl _abi_test_clobber_edi -.private_extern _abi_test_clobber_edi -.align 4 -_abi_test_clobber_edi: -L_abi_test_clobber_edi_begin: - xorl %edi,%edi - ret -.globl 
_abi_test_clobber_esi -.private_extern _abi_test_clobber_esi -.align 4 -_abi_test_clobber_esi: -L_abi_test_clobber_esi_begin: - xorl %esi,%esi - ret -.globl _abi_test_clobber_ebp -.private_extern _abi_test_clobber_ebp -.align 4 -_abi_test_clobber_ebp: -L_abi_test_clobber_ebp_begin: - xorl %ebp,%ebp - ret -.globl _abi_test_clobber_xmm0 -.private_extern _abi_test_clobber_xmm0 -.align 4 -_abi_test_clobber_xmm0: -L_abi_test_clobber_xmm0_begin: - pxor %xmm0,%xmm0 - ret -.globl _abi_test_clobber_xmm1 -.private_extern _abi_test_clobber_xmm1 -.align 4 -_abi_test_clobber_xmm1: -L_abi_test_clobber_xmm1_begin: - pxor %xmm1,%xmm1 - ret -.globl _abi_test_clobber_xmm2 -.private_extern _abi_test_clobber_xmm2 -.align 4 -_abi_test_clobber_xmm2: -L_abi_test_clobber_xmm2_begin: - pxor %xmm2,%xmm2 - ret -.globl _abi_test_clobber_xmm3 -.private_extern _abi_test_clobber_xmm3 -.align 4 -_abi_test_clobber_xmm3: -L_abi_test_clobber_xmm3_begin: - pxor %xmm3,%xmm3 - ret -.globl _abi_test_clobber_xmm4 -.private_extern _abi_test_clobber_xmm4 -.align 4 -_abi_test_clobber_xmm4: -L_abi_test_clobber_xmm4_begin: - pxor %xmm4,%xmm4 - ret -.globl _abi_test_clobber_xmm5 -.private_extern _abi_test_clobber_xmm5 -.align 4 -_abi_test_clobber_xmm5: -L_abi_test_clobber_xmm5_begin: - pxor %xmm5,%xmm5 - ret -.globl _abi_test_clobber_xmm6 -.private_extern _abi_test_clobber_xmm6 -.align 4 -_abi_test_clobber_xmm6: -L_abi_test_clobber_xmm6_begin: - pxor %xmm6,%xmm6 - ret -.globl _abi_test_clobber_xmm7 -.private_extern _abi_test_clobber_xmm7 -.align 4 -_abi_test_clobber_xmm7: -L_abi_test_clobber_xmm7_begin: - pxor %xmm7,%xmm7 - ret -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/chacha/chacha-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/chacha/chacha-x86_64.S deleted file mode 100644 index 10b1ad9520..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/chacha/chacha-x86_64.S +++ /dev/null @@ -1,1625 +0,0 @@ -# This file is generated from a similarly-named Perl 
script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - -.p2align 6 -L$zero: -.long 0,0,0,0 -L$one: -.long 1,0,0,0 -L$inc: -.long 0,1,2,3 -L$four: -.long 4,4,4,4 -L$incy: -.long 0,2,4,6,1,3,5,7 -L$eight: -.long 8,8,8,8,8,8,8,8 -L$rot16: -.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd -L$rot24: -.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe -L$sigma: -.byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 -.p2align 6 -L$zeroz: -.long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 -L$fourz: -.long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 -L$incz: -.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 -L$sixteen: -.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 -.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.globl _ChaCha20_ctr32 -.private_extern _ChaCha20_ctr32 - -.p2align 6 -_ChaCha20_ctr32: - - cmpq $0,%rdx - je L$no_data - movq _OPENSSL_ia32cap_P+4(%rip),%r10 - testl $512,%r10d - jnz L$ChaCha20_ssse3 - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $64+24,%rsp - -L$ctr32_body: - - - movdqu (%rcx),%xmm1 - movdqu 16(%rcx),%xmm2 - movdqu (%r8),%xmm3 - movdqa L$one(%rip),%xmm4 - - - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - movq %rdx,%rbp - jmp L$oop_outer - -.p2align 5 -L$oop_outer: - movl $0x61707865,%eax - movl $0x3320646e,%ebx - movl $0x79622d32,%ecx - movl $0x6b206574,%edx - movl 16(%rsp),%r8d - movl 20(%rsp),%r9d - movl 24(%rsp),%r10d - movl 28(%rsp),%r11d - movd %xmm3,%r12d - movl 52(%rsp),%r13d - movl 56(%rsp),%r14d - movl 60(%rsp),%r15d - - movq 
%rbp,64+0(%rsp) - movl $10,%ebp - movq %rsi,64+8(%rsp) -.byte 102,72,15,126,214 - movq %rdi,64+16(%rsp) - movq %rsi,%rdi - shrq $32,%rdi - jmp L$oop - -.p2align 5 -L$oop: - addl %r8d,%eax - xorl %eax,%r12d - roll $16,%r12d - addl %r9d,%ebx - xorl %ebx,%r13d - roll $16,%r13d - addl %r12d,%esi - xorl %esi,%r8d - roll $12,%r8d - addl %r13d,%edi - xorl %edi,%r9d - roll $12,%r9d - addl %r8d,%eax - xorl %eax,%r12d - roll $8,%r12d - addl %r9d,%ebx - xorl %ebx,%r13d - roll $8,%r13d - addl %r12d,%esi - xorl %esi,%r8d - roll $7,%r8d - addl %r13d,%edi - xorl %edi,%r9d - roll $7,%r9d - movl %esi,32(%rsp) - movl %edi,36(%rsp) - movl 40(%rsp),%esi - movl 44(%rsp),%edi - addl %r10d,%ecx - xorl %ecx,%r14d - roll $16,%r14d - addl %r11d,%edx - xorl %edx,%r15d - roll $16,%r15d - addl %r14d,%esi - xorl %esi,%r10d - roll $12,%r10d - addl %r15d,%edi - xorl %edi,%r11d - roll $12,%r11d - addl %r10d,%ecx - xorl %ecx,%r14d - roll $8,%r14d - addl %r11d,%edx - xorl %edx,%r15d - roll $8,%r15d - addl %r14d,%esi - xorl %esi,%r10d - roll $7,%r10d - addl %r15d,%edi - xorl %edi,%r11d - roll $7,%r11d - addl %r9d,%eax - xorl %eax,%r15d - roll $16,%r15d - addl %r10d,%ebx - xorl %ebx,%r12d - roll $16,%r12d - addl %r15d,%esi - xorl %esi,%r9d - roll $12,%r9d - addl %r12d,%edi - xorl %edi,%r10d - roll $12,%r10d - addl %r9d,%eax - xorl %eax,%r15d - roll $8,%r15d - addl %r10d,%ebx - xorl %ebx,%r12d - roll $8,%r12d - addl %r15d,%esi - xorl %esi,%r9d - roll $7,%r9d - addl %r12d,%edi - xorl %edi,%r10d - roll $7,%r10d - movl %esi,40(%rsp) - movl %edi,44(%rsp) - movl 32(%rsp),%esi - movl 36(%rsp),%edi - addl %r11d,%ecx - xorl %ecx,%r13d - roll $16,%r13d - addl %r8d,%edx - xorl %edx,%r14d - roll $16,%r14d - addl %r13d,%esi - xorl %esi,%r11d - roll $12,%r11d - addl %r14d,%edi - xorl %edi,%r8d - roll $12,%r8d - addl %r11d,%ecx - xorl %ecx,%r13d - roll $8,%r13d - addl %r8d,%edx - xorl %edx,%r14d - roll $8,%r14d - addl %r13d,%esi - xorl %esi,%r11d - roll $7,%r11d - addl %r14d,%edi - xorl %edi,%r8d - roll $7,%r8d - 
decl %ebp - jnz L$oop - movl %edi,36(%rsp) - movl %esi,32(%rsp) - movq 64(%rsp),%rbp - movdqa %xmm2,%xmm1 - movq 64+8(%rsp),%rsi - paddd %xmm4,%xmm3 - movq 64+16(%rsp),%rdi - - addl $0x61707865,%eax - addl $0x3320646e,%ebx - addl $0x79622d32,%ecx - addl $0x6b206574,%edx - addl 16(%rsp),%r8d - addl 20(%rsp),%r9d - addl 24(%rsp),%r10d - addl 28(%rsp),%r11d - addl 48(%rsp),%r12d - addl 52(%rsp),%r13d - addl 56(%rsp),%r14d - addl 60(%rsp),%r15d - paddd 32(%rsp),%xmm1 - - cmpq $64,%rbp - jb L$tail - - xorl 0(%rsi),%eax - xorl 4(%rsi),%ebx - xorl 8(%rsi),%ecx - xorl 12(%rsi),%edx - xorl 16(%rsi),%r8d - xorl 20(%rsi),%r9d - xorl 24(%rsi),%r10d - xorl 28(%rsi),%r11d - movdqu 32(%rsi),%xmm0 - xorl 48(%rsi),%r12d - xorl 52(%rsi),%r13d - xorl 56(%rsi),%r14d - xorl 60(%rsi),%r15d - leaq 64(%rsi),%rsi - pxor %xmm1,%xmm0 - - movdqa %xmm2,32(%rsp) - movd %xmm3,48(%rsp) - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - movdqu %xmm0,32(%rdi) - movl %r12d,48(%rdi) - movl %r13d,52(%rdi) - movl %r14d,56(%rdi) - movl %r15d,60(%rdi) - leaq 64(%rdi),%rdi - - subq $64,%rbp - jnz L$oop_outer - - jmp L$done - -.p2align 4 -L$tail: - movl %eax,0(%rsp) - movl %ebx,4(%rsp) - xorq %rbx,%rbx - movl %ecx,8(%rsp) - movl %edx,12(%rsp) - movl %r8d,16(%rsp) - movl %r9d,20(%rsp) - movl %r10d,24(%rsp) - movl %r11d,28(%rsp) - movdqa %xmm1,32(%rsp) - movl %r12d,48(%rsp) - movl %r13d,52(%rsp) - movl %r14d,56(%rsp) - movl %r15d,60(%rsp) - -L$oop_tail: - movzbl (%rsi,%rbx,1),%eax - movzbl (%rsp,%rbx,1),%edx - leaq 1(%rbx),%rbx - xorl %edx,%eax - movb %al,-1(%rdi,%rbx,1) - decq %rbp - jnz L$oop_tail - -L$done: - leaq 64+24+48(%rsp),%rsi - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$no_data: - .byte 0xf3,0xc3 - - - -.p2align 5 -ChaCha20_ssse3: -L$ChaCha20_ssse3: - - movq 
%rsp,%r9 - - cmpq $128,%rdx - ja L$ChaCha20_4x - -L$do_sse3_after_all: - subq $64+8,%rsp - movdqa L$sigma(%rip),%xmm0 - movdqu (%rcx),%xmm1 - movdqu 16(%rcx),%xmm2 - movdqu (%r8),%xmm3 - movdqa L$rot16(%rip),%xmm6 - movdqa L$rot24(%rip),%xmm7 - - movdqa %xmm0,0(%rsp) - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - movq $10,%r8 - jmp L$oop_ssse3 - -.p2align 5 -L$oop_outer_ssse3: - movdqa L$one(%rip),%xmm3 - movdqa 0(%rsp),%xmm0 - movdqa 16(%rsp),%xmm1 - movdqa 32(%rsp),%xmm2 - paddd 48(%rsp),%xmm3 - movq $10,%r8 - movdqa %xmm3,48(%rsp) - jmp L$oop_ssse3 - -.p2align 5 -L$oop_ssse3: - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $57,%xmm1,%xmm1 - pshufd $147,%xmm3,%xmm3 - nop - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,222 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $20,%xmm1 - pslld $12,%xmm4 - por %xmm4,%xmm1 - paddd %xmm1,%xmm0 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,223 - paddd %xmm3,%xmm2 - pxor %xmm2,%xmm1 - movdqa %xmm1,%xmm4 - psrld $25,%xmm1 - pslld $7,%xmm4 - por %xmm4,%xmm1 - pshufd $78,%xmm2,%xmm2 - pshufd $147,%xmm1,%xmm1 - pshufd $57,%xmm3,%xmm3 - decq %r8 - jnz L$oop_ssse3 - paddd 0(%rsp),%xmm0 - paddd 16(%rsp),%xmm1 - paddd 32(%rsp),%xmm2 - paddd 48(%rsp),%xmm3 - - cmpq $64,%rdx - jb L$tail_ssse3 - - movdqu 0(%rsi),%xmm4 - movdqu 16(%rsi),%xmm5 - pxor %xmm4,%xmm0 - movdqu 32(%rsi),%xmm4 - pxor %xmm5,%xmm1 - movdqu 48(%rsi),%xmm5 - leaq 64(%rsi),%rsi - pxor %xmm4,%xmm2 - pxor %xmm5,%xmm3 - - movdqu %xmm0,0(%rdi) - movdqu %xmm1,16(%rdi) - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - leaq 64(%rdi),%rdi - - subq $64,%rdx - jnz L$oop_outer_ssse3 - - jmp 
L$done_ssse3 - -.p2align 4 -L$tail_ssse3: - movdqa %xmm0,0(%rsp) - movdqa %xmm1,16(%rsp) - movdqa %xmm2,32(%rsp) - movdqa %xmm3,48(%rsp) - xorq %r8,%r8 - -L$oop_tail_ssse3: - movzbl (%rsi,%r8,1),%eax - movzbl (%rsp,%r8,1),%ecx - leaq 1(%r8),%r8 - xorl %ecx,%eax - movb %al,-1(%rdi,%r8,1) - decq %rdx - jnz L$oop_tail_ssse3 - -L$done_ssse3: - leaq (%r9),%rsp - -L$ssse3_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -ChaCha20_4x: -L$ChaCha20_4x: - - movq %rsp,%r9 - - movq %r10,%r11 - shrq $32,%r10 - testq $32,%r10 - jnz L$ChaCha20_8x - cmpq $192,%rdx - ja L$proceed4x - - andq $71303168,%r11 - cmpq $4194304,%r11 - je L$do_sse3_after_all - -L$proceed4x: - subq $0x140+8,%rsp - movdqa L$sigma(%rip),%xmm11 - movdqu (%rcx),%xmm15 - movdqu 16(%rcx),%xmm7 - movdqu (%r8),%xmm3 - leaq 256(%rsp),%rcx - leaq L$rot16(%rip),%r10 - leaq L$rot24(%rip),%r11 - - pshufd $0x00,%xmm11,%xmm8 - pshufd $0x55,%xmm11,%xmm9 - movdqa %xmm8,64(%rsp) - pshufd $0xaa,%xmm11,%xmm10 - movdqa %xmm9,80(%rsp) - pshufd $0xff,%xmm11,%xmm11 - movdqa %xmm10,96(%rsp) - movdqa %xmm11,112(%rsp) - - pshufd $0x00,%xmm15,%xmm12 - pshufd $0x55,%xmm15,%xmm13 - movdqa %xmm12,128-256(%rcx) - pshufd $0xaa,%xmm15,%xmm14 - movdqa %xmm13,144-256(%rcx) - pshufd $0xff,%xmm15,%xmm15 - movdqa %xmm14,160-256(%rcx) - movdqa %xmm15,176-256(%rcx) - - pshufd $0x00,%xmm7,%xmm4 - pshufd $0x55,%xmm7,%xmm5 - movdqa %xmm4,192-256(%rcx) - pshufd $0xaa,%xmm7,%xmm6 - movdqa %xmm5,208-256(%rcx) - pshufd $0xff,%xmm7,%xmm7 - movdqa %xmm6,224-256(%rcx) - movdqa %xmm7,240-256(%rcx) - - pshufd $0x00,%xmm3,%xmm0 - pshufd $0x55,%xmm3,%xmm1 - paddd L$inc(%rip),%xmm0 - pshufd $0xaa,%xmm3,%xmm2 - movdqa %xmm1,272-256(%rcx) - pshufd $0xff,%xmm3,%xmm3 - movdqa %xmm2,288-256(%rcx) - movdqa %xmm3,304-256(%rcx) - - jmp L$oop_enter4x - -.p2align 5 -L$oop_outer4x: - movdqa 64(%rsp),%xmm8 - movdqa 80(%rsp),%xmm9 - movdqa 96(%rsp),%xmm10 - movdqa 112(%rsp),%xmm11 - movdqa 128-256(%rcx),%xmm12 - movdqa 144-256(%rcx),%xmm13 - movdqa 160-256(%rcx),%xmm14 - movdqa 
176-256(%rcx),%xmm15 - movdqa 192-256(%rcx),%xmm4 - movdqa 208-256(%rcx),%xmm5 - movdqa 224-256(%rcx),%xmm6 - movdqa 240-256(%rcx),%xmm7 - movdqa 256-256(%rcx),%xmm0 - movdqa 272-256(%rcx),%xmm1 - movdqa 288-256(%rcx),%xmm2 - movdqa 304-256(%rcx),%xmm3 - paddd L$four(%rip),%xmm0 - -L$oop_enter4x: - movdqa %xmm6,32(%rsp) - movdqa %xmm7,48(%rsp) - movdqa (%r10),%xmm7 - movl $10,%eax - movdqa %xmm0,256-256(%rcx) - jmp L$oop4x - -.p2align 5 -L$oop4x: - paddd %xmm12,%xmm8 - paddd %xmm13,%xmm9 - pxor %xmm8,%xmm0 - pxor %xmm9,%xmm1 -.byte 102,15,56,0,199 -.byte 102,15,56,0,207 - paddd %xmm0,%xmm4 - paddd %xmm1,%xmm5 - pxor %xmm4,%xmm12 - pxor %xmm5,%xmm13 - movdqa %xmm12,%xmm6 - pslld $12,%xmm12 - psrld $20,%xmm6 - movdqa %xmm13,%xmm7 - pslld $12,%xmm13 - por %xmm6,%xmm12 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm13 - paddd %xmm12,%xmm8 - paddd %xmm13,%xmm9 - pxor %xmm8,%xmm0 - pxor %xmm9,%xmm1 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 - paddd %xmm0,%xmm4 - paddd %xmm1,%xmm5 - pxor %xmm4,%xmm12 - pxor %xmm5,%xmm13 - movdqa %xmm12,%xmm7 - pslld $7,%xmm12 - psrld $25,%xmm7 - movdqa %xmm13,%xmm6 - pslld $7,%xmm13 - por %xmm7,%xmm12 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm13 - movdqa %xmm4,0(%rsp) - movdqa %xmm5,16(%rsp) - movdqa 32(%rsp),%xmm4 - movdqa 48(%rsp),%xmm5 - paddd %xmm14,%xmm10 - paddd %xmm15,%xmm11 - pxor %xmm10,%xmm2 - pxor %xmm11,%xmm3 -.byte 102,15,56,0,215 -.byte 102,15,56,0,223 - paddd %xmm2,%xmm4 - paddd %xmm3,%xmm5 - pxor %xmm4,%xmm14 - pxor %xmm5,%xmm15 - movdqa %xmm14,%xmm6 - pslld $12,%xmm14 - psrld $20,%xmm6 - movdqa %xmm15,%xmm7 - pslld $12,%xmm15 - por %xmm6,%xmm14 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm15 - paddd %xmm14,%xmm10 - paddd %xmm15,%xmm11 - pxor %xmm10,%xmm2 - pxor %xmm11,%xmm3 -.byte 102,15,56,0,214 -.byte 102,15,56,0,222 - paddd %xmm2,%xmm4 - paddd %xmm3,%xmm5 - pxor %xmm4,%xmm14 - pxor %xmm5,%xmm15 - movdqa %xmm14,%xmm7 - pslld $7,%xmm14 - psrld $25,%xmm7 - movdqa %xmm15,%xmm6 - pslld 
$7,%xmm15 - por %xmm7,%xmm14 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm15 - paddd %xmm13,%xmm8 - paddd %xmm14,%xmm9 - pxor %xmm8,%xmm3 - pxor %xmm9,%xmm0 -.byte 102,15,56,0,223 -.byte 102,15,56,0,199 - paddd %xmm3,%xmm4 - paddd %xmm0,%xmm5 - pxor %xmm4,%xmm13 - pxor %xmm5,%xmm14 - movdqa %xmm13,%xmm6 - pslld $12,%xmm13 - psrld $20,%xmm6 - movdqa %xmm14,%xmm7 - pslld $12,%xmm14 - por %xmm6,%xmm13 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm14 - paddd %xmm13,%xmm8 - paddd %xmm14,%xmm9 - pxor %xmm8,%xmm3 - pxor %xmm9,%xmm0 -.byte 102,15,56,0,222 -.byte 102,15,56,0,198 - paddd %xmm3,%xmm4 - paddd %xmm0,%xmm5 - pxor %xmm4,%xmm13 - pxor %xmm5,%xmm14 - movdqa %xmm13,%xmm7 - pslld $7,%xmm13 - psrld $25,%xmm7 - movdqa %xmm14,%xmm6 - pslld $7,%xmm14 - por %xmm7,%xmm13 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm14 - movdqa %xmm4,32(%rsp) - movdqa %xmm5,48(%rsp) - movdqa 0(%rsp),%xmm4 - movdqa 16(%rsp),%xmm5 - paddd %xmm15,%xmm10 - paddd %xmm12,%xmm11 - pxor %xmm10,%xmm1 - pxor %xmm11,%xmm2 -.byte 102,15,56,0,207 -.byte 102,15,56,0,215 - paddd %xmm1,%xmm4 - paddd %xmm2,%xmm5 - pxor %xmm4,%xmm15 - pxor %xmm5,%xmm12 - movdqa %xmm15,%xmm6 - pslld $12,%xmm15 - psrld $20,%xmm6 - movdqa %xmm12,%xmm7 - pslld $12,%xmm12 - por %xmm6,%xmm15 - psrld $20,%xmm7 - movdqa (%r11),%xmm6 - por %xmm7,%xmm12 - paddd %xmm15,%xmm10 - paddd %xmm12,%xmm11 - pxor %xmm10,%xmm1 - pxor %xmm11,%xmm2 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - paddd %xmm1,%xmm4 - paddd %xmm2,%xmm5 - pxor %xmm4,%xmm15 - pxor %xmm5,%xmm12 - movdqa %xmm15,%xmm7 - pslld $7,%xmm15 - psrld $25,%xmm7 - movdqa %xmm12,%xmm6 - pslld $7,%xmm12 - por %xmm7,%xmm15 - psrld $25,%xmm6 - movdqa (%r10),%xmm7 - por %xmm6,%xmm12 - decl %eax - jnz L$oop4x - - paddd 64(%rsp),%xmm8 - paddd 80(%rsp),%xmm9 - paddd 96(%rsp),%xmm10 - paddd 112(%rsp),%xmm11 - - movdqa %xmm8,%xmm6 - punpckldq %xmm9,%xmm8 - movdqa %xmm10,%xmm7 - punpckldq %xmm11,%xmm10 - punpckhdq %xmm9,%xmm6 - punpckhdq %xmm11,%xmm7 - movdqa 
%xmm8,%xmm9 - punpcklqdq %xmm10,%xmm8 - movdqa %xmm6,%xmm11 - punpcklqdq %xmm7,%xmm6 - punpckhqdq %xmm10,%xmm9 - punpckhqdq %xmm7,%xmm11 - paddd 128-256(%rcx),%xmm12 - paddd 144-256(%rcx),%xmm13 - paddd 160-256(%rcx),%xmm14 - paddd 176-256(%rcx),%xmm15 - - movdqa %xmm8,0(%rsp) - movdqa %xmm9,16(%rsp) - movdqa 32(%rsp),%xmm8 - movdqa 48(%rsp),%xmm9 - - movdqa %xmm12,%xmm10 - punpckldq %xmm13,%xmm12 - movdqa %xmm14,%xmm7 - punpckldq %xmm15,%xmm14 - punpckhdq %xmm13,%xmm10 - punpckhdq %xmm15,%xmm7 - movdqa %xmm12,%xmm13 - punpcklqdq %xmm14,%xmm12 - movdqa %xmm10,%xmm15 - punpcklqdq %xmm7,%xmm10 - punpckhqdq %xmm14,%xmm13 - punpckhqdq %xmm7,%xmm15 - paddd 192-256(%rcx),%xmm4 - paddd 208-256(%rcx),%xmm5 - paddd 224-256(%rcx),%xmm8 - paddd 240-256(%rcx),%xmm9 - - movdqa %xmm6,32(%rsp) - movdqa %xmm11,48(%rsp) - - movdqa %xmm4,%xmm14 - punpckldq %xmm5,%xmm4 - movdqa %xmm8,%xmm7 - punpckldq %xmm9,%xmm8 - punpckhdq %xmm5,%xmm14 - punpckhdq %xmm9,%xmm7 - movdqa %xmm4,%xmm5 - punpcklqdq %xmm8,%xmm4 - movdqa %xmm14,%xmm9 - punpcklqdq %xmm7,%xmm14 - punpckhqdq %xmm8,%xmm5 - punpckhqdq %xmm7,%xmm9 - paddd 256-256(%rcx),%xmm0 - paddd 272-256(%rcx),%xmm1 - paddd 288-256(%rcx),%xmm2 - paddd 304-256(%rcx),%xmm3 - - movdqa %xmm0,%xmm8 - punpckldq %xmm1,%xmm0 - movdqa %xmm2,%xmm7 - punpckldq %xmm3,%xmm2 - punpckhdq %xmm1,%xmm8 - punpckhdq %xmm3,%xmm7 - movdqa %xmm0,%xmm1 - punpcklqdq %xmm2,%xmm0 - movdqa %xmm8,%xmm3 - punpcklqdq %xmm7,%xmm8 - punpckhqdq %xmm2,%xmm1 - punpckhqdq %xmm7,%xmm3 - cmpq $256,%rdx - jb L$tail4x - - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 16(%rsp),%xmm6 - pxor %xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor 
%xmm1,%xmm7 - - movdqu %xmm6,64(%rdi) - movdqu 0(%rsi),%xmm6 - movdqu %xmm11,80(%rdi) - movdqu 16(%rsi),%xmm11 - movdqu %xmm2,96(%rdi) - movdqu 32(%rsi),%xmm2 - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - movdqu 48(%rsi),%xmm7 - pxor 32(%rsp),%xmm6 - pxor %xmm10,%xmm11 - pxor %xmm14,%xmm2 - pxor %xmm8,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 48(%rsp),%xmm6 - pxor %xmm15,%xmm11 - pxor %xmm9,%xmm2 - pxor %xmm3,%xmm7 - movdqu %xmm6,64(%rdi) - movdqu %xmm11,80(%rdi) - movdqu %xmm2,96(%rdi) - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - - subq $256,%rdx - jnz L$oop_outer4x - - jmp L$done4x - -L$tail4x: - cmpq $192,%rdx - jae L$192_or_more4x - cmpq $128,%rdx - jae L$128_or_more4x - cmpq $64,%rdx - jae L$64_or_more4x - - - xorq %r10,%r10 - - movdqa %xmm12,16(%rsp) - movdqa %xmm4,32(%rsp) - movdqa %xmm0,48(%rsp) - jmp L$oop_tail4x - -.p2align 5 -L$64_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - movdqu %xmm6,0(%rdi) - movdqu %xmm11,16(%rdi) - movdqu %xmm2,32(%rdi) - movdqu %xmm7,48(%rdi) - je L$done4x - - movdqa 16(%rsp),%xmm6 - leaq 64(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm13,16(%rsp) - leaq 64(%rdi),%rdi - movdqa %xmm5,32(%rsp) - subq $64,%rdx - movdqa %xmm1,48(%rsp) - jmp L$oop_tail4x - -.p2align 5 -L$128_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - pxor 16(%rsp),%xmm6 - pxor 
%xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm1,%xmm7 - movdqu %xmm6,64(%rdi) - movdqu %xmm11,80(%rdi) - movdqu %xmm2,96(%rdi) - movdqu %xmm7,112(%rdi) - je L$done4x - - movdqa 32(%rsp),%xmm6 - leaq 128(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm10,16(%rsp) - leaq 128(%rdi),%rdi - movdqa %xmm14,32(%rsp) - subq $128,%rdx - movdqa %xmm8,48(%rsp) - jmp L$oop_tail4x - -.p2align 5 -L$192_or_more4x: - movdqu 0(%rsi),%xmm6 - movdqu 16(%rsi),%xmm11 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm7 - pxor 0(%rsp),%xmm6 - pxor %xmm12,%xmm11 - pxor %xmm4,%xmm2 - pxor %xmm0,%xmm7 - - movdqu %xmm6,0(%rdi) - movdqu 64(%rsi),%xmm6 - movdqu %xmm11,16(%rdi) - movdqu 80(%rsi),%xmm11 - movdqu %xmm2,32(%rdi) - movdqu 96(%rsi),%xmm2 - movdqu %xmm7,48(%rdi) - movdqu 112(%rsi),%xmm7 - leaq 128(%rsi),%rsi - pxor 16(%rsp),%xmm6 - pxor %xmm13,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm1,%xmm7 - - movdqu %xmm6,64(%rdi) - movdqu 0(%rsi),%xmm6 - movdqu %xmm11,80(%rdi) - movdqu 16(%rsi),%xmm11 - movdqu %xmm2,96(%rdi) - movdqu 32(%rsi),%xmm2 - movdqu %xmm7,112(%rdi) - leaq 128(%rdi),%rdi - movdqu 48(%rsi),%xmm7 - pxor 32(%rsp),%xmm6 - pxor %xmm10,%xmm11 - pxor %xmm14,%xmm2 - pxor %xmm8,%xmm7 - movdqu %xmm6,0(%rdi) - movdqu %xmm11,16(%rdi) - movdqu %xmm2,32(%rdi) - movdqu %xmm7,48(%rdi) - je L$done4x - - movdqa 48(%rsp),%xmm6 - leaq 64(%rsi),%rsi - xorq %r10,%r10 - movdqa %xmm6,0(%rsp) - movdqa %xmm15,16(%rsp) - leaq 64(%rdi),%rdi - movdqa %xmm9,32(%rsp) - subq $192,%rdx - movdqa %xmm3,48(%rsp) - -L$oop_tail4x: - movzbl (%rsi,%r10,1),%eax - movzbl (%rsp,%r10,1),%ecx - leaq 1(%r10),%r10 - xorl %ecx,%eax - movb %al,-1(%rdi,%r10,1) - decq %rdx - jnz L$oop_tail4x - -L$done4x: - leaq (%r9),%rsp - -L$4x_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -ChaCha20_8x: -L$ChaCha20_8x: - - movq %rsp,%r9 - - subq $0x280+8,%rsp - andq $-32,%rsp - vzeroupper - - - - - - - - - - - vbroadcasti128 L$sigma(%rip),%ymm11 - vbroadcasti128 (%rcx),%ymm3 - vbroadcasti128 16(%rcx),%ymm15 - vbroadcasti128 
(%r8),%ymm7 - leaq 256(%rsp),%rcx - leaq 512(%rsp),%rax - leaq L$rot16(%rip),%r10 - leaq L$rot24(%rip),%r11 - - vpshufd $0x00,%ymm11,%ymm8 - vpshufd $0x55,%ymm11,%ymm9 - vmovdqa %ymm8,128-256(%rcx) - vpshufd $0xaa,%ymm11,%ymm10 - vmovdqa %ymm9,160-256(%rcx) - vpshufd $0xff,%ymm11,%ymm11 - vmovdqa %ymm10,192-256(%rcx) - vmovdqa %ymm11,224-256(%rcx) - - vpshufd $0x00,%ymm3,%ymm0 - vpshufd $0x55,%ymm3,%ymm1 - vmovdqa %ymm0,256-256(%rcx) - vpshufd $0xaa,%ymm3,%ymm2 - vmovdqa %ymm1,288-256(%rcx) - vpshufd $0xff,%ymm3,%ymm3 - vmovdqa %ymm2,320-256(%rcx) - vmovdqa %ymm3,352-256(%rcx) - - vpshufd $0x00,%ymm15,%ymm12 - vpshufd $0x55,%ymm15,%ymm13 - vmovdqa %ymm12,384-512(%rax) - vpshufd $0xaa,%ymm15,%ymm14 - vmovdqa %ymm13,416-512(%rax) - vpshufd $0xff,%ymm15,%ymm15 - vmovdqa %ymm14,448-512(%rax) - vmovdqa %ymm15,480-512(%rax) - - vpshufd $0x00,%ymm7,%ymm4 - vpshufd $0x55,%ymm7,%ymm5 - vpaddd L$incy(%rip),%ymm4,%ymm4 - vpshufd $0xaa,%ymm7,%ymm6 - vmovdqa %ymm5,544-512(%rax) - vpshufd $0xff,%ymm7,%ymm7 - vmovdqa %ymm6,576-512(%rax) - vmovdqa %ymm7,608-512(%rax) - - jmp L$oop_enter8x - -.p2align 5 -L$oop_outer8x: - vmovdqa 128-256(%rcx),%ymm8 - vmovdqa 160-256(%rcx),%ymm9 - vmovdqa 192-256(%rcx),%ymm10 - vmovdqa 224-256(%rcx),%ymm11 - vmovdqa 256-256(%rcx),%ymm0 - vmovdqa 288-256(%rcx),%ymm1 - vmovdqa 320-256(%rcx),%ymm2 - vmovdqa 352-256(%rcx),%ymm3 - vmovdqa 384-512(%rax),%ymm12 - vmovdqa 416-512(%rax),%ymm13 - vmovdqa 448-512(%rax),%ymm14 - vmovdqa 480-512(%rax),%ymm15 - vmovdqa 512-512(%rax),%ymm4 - vmovdqa 544-512(%rax),%ymm5 - vmovdqa 576-512(%rax),%ymm6 - vmovdqa 608-512(%rax),%ymm7 - vpaddd L$eight(%rip),%ymm4,%ymm4 - -L$oop_enter8x: - vmovdqa %ymm14,64(%rsp) - vmovdqa %ymm15,96(%rsp) - vbroadcasti128 (%r10),%ymm15 - vmovdqa %ymm4,512-512(%rax) - movl $10,%eax - jmp L$oop8x - -.p2align 5 -L$oop8x: - vpaddd %ymm0,%ymm8,%ymm8 - vpxor %ymm4,%ymm8,%ymm4 - vpshufb %ymm15,%ymm4,%ymm4 - vpaddd %ymm1,%ymm9,%ymm9 - vpxor %ymm5,%ymm9,%ymm5 - vpshufb %ymm15,%ymm5,%ymm5 - vpaddd 
%ymm4,%ymm12,%ymm12 - vpxor %ymm0,%ymm12,%ymm0 - vpslld $12,%ymm0,%ymm14 - vpsrld $20,%ymm0,%ymm0 - vpor %ymm0,%ymm14,%ymm0 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm5,%ymm13,%ymm13 - vpxor %ymm1,%ymm13,%ymm1 - vpslld $12,%ymm1,%ymm15 - vpsrld $20,%ymm1,%ymm1 - vpor %ymm1,%ymm15,%ymm1 - vpaddd %ymm0,%ymm8,%ymm8 - vpxor %ymm4,%ymm8,%ymm4 - vpshufb %ymm14,%ymm4,%ymm4 - vpaddd %ymm1,%ymm9,%ymm9 - vpxor %ymm5,%ymm9,%ymm5 - vpshufb %ymm14,%ymm5,%ymm5 - vpaddd %ymm4,%ymm12,%ymm12 - vpxor %ymm0,%ymm12,%ymm0 - vpslld $7,%ymm0,%ymm15 - vpsrld $25,%ymm0,%ymm0 - vpor %ymm0,%ymm15,%ymm0 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm5,%ymm13,%ymm13 - vpxor %ymm1,%ymm13,%ymm1 - vpslld $7,%ymm1,%ymm14 - vpsrld $25,%ymm1,%ymm1 - vpor %ymm1,%ymm14,%ymm1 - vmovdqa %ymm12,0(%rsp) - vmovdqa %ymm13,32(%rsp) - vmovdqa 64(%rsp),%ymm12 - vmovdqa 96(%rsp),%ymm13 - vpaddd %ymm2,%ymm10,%ymm10 - vpxor %ymm6,%ymm10,%ymm6 - vpshufb %ymm15,%ymm6,%ymm6 - vpaddd %ymm3,%ymm11,%ymm11 - vpxor %ymm7,%ymm11,%ymm7 - vpshufb %ymm15,%ymm7,%ymm7 - vpaddd %ymm6,%ymm12,%ymm12 - vpxor %ymm2,%ymm12,%ymm2 - vpslld $12,%ymm2,%ymm14 - vpsrld $20,%ymm2,%ymm2 - vpor %ymm2,%ymm14,%ymm2 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm7,%ymm13,%ymm13 - vpxor %ymm3,%ymm13,%ymm3 - vpslld $12,%ymm3,%ymm15 - vpsrld $20,%ymm3,%ymm3 - vpor %ymm3,%ymm15,%ymm3 - vpaddd %ymm2,%ymm10,%ymm10 - vpxor %ymm6,%ymm10,%ymm6 - vpshufb %ymm14,%ymm6,%ymm6 - vpaddd %ymm3,%ymm11,%ymm11 - vpxor %ymm7,%ymm11,%ymm7 - vpshufb %ymm14,%ymm7,%ymm7 - vpaddd %ymm6,%ymm12,%ymm12 - vpxor %ymm2,%ymm12,%ymm2 - vpslld $7,%ymm2,%ymm15 - vpsrld $25,%ymm2,%ymm2 - vpor %ymm2,%ymm15,%ymm2 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm7,%ymm13,%ymm13 - vpxor %ymm3,%ymm13,%ymm3 - vpslld $7,%ymm3,%ymm14 - vpsrld $25,%ymm3,%ymm3 - vpor %ymm3,%ymm14,%ymm3 - vpaddd %ymm1,%ymm8,%ymm8 - vpxor %ymm7,%ymm8,%ymm7 - vpshufb %ymm15,%ymm7,%ymm7 - vpaddd %ymm2,%ymm9,%ymm9 - vpxor %ymm4,%ymm9,%ymm4 - vpshufb %ymm15,%ymm4,%ymm4 - vpaddd %ymm7,%ymm12,%ymm12 - vpxor 
%ymm1,%ymm12,%ymm1 - vpslld $12,%ymm1,%ymm14 - vpsrld $20,%ymm1,%ymm1 - vpor %ymm1,%ymm14,%ymm1 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm4,%ymm13,%ymm13 - vpxor %ymm2,%ymm13,%ymm2 - vpslld $12,%ymm2,%ymm15 - vpsrld $20,%ymm2,%ymm2 - vpor %ymm2,%ymm15,%ymm2 - vpaddd %ymm1,%ymm8,%ymm8 - vpxor %ymm7,%ymm8,%ymm7 - vpshufb %ymm14,%ymm7,%ymm7 - vpaddd %ymm2,%ymm9,%ymm9 - vpxor %ymm4,%ymm9,%ymm4 - vpshufb %ymm14,%ymm4,%ymm4 - vpaddd %ymm7,%ymm12,%ymm12 - vpxor %ymm1,%ymm12,%ymm1 - vpslld $7,%ymm1,%ymm15 - vpsrld $25,%ymm1,%ymm1 - vpor %ymm1,%ymm15,%ymm1 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm4,%ymm13,%ymm13 - vpxor %ymm2,%ymm13,%ymm2 - vpslld $7,%ymm2,%ymm14 - vpsrld $25,%ymm2,%ymm2 - vpor %ymm2,%ymm14,%ymm2 - vmovdqa %ymm12,64(%rsp) - vmovdqa %ymm13,96(%rsp) - vmovdqa 0(%rsp),%ymm12 - vmovdqa 32(%rsp),%ymm13 - vpaddd %ymm3,%ymm10,%ymm10 - vpxor %ymm5,%ymm10,%ymm5 - vpshufb %ymm15,%ymm5,%ymm5 - vpaddd %ymm0,%ymm11,%ymm11 - vpxor %ymm6,%ymm11,%ymm6 - vpshufb %ymm15,%ymm6,%ymm6 - vpaddd %ymm5,%ymm12,%ymm12 - vpxor %ymm3,%ymm12,%ymm3 - vpslld $12,%ymm3,%ymm14 - vpsrld $20,%ymm3,%ymm3 - vpor %ymm3,%ymm14,%ymm3 - vbroadcasti128 (%r11),%ymm14 - vpaddd %ymm6,%ymm13,%ymm13 - vpxor %ymm0,%ymm13,%ymm0 - vpslld $12,%ymm0,%ymm15 - vpsrld $20,%ymm0,%ymm0 - vpor %ymm0,%ymm15,%ymm0 - vpaddd %ymm3,%ymm10,%ymm10 - vpxor %ymm5,%ymm10,%ymm5 - vpshufb %ymm14,%ymm5,%ymm5 - vpaddd %ymm0,%ymm11,%ymm11 - vpxor %ymm6,%ymm11,%ymm6 - vpshufb %ymm14,%ymm6,%ymm6 - vpaddd %ymm5,%ymm12,%ymm12 - vpxor %ymm3,%ymm12,%ymm3 - vpslld $7,%ymm3,%ymm15 - vpsrld $25,%ymm3,%ymm3 - vpor %ymm3,%ymm15,%ymm3 - vbroadcasti128 (%r10),%ymm15 - vpaddd %ymm6,%ymm13,%ymm13 - vpxor %ymm0,%ymm13,%ymm0 - vpslld $7,%ymm0,%ymm14 - vpsrld $25,%ymm0,%ymm0 - vpor %ymm0,%ymm14,%ymm0 - decl %eax - jnz L$oop8x - - leaq 512(%rsp),%rax - vpaddd 128-256(%rcx),%ymm8,%ymm8 - vpaddd 160-256(%rcx),%ymm9,%ymm9 - vpaddd 192-256(%rcx),%ymm10,%ymm10 - vpaddd 224-256(%rcx),%ymm11,%ymm11 - - vpunpckldq %ymm9,%ymm8,%ymm14 - vpunpckldq 
%ymm11,%ymm10,%ymm15 - vpunpckhdq %ymm9,%ymm8,%ymm8 - vpunpckhdq %ymm11,%ymm10,%ymm10 - vpunpcklqdq %ymm15,%ymm14,%ymm9 - vpunpckhqdq %ymm15,%ymm14,%ymm14 - vpunpcklqdq %ymm10,%ymm8,%ymm11 - vpunpckhqdq %ymm10,%ymm8,%ymm8 - vpaddd 256-256(%rcx),%ymm0,%ymm0 - vpaddd 288-256(%rcx),%ymm1,%ymm1 - vpaddd 320-256(%rcx),%ymm2,%ymm2 - vpaddd 352-256(%rcx),%ymm3,%ymm3 - - vpunpckldq %ymm1,%ymm0,%ymm10 - vpunpckldq %ymm3,%ymm2,%ymm15 - vpunpckhdq %ymm1,%ymm0,%ymm0 - vpunpckhdq %ymm3,%ymm2,%ymm2 - vpunpcklqdq %ymm15,%ymm10,%ymm1 - vpunpckhqdq %ymm15,%ymm10,%ymm10 - vpunpcklqdq %ymm2,%ymm0,%ymm3 - vpunpckhqdq %ymm2,%ymm0,%ymm0 - vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 - vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 - vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 - vperm2i128 $0x31,%ymm10,%ymm14,%ymm10 - vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 - vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 - vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 - vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 - vmovdqa %ymm15,0(%rsp) - vmovdqa %ymm9,32(%rsp) - vmovdqa 64(%rsp),%ymm15 - vmovdqa 96(%rsp),%ymm9 - - vpaddd 384-512(%rax),%ymm12,%ymm12 - vpaddd 416-512(%rax),%ymm13,%ymm13 - vpaddd 448-512(%rax),%ymm15,%ymm15 - vpaddd 480-512(%rax),%ymm9,%ymm9 - - vpunpckldq %ymm13,%ymm12,%ymm2 - vpunpckldq %ymm9,%ymm15,%ymm8 - vpunpckhdq %ymm13,%ymm12,%ymm12 - vpunpckhdq %ymm9,%ymm15,%ymm15 - vpunpcklqdq %ymm8,%ymm2,%ymm13 - vpunpckhqdq %ymm8,%ymm2,%ymm2 - vpunpcklqdq %ymm15,%ymm12,%ymm9 - vpunpckhqdq %ymm15,%ymm12,%ymm12 - vpaddd 512-512(%rax),%ymm4,%ymm4 - vpaddd 544-512(%rax),%ymm5,%ymm5 - vpaddd 576-512(%rax),%ymm6,%ymm6 - vpaddd 608-512(%rax),%ymm7,%ymm7 - - vpunpckldq %ymm5,%ymm4,%ymm15 - vpunpckldq %ymm7,%ymm6,%ymm8 - vpunpckhdq %ymm5,%ymm4,%ymm4 - vpunpckhdq %ymm7,%ymm6,%ymm6 - vpunpcklqdq %ymm8,%ymm15,%ymm5 - vpunpckhqdq %ymm8,%ymm15,%ymm15 - vpunpcklqdq %ymm6,%ymm4,%ymm7 - vpunpckhqdq %ymm6,%ymm4,%ymm4 - vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 - vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 - vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 - vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 - 
vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 - vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 - vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 - vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 - vmovdqa 0(%rsp),%ymm6 - vmovdqa 32(%rsp),%ymm12 - - cmpq $512,%rdx - jb L$tail8x - - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - leaq 128(%rsi),%rsi - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm12,%ymm12 - vpxor 32(%rsi),%ymm13,%ymm13 - vpxor 64(%rsi),%ymm10,%ymm10 - vpxor 96(%rsi),%ymm15,%ymm15 - leaq 128(%rsi),%rsi - vmovdqu %ymm12,0(%rdi) - vmovdqu %ymm13,32(%rdi) - vmovdqu %ymm10,64(%rdi) - vmovdqu %ymm15,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm14,%ymm14 - vpxor 32(%rsi),%ymm2,%ymm2 - vpxor 64(%rsi),%ymm3,%ymm3 - vpxor 96(%rsi),%ymm7,%ymm7 - leaq 128(%rsi),%rsi - vmovdqu %ymm14,0(%rdi) - vmovdqu %ymm2,32(%rdi) - vmovdqu %ymm3,64(%rdi) - vmovdqu %ymm7,96(%rdi) - leaq 128(%rdi),%rdi - - vpxor 0(%rsi),%ymm11,%ymm11 - vpxor 32(%rsi),%ymm9,%ymm9 - vpxor 64(%rsi),%ymm0,%ymm0 - vpxor 96(%rsi),%ymm4,%ymm4 - leaq 128(%rsi),%rsi - vmovdqu %ymm11,0(%rdi) - vmovdqu %ymm9,32(%rdi) - vmovdqu %ymm0,64(%rdi) - vmovdqu %ymm4,96(%rdi) - leaq 128(%rdi),%rdi - - subq $512,%rdx - jnz L$oop_outer8x - - jmp L$done8x - -L$tail8x: - cmpq $448,%rdx - jae L$448_or_more8x - cmpq $384,%rdx - jae L$384_or_more8x - cmpq $320,%rdx - jae L$320_or_more8x - cmpq $256,%rdx - jae L$256_or_more8x - cmpq $192,%rdx - jae L$192_or_more8x - cmpq $128,%rdx - jae L$128_or_more8x - cmpq $64,%rdx - jae L$64_or_more8x - - xorq %r10,%r10 - vmovdqa %ymm6,0(%rsp) - vmovdqa %ymm8,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$64_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - je L$done8x - - leaq 64(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm1,0(%rsp) - leaq 64(%rdi),%rdi - subq $64,%rdx - vmovdqa %ymm5,32(%rsp) - jmp 
L$oop_tail8x - -.p2align 5 -L$128_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - je L$done8x - - leaq 128(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm12,0(%rsp) - leaq 128(%rdi),%rdi - subq $128,%rdx - vmovdqa %ymm13,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$192_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - je L$done8x - - leaq 192(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm10,0(%rsp) - leaq 192(%rdi),%rdi - subq $192,%rdx - vmovdqa %ymm15,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$256_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - je L$done8x - - leaq 256(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm14,0(%rsp) - leaq 256(%rdi),%rdi - subq $256,%rdx - vmovdqa %ymm2,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$320_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu 
%ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - je L$done8x - - leaq 320(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm3,0(%rsp) - leaq 320(%rdi),%rdi - subq $320,%rdx - vmovdqa %ymm7,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$384_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vpxor 320(%rsi),%ymm3,%ymm3 - vpxor 352(%rsi),%ymm7,%ymm7 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - vmovdqu %ymm3,320(%rdi) - vmovdqu %ymm7,352(%rdi) - je L$done8x - - leaq 384(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm11,0(%rsp) - leaq 384(%rdi),%rdi - subq $384,%rdx - vmovdqa %ymm9,32(%rsp) - jmp L$oop_tail8x - -.p2align 5 -L$448_or_more8x: - vpxor 0(%rsi),%ymm6,%ymm6 - vpxor 32(%rsi),%ymm8,%ymm8 - vpxor 64(%rsi),%ymm1,%ymm1 - vpxor 96(%rsi),%ymm5,%ymm5 - vpxor 128(%rsi),%ymm12,%ymm12 - vpxor 160(%rsi),%ymm13,%ymm13 - vpxor 192(%rsi),%ymm10,%ymm10 - vpxor 224(%rsi),%ymm15,%ymm15 - vpxor 256(%rsi),%ymm14,%ymm14 - vpxor 288(%rsi),%ymm2,%ymm2 - vpxor 320(%rsi),%ymm3,%ymm3 - vpxor 352(%rsi),%ymm7,%ymm7 - vpxor 384(%rsi),%ymm11,%ymm11 - vpxor 416(%rsi),%ymm9,%ymm9 - vmovdqu %ymm6,0(%rdi) - vmovdqu %ymm8,32(%rdi) - vmovdqu %ymm1,64(%rdi) - vmovdqu %ymm5,96(%rdi) - vmovdqu %ymm12,128(%rdi) - vmovdqu %ymm13,160(%rdi) - vmovdqu %ymm10,192(%rdi) - vmovdqu %ymm15,224(%rdi) - vmovdqu %ymm14,256(%rdi) - vmovdqu %ymm2,288(%rdi) - vmovdqu %ymm3,320(%rdi) - vmovdqu 
%ymm7,352(%rdi) - vmovdqu %ymm11,384(%rdi) - vmovdqu %ymm9,416(%rdi) - je L$done8x - - leaq 448(%rsi),%rsi - xorq %r10,%r10 - vmovdqa %ymm0,0(%rsp) - leaq 448(%rdi),%rdi - subq $448,%rdx - vmovdqa %ymm4,32(%rsp) - -L$oop_tail8x: - movzbl (%rsi,%r10,1),%eax - movzbl (%rsp,%r10,1),%ecx - leaq 1(%r10),%r10 - xorl %ecx,%eax - movb %al,-1(%rdi,%r10,1) - decq %rdx - jnz L$oop_tail8x - -L$done8x: - vzeroall - leaq (%r9),%rsp - -L$8x_epilogue: - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S deleted file mode 100644 index 0c921b37b5..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S +++ /dev/null @@ -1,3068 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.data - -.p2align 4 -one: -.quad 1,0 -two: -.quad 2,0 -three: -.quad 3,0 -four: -.quad 4,0 -five: -.quad 5,0 -six: -.quad 6,0 -seven: -.quad 7,0 -eight: -.quad 8,0 - -OR_MASK: -.long 0x00000000,0x00000000,0x00000000,0x80000000 -poly: -.quad 0x1, 0xc200000000000000 -mask: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -con1: -.long 1,1,1,1 -con2: -.long 0x1b,0x1b,0x1b,0x1b -con3: -.byte -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7 -and_mask: -.long 0,0xffffffff, 0xffffffff, 0xffffffff -.text - -.p2align 4 -GFMUL: - - vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 - vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 - vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 - vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm2,%xmm2 - vpxor %xmm3,%xmm5,%xmm5 - - vpclmulqdq 
$0x10,poly(%rip),%xmm2,%xmm3 - vpshufd $78,%xmm2,%xmm4 - vpxor %xmm4,%xmm3,%xmm2 - - vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 - vpshufd $78,%xmm2,%xmm4 - vpxor %xmm4,%xmm3,%xmm2 - - vpxor %xmm5,%xmm2,%xmm0 - .byte 0xf3,0xc3 - - -.globl _aesgcmsiv_htable_init -.private_extern _aesgcmsiv_htable_init - -.p2align 4 -_aesgcmsiv_htable_init: - - vmovdqa (%rsi),%xmm0 - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm0,(%rdi) - call GFMUL - vmovdqa %xmm0,16(%rdi) - call GFMUL - vmovdqa %xmm0,32(%rdi) - call GFMUL - vmovdqa %xmm0,48(%rdi) - call GFMUL - vmovdqa %xmm0,64(%rdi) - call GFMUL - vmovdqa %xmm0,80(%rdi) - call GFMUL - vmovdqa %xmm0,96(%rdi) - call GFMUL - vmovdqa %xmm0,112(%rdi) - .byte 0xf3,0xc3 - - -.globl _aesgcmsiv_htable6_init -.private_extern _aesgcmsiv_htable6_init - -.p2align 4 -_aesgcmsiv_htable6_init: - - vmovdqa (%rsi),%xmm0 - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm0,(%rdi) - call GFMUL - vmovdqa %xmm0,16(%rdi) - call GFMUL - vmovdqa %xmm0,32(%rdi) - call GFMUL - vmovdqa %xmm0,48(%rdi) - call GFMUL - vmovdqa %xmm0,64(%rdi) - call GFMUL - vmovdqa %xmm0,80(%rdi) - .byte 0xf3,0xc3 - - -.globl _aesgcmsiv_htable_polyval -.private_extern _aesgcmsiv_htable_polyval - -.p2align 4 -_aesgcmsiv_htable_polyval: - - testq %rdx,%rdx - jnz L$htable_polyval_start - .byte 0xf3,0xc3 - -L$htable_polyval_start: - vzeroall - - - - movq %rdx,%r11 - andq $127,%r11 - - jz L$htable_polyval_no_prefix - - vpxor %xmm9,%xmm9,%xmm9 - vmovdqa (%rcx),%xmm1 - subq %r11,%rdx - - subq $16,%r11 - - - vmovdqu (%rsi),%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - - vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm5 - vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm3 - vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm4 - vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - leaq 16(%rsi),%rsi - testq %r11,%r11 - jnz L$htable_polyval_prefix_loop - jmp L$htable_polyval_prefix_complete - - -.p2align 6 -L$htable_polyval_prefix_loop: - subq $16,%r11 - - vmovdqu (%rsi),%xmm0 - - vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor 
%xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - testq %r11,%r11 - - leaq 16(%rsi),%rsi - - jnz L$htable_polyval_prefix_loop - -L$htable_polyval_prefix_complete: - vpsrldq $8,%xmm5,%xmm6 - vpslldq $8,%xmm5,%xmm5 - - vpxor %xmm6,%xmm4,%xmm9 - vpxor %xmm5,%xmm3,%xmm1 - - jmp L$htable_polyval_main_loop - -L$htable_polyval_no_prefix: - - - - - vpxor %xmm1,%xmm1,%xmm1 - vmovdqa (%rcx),%xmm9 - -.p2align 6 -L$htable_polyval_main_loop: - subq $0x80,%rdx - jb L$htable_polyval_out - - vmovdqu 112(%rsi),%xmm0 - - vpclmulqdq $0x01,(%rdi),%xmm0,%xmm5 - vpclmulqdq $0x00,(%rdi),%xmm0,%xmm3 - vpclmulqdq $0x11,(%rdi),%xmm0,%xmm4 - vpclmulqdq $0x10,(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 96(%rsi),%xmm0 - vpclmulqdq $0x01,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,16(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - - vmovdqu 80(%rsi),%xmm0 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 - vpalignr $8,%xmm1,%xmm1,%xmm1 - - vpclmulqdq $0x01,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,32(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm7,%xmm1,%xmm1 - - vmovdqu 64(%rsi),%xmm0 - - vpclmulqdq $0x01,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,48(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 48(%rsi),%xmm0 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 - vpalignr $8,%xmm1,%xmm1,%xmm1 - - vpclmulqdq 
$0x01,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,64(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm7,%xmm1,%xmm1 - - vmovdqu 32(%rsi),%xmm0 - - vpclmulqdq $0x01,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,80(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpxor %xmm9,%xmm1,%xmm1 - - vmovdqu 16(%rsi),%xmm0 - - vpclmulqdq $0x01,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,96(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vmovdqu 0(%rsi),%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - - vpclmulqdq $0x01,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x00,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm3,%xmm3 - vpclmulqdq $0x11,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm4,%xmm4 - vpclmulqdq $0x10,112(%rdi),%xmm0,%xmm6 - vpxor %xmm6,%xmm5,%xmm5 - - - vpsrldq $8,%xmm5,%xmm6 - vpslldq $8,%xmm5,%xmm5 - - vpxor %xmm6,%xmm4,%xmm9 - vpxor %xmm5,%xmm3,%xmm1 - - leaq 128(%rsi),%rsi - jmp L$htable_polyval_main_loop - - - -L$htable_polyval_out: - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 - vpalignr $8,%xmm1,%xmm1,%xmm1 - vpxor %xmm6,%xmm1,%xmm1 - - vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 - vpalignr $8,%xmm1,%xmm1,%xmm1 - vpxor %xmm6,%xmm1,%xmm1 - vpxor %xmm9,%xmm1,%xmm1 - - vmovdqu %xmm1,(%rcx) - vzeroupper - .byte 0xf3,0xc3 - - -.globl _aesgcmsiv_polyval_horner -.private_extern _aesgcmsiv_polyval_horner - -.p2align 4 -_aesgcmsiv_polyval_horner: - - testq %rcx,%rcx - jnz L$polyval_horner_start - .byte 0xf3,0xc3 - -L$polyval_horner_start: - - - - xorq %r10,%r10 - shlq $4,%rcx - - vmovdqa (%rsi),%xmm1 - vmovdqa 
(%rdi),%xmm0 - -L$polyval_horner_loop: - vpxor (%rdx,%r10,1),%xmm0,%xmm0 - call GFMUL - - addq $16,%r10 - cmpq %r10,%rcx - jne L$polyval_horner_loop - - - vmovdqa %xmm0,(%rdi) - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_aes_ks -.private_extern _aes128gcmsiv_aes_ks - -.p2align 4 -_aes128gcmsiv_aes_ks: - - vmovdqu (%rdi),%xmm1 - vmovdqa %xmm1,(%rsi) - - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - - movq $8,%rax - -L$ks128_loop: - addq $16,%rsi - subq $1,%rax - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - jne L$ks128_loop - - vmovdqa con2(%rip),%xmm0 - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,16(%rsi) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslldq $4,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpslldq $4,%xmm3,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,32(%rsi) - .byte 0xf3,0xc3 - - -.globl _aes256gcmsiv_aes_ks -.private_extern _aes256gcmsiv_aes_ks - -.p2align 4 -_aes256gcmsiv_aes_ks: - - vmovdqu (%rdi),%xmm1 - vmovdqu 16(%rdi),%xmm3 - vmovdqa %xmm1,(%rsi) - vmovdqa %xmm3,16(%rsi) - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - vpxor %xmm14,%xmm14,%xmm14 - movq $6,%rax - -L$ks256_loop: - addq $32,%rsi - subq $1,%rax - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - vpshufd 
$0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpsllq $32,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpshufb con3(%rip),%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vmovdqa %xmm3,16(%rsi) - jne L$ks256_loop - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpsllq $32,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vmovdqa %xmm1,32(%rsi) - .byte 0xf3,0xc3 - -.globl _aes128gcmsiv_aes_ks_enc_x1 -.private_extern _aes128gcmsiv_aes_ks_enc_x1 - -.p2align 4 -_aes128gcmsiv_aes_ks_enc_x1: - - vmovdqa (%rcx),%xmm1 - vmovdqa 0(%rdi),%xmm4 - - vmovdqa %xmm1,(%rdx) - vpxor %xmm1,%xmm4,%xmm4 - - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,16(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,32(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,48(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,64(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor 
%xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,80(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,96(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,112(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,128(%rdx) - - - vmovdqa con2(%rip),%xmm0 - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenc %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,144(%rdx) - - vpshufb %xmm15,%xmm1,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpsllq $32,%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpshufb con3(%rip),%xmm1,%xmm3 - vpxor %xmm3,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - - vaesenclast %xmm1,%xmm4,%xmm4 - vmovdqa %xmm1,160(%rdx) - - - vmovdqa %xmm4,0(%rsi) - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_kdf -.private_extern _aes128gcmsiv_kdf - -.p2align 4 -_aes128gcmsiv_kdf: - - - - - - vmovdqa (%rdx),%xmm1 - vmovdqa 0(%rdi),%xmm9 - vmovdqa and_mask(%rip),%xmm12 - vmovdqa one(%rip),%xmm13 - vpshufd $0x90,%xmm9,%xmm9 - vpand %xmm12,%xmm9,%xmm9 - vpaddd %xmm13,%xmm9,%xmm10 - vpaddd %xmm13,%xmm10,%xmm11 - vpaddd %xmm13,%xmm11,%xmm12 - - vpxor %xmm1,%xmm9,%xmm9 - 
vpxor %xmm1,%xmm10,%xmm10 - vpxor %xmm1,%xmm11,%xmm11 - vpxor %xmm1,%xmm12,%xmm12 - - vmovdqa 16(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 32(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 48(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 64(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 80(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 96(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 112(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 128(%rdx),%xmm2 - vaesenc %xmm2,%xmm9,%xmm9 - vaesenc %xmm2,%xmm10,%xmm10 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - - vmovdqa 144(%rdx),%xmm1 - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - - vmovdqa 160(%rdx),%xmm2 - vaesenclast %xmm2,%xmm9,%xmm9 - vaesenclast %xmm2,%xmm10,%xmm10 - vaesenclast %xmm2,%xmm11,%xmm11 - vaesenclast %xmm2,%xmm12,%xmm12 - - - vmovdqa %xmm9,0(%rsi) - vmovdqa %xmm10,16(%rsi) - vmovdqa %xmm11,32(%rsi) - vmovdqa %xmm12,48(%rsi) - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_enc_msg_x4 -.private_extern _aes128gcmsiv_enc_msg_x4 - -.p2align 4 -_aes128gcmsiv_enc_msg_x4: - - testq %r8,%r8 - jnz L$128_enc_msg_x4_start - .byte 0xf3,0xc3 - -L$128_enc_msg_x4_start: - pushq %r12 - - pushq %r13 - - - shrq $4,%r8 - movq %r8,%r10 - shlq $62,%r10 - shrq $62,%r10 - - - vmovdqa 
(%rdx),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - - vmovdqu four(%rip),%xmm4 - vmovdqa %xmm15,%xmm0 - vpaddd one(%rip),%xmm15,%xmm1 - vpaddd two(%rip),%xmm15,%xmm2 - vpaddd three(%rip),%xmm15,%xmm3 - - shrq $2,%r8 - je L$128_enc_msg_x4_check_remainder - - subq $64,%rsi - subq $64,%rdi - -L$128_enc_msg_x4_loop1: - addq $64,%rsi - addq $64,%rdi - - vmovdqa %xmm0,%xmm5 - vmovdqa %xmm1,%xmm6 - vmovdqa %xmm2,%xmm7 - vmovdqa %xmm3,%xmm8 - - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqu 32(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm1,%xmm1 - vmovdqu 48(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm2,%xmm2 - vmovdqu 64(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm3,%xmm3 - - vmovdqu 80(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 96(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 112(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 128(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 144(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm12 - vaesenclast 
%xmm12,%xmm5,%xmm5 - vaesenclast %xmm12,%xmm6,%xmm6 - vaesenclast %xmm12,%xmm7,%xmm7 - vaesenclast %xmm12,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm5,%xmm5 - vpxor 16(%rdi),%xmm6,%xmm6 - vpxor 32(%rdi),%xmm7,%xmm7 - vpxor 48(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm5,0(%rsi) - vmovdqu %xmm6,16(%rsi) - vmovdqu %xmm7,32(%rsi) - vmovdqu %xmm8,48(%rsi) - - jne L$128_enc_msg_x4_loop1 - - addq $64,%rsi - addq $64,%rdi - -L$128_enc_msg_x4_check_remainder: - cmpq $0,%r10 - je L$128_enc_msg_x4_out - -L$128_enc_msg_x4_loop2: - - - vmovdqa %xmm0,%xmm5 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm5,%xmm5 - vaesenc 16(%rcx),%xmm5,%xmm5 - vaesenc 32(%rcx),%xmm5,%xmm5 - vaesenc 48(%rcx),%xmm5,%xmm5 - vaesenc 64(%rcx),%xmm5,%xmm5 - vaesenc 80(%rcx),%xmm5,%xmm5 - vaesenc 96(%rcx),%xmm5,%xmm5 - vaesenc 112(%rcx),%xmm5,%xmm5 - vaesenc 128(%rcx),%xmm5,%xmm5 - vaesenc 144(%rcx),%xmm5,%xmm5 - vaesenclast 160(%rcx),%xmm5,%xmm5 - - - vpxor (%rdi),%xmm5,%xmm5 - vmovdqu %xmm5,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - subq $1,%r10 - jne L$128_enc_msg_x4_loop2 - -L$128_enc_msg_x4_out: - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_enc_msg_x8 -.private_extern _aes128gcmsiv_enc_msg_x8 - -.p2align 4 -_aes128gcmsiv_enc_msg_x8: - - testq %r8,%r8 - jnz L$128_enc_msg_x8_start - .byte 0xf3,0xc3 - -L$128_enc_msg_x8_start: - pushq %r12 - - pushq %r13 - - pushq %rbp - - movq %rsp,%rbp - - - - subq $128,%rsp - andq $-64,%rsp - - shrq $4,%r8 - movq %r8,%r10 - shlq $61,%r10 - shrq $61,%r10 - - - vmovdqu (%rdx),%xmm1 - vpor OR_MASK(%rip),%xmm1,%xmm1 - - - vpaddd seven(%rip),%xmm1,%xmm0 - vmovdqu %xmm0,(%rsp) - vpaddd one(%rip),%xmm1,%xmm9 - vpaddd two(%rip),%xmm1,%xmm10 - vpaddd three(%rip),%xmm1,%xmm11 - vpaddd four(%rip),%xmm1,%xmm12 - vpaddd five(%rip),%xmm1,%xmm13 - vpaddd six(%rip),%xmm1,%xmm14 - vmovdqa %xmm1,%xmm0 - - shrq $3,%r8 - je L$128_enc_msg_x8_check_remainder - - subq $128,%rsi - subq $128,%rdi - -L$128_enc_msg_x8_loop1: - addq $128,%rsi - addq 
$128,%rdi - - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm9,%xmm2 - vmovdqa %xmm10,%xmm3 - vmovdqa %xmm11,%xmm4 - vmovdqa %xmm12,%xmm5 - vmovdqa %xmm13,%xmm6 - vmovdqa %xmm14,%xmm7 - - vmovdqu (%rsp),%xmm8 - - vpxor (%rcx),%xmm1,%xmm1 - vpxor (%rcx),%xmm2,%xmm2 - vpxor (%rcx),%xmm3,%xmm3 - vpxor (%rcx),%xmm4,%xmm4 - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu (%rsp),%xmm14 - vpaddd eight(%rip),%xmm14,%xmm14 - vmovdqu %xmm14,(%rsp) - vmovdqu 32(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpsubd one(%rip),%xmm14,%xmm14 - vmovdqu 48(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm0,%xmm0 - vmovdqu 64(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm9,%xmm9 - vmovdqu 80(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm10,%xmm10 - vmovdqu 96(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc 
%xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm11,%xmm11 - vmovdqu 112(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm12,%xmm12 - vmovdqu 128(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm13,%xmm13 - vmovdqu 144(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm15 - vaesenclast %xmm15,%xmm1,%xmm1 - vaesenclast %xmm15,%xmm2,%xmm2 - vaesenclast %xmm15,%xmm3,%xmm3 - vaesenclast %xmm15,%xmm4,%xmm4 - vaesenclast %xmm15,%xmm5,%xmm5 - vaesenclast %xmm15,%xmm6,%xmm6 - vaesenclast %xmm15,%xmm7,%xmm7 - vaesenclast %xmm15,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm1,%xmm1 - vpxor 16(%rdi),%xmm2,%xmm2 - vpxor 32(%rdi),%xmm3,%xmm3 - vpxor 48(%rdi),%xmm4,%xmm4 - vpxor 64(%rdi),%xmm5,%xmm5 - vpxor 80(%rdi),%xmm6,%xmm6 - vpxor 96(%rdi),%xmm7,%xmm7 - vpxor 112(%rdi),%xmm8,%xmm8 - - decq %r8 - - vmovdqu %xmm1,0(%rsi) - vmovdqu %xmm2,16(%rsi) - vmovdqu %xmm3,32(%rsi) - vmovdqu %xmm4,48(%rsi) - vmovdqu %xmm5,64(%rsi) - vmovdqu %xmm6,80(%rsi) - vmovdqu %xmm7,96(%rsi) - vmovdqu %xmm8,112(%rsi) - - jne L$128_enc_msg_x8_loop1 - - addq $128,%rsi - addq $128,%rdi - -L$128_enc_msg_x8_check_remainder: - cmpq $0,%r10 - je L$128_enc_msg_x8_out - -L$128_enc_msg_x8_loop2: 
- - - vmovdqa %xmm0,%xmm1 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm1,%xmm1 - vaesenc 16(%rcx),%xmm1,%xmm1 - vaesenc 32(%rcx),%xmm1,%xmm1 - vaesenc 48(%rcx),%xmm1,%xmm1 - vaesenc 64(%rcx),%xmm1,%xmm1 - vaesenc 80(%rcx),%xmm1,%xmm1 - vaesenc 96(%rcx),%xmm1,%xmm1 - vaesenc 112(%rcx),%xmm1,%xmm1 - vaesenc 128(%rcx),%xmm1,%xmm1 - vaesenc 144(%rcx),%xmm1,%xmm1 - vaesenclast 160(%rcx),%xmm1,%xmm1 - - - vpxor (%rdi),%xmm1,%xmm1 - - vmovdqu %xmm1,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - decq %r10 - jne L$128_enc_msg_x8_loop2 - -L$128_enc_msg_x8_out: - movq %rbp,%rsp - - popq %rbp - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_dec -.private_extern _aes128gcmsiv_dec - -.p2align 4 -_aes128gcmsiv_dec: - - testq $~15,%r9 - jnz L$128_dec_start - .byte 0xf3,0xc3 - -L$128_dec_start: - vzeroupper - vmovdqa (%rdx),%xmm0 - movq %rdx,%rax - - leaq 32(%rax),%rax - leaq 32(%rcx),%rcx - - - vmovdqu (%rdi,%r9,1),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - andq $~15,%r9 - - - cmpq $96,%r9 - jb L$128_dec_loop2 - - - subq $96,%r9 - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vpxor (%r8),%xmm7,%xmm7 - vpxor (%r8),%xmm8,%xmm8 - vpxor (%r8),%xmm9,%xmm9 - vpxor (%r8),%xmm10,%xmm10 - vpxor (%r8),%xmm11,%xmm11 - vpxor (%r8),%xmm12,%xmm12 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc 
%xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vaesenclast %xmm4,%xmm8,%xmm8 - vaesenclast %xmm4,%xmm9,%xmm9 - vaesenclast %xmm4,%xmm10,%xmm10 - vaesenclast %xmm4,%xmm11,%xmm11 - vaesenclast %xmm4,%xmm12,%xmm12 - - - vpxor 0(%rdi),%xmm7,%xmm7 - vpxor 16(%rdi),%xmm8,%xmm8 - vpxor 32(%rdi),%xmm9,%xmm9 - vpxor 48(%rdi),%xmm10,%xmm10 - vpxor 64(%rdi),%xmm11,%xmm11 - vpxor 80(%rdi),%xmm12,%xmm12 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - addq $96,%rdi - addq $96,%rsi - jmp L$128_dec_loop1 - - -.p2align 6 -L$128_dec_loop1: - cmpq $96,%r9 - jb L$128_dec_finish_96 - subq $96,%r9 - - vmovdqa %xmm12,%xmm6 - vmovdqa 
%xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vmovdqa (%r8),%xmm4 - vpxor %xmm4,%xmm7,%xmm7 - vpxor %xmm4,%xmm8,%xmm8 - vpxor %xmm4,%xmm9,%xmm9 - vpxor %xmm4,%xmm10,%xmm10 - vpxor %xmm4,%xmm11,%xmm11 - vpxor %xmm4,%xmm12,%xmm12 - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 16(%rax),%xmm6 - 
vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vmovdqa 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - 
vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm6 - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor 0(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vpxor 16(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm8,%xmm8 - vpxor 32(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm9,%xmm9 - vpxor 48(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm10,%xmm10 - vpxor 64(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm11,%xmm11 - vpxor 80(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm12,%xmm12 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - vpxor %xmm5,%xmm0,%xmm0 - - leaq 96(%rdi),%rdi - leaq 96(%rsi),%rsi - jmp L$128_dec_loop1 - -L$128_dec_finish_96: - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor 
%xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor %xmm5,%xmm0,%xmm0 - -L$128_dec_loop2: - - - - cmpq $16,%r9 - jb L$128_dec_out - subq $16,%r9 - - vmovdqa %xmm15,%xmm2 - vpaddd one(%rip),%xmm15,%xmm15 - - vpxor 0(%r8),%xmm2,%xmm2 - vaesenc 16(%r8),%xmm2,%xmm2 - vaesenc 32(%r8),%xmm2,%xmm2 - vaesenc 48(%r8),%xmm2,%xmm2 - vaesenc 64(%r8),%xmm2,%xmm2 - vaesenc 80(%r8),%xmm2,%xmm2 - vaesenc 96(%r8),%xmm2,%xmm2 - vaesenc 112(%r8),%xmm2,%xmm2 - vaesenc 128(%r8),%xmm2,%xmm2 - vaesenc 144(%r8),%xmm2,%xmm2 - vaesenclast 160(%r8),%xmm2,%xmm2 - vpxor (%rdi),%xmm2,%xmm2 - vmovdqu %xmm2,(%rsi) - addq $16,%rdi - 
addq $16,%rsi - - vpxor %xmm2,%xmm0,%xmm0 - vmovdqa -32(%rcx),%xmm1 - call GFMUL - - jmp L$128_dec_loop2 - -L$128_dec_out: - vmovdqu %xmm0,(%rdx) - .byte 0xf3,0xc3 - - -.globl _aes128gcmsiv_ecb_enc_block -.private_extern _aes128gcmsiv_ecb_enc_block - -.p2align 4 -_aes128gcmsiv_ecb_enc_block: - - vmovdqa (%rdi),%xmm1 - - vpxor (%rdx),%xmm1,%xmm1 - vaesenc 16(%rdx),%xmm1,%xmm1 - vaesenc 32(%rdx),%xmm1,%xmm1 - vaesenc 48(%rdx),%xmm1,%xmm1 - vaesenc 64(%rdx),%xmm1,%xmm1 - vaesenc 80(%rdx),%xmm1,%xmm1 - vaesenc 96(%rdx),%xmm1,%xmm1 - vaesenc 112(%rdx),%xmm1,%xmm1 - vaesenc 128(%rdx),%xmm1,%xmm1 - vaesenc 144(%rdx),%xmm1,%xmm1 - vaesenclast 160(%rdx),%xmm1,%xmm1 - - vmovdqa %xmm1,(%rsi) - - .byte 0xf3,0xc3 - - -.globl _aes256gcmsiv_aes_ks_enc_x1 -.private_extern _aes256gcmsiv_aes_ks_enc_x1 - -.p2align 4 -_aes256gcmsiv_aes_ks_enc_x1: - - vmovdqa con1(%rip),%xmm0 - vmovdqa mask(%rip),%xmm15 - vmovdqa (%rdi),%xmm8 - vmovdqa (%rcx),%xmm1 - vmovdqa 16(%rcx),%xmm3 - vpxor %xmm1,%xmm8,%xmm8 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm1,(%rdx) - vmovdqu %xmm3,16(%rdx) - vpxor %xmm14,%xmm14,%xmm14 - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,32(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,48(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc 
%xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,64(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,80(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,96(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,112(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,128(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,144(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,160(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor 
%xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,176(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslld $1,%xmm0,%xmm0 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenc %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,192(%rdx) - - vpshufd $0xff,%xmm1,%xmm2 - vaesenclast %xmm14,%xmm2,%xmm2 - vpslldq $4,%xmm3,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpxor %xmm2,%xmm3,%xmm3 - vaesenc %xmm3,%xmm8,%xmm8 - vmovdqu %xmm3,208(%rdx) - - vpshufb %xmm15,%xmm3,%xmm2 - vaesenclast %xmm0,%xmm2,%xmm2 - vpslldq $4,%xmm1,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpslldq $4,%xmm4,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpxor %xmm2,%xmm1,%xmm1 - vaesenclast %xmm1,%xmm8,%xmm8 - vmovdqu %xmm1,224(%rdx) - - vmovdqa %xmm8,(%rsi) - .byte 0xf3,0xc3 - - -.globl _aes256gcmsiv_ecb_enc_block -.private_extern _aes256gcmsiv_ecb_enc_block - -.p2align 4 -_aes256gcmsiv_ecb_enc_block: - - vmovdqa (%rdi),%xmm1 - vpxor (%rdx),%xmm1,%xmm1 - vaesenc 16(%rdx),%xmm1,%xmm1 - vaesenc 32(%rdx),%xmm1,%xmm1 - vaesenc 48(%rdx),%xmm1,%xmm1 - vaesenc 64(%rdx),%xmm1,%xmm1 - vaesenc 80(%rdx),%xmm1,%xmm1 - vaesenc 96(%rdx),%xmm1,%xmm1 - vaesenc 112(%rdx),%xmm1,%xmm1 - vaesenc 128(%rdx),%xmm1,%xmm1 - vaesenc 144(%rdx),%xmm1,%xmm1 - vaesenc 160(%rdx),%xmm1,%xmm1 - vaesenc 176(%rdx),%xmm1,%xmm1 - vaesenc 192(%rdx),%xmm1,%xmm1 - vaesenc 208(%rdx),%xmm1,%xmm1 - vaesenclast 224(%rdx),%xmm1,%xmm1 - vmovdqa %xmm1,(%rsi) - .byte 0xf3,0xc3 - - -.globl _aes256gcmsiv_enc_msg_x4 -.private_extern _aes256gcmsiv_enc_msg_x4 - -.p2align 4 -_aes256gcmsiv_enc_msg_x4: - - testq %r8,%r8 - jnz L$256_enc_msg_x4_start - 
.byte 0xf3,0xc3 - -L$256_enc_msg_x4_start: - movq %r8,%r10 - shrq $4,%r8 - shlq $60,%r10 - jz L$256_enc_msg_x4_start2 - addq $1,%r8 - -L$256_enc_msg_x4_start2: - movq %r8,%r10 - shlq $62,%r10 - shrq $62,%r10 - - - vmovdqa (%rdx),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - - vmovdqa four(%rip),%xmm4 - vmovdqa %xmm15,%xmm0 - vpaddd one(%rip),%xmm15,%xmm1 - vpaddd two(%rip),%xmm15,%xmm2 - vpaddd three(%rip),%xmm15,%xmm3 - - shrq $2,%r8 - je L$256_enc_msg_x4_check_remainder - - subq $64,%rsi - subq $64,%rdi - -L$256_enc_msg_x4_loop1: - addq $64,%rsi - addq $64,%rdi - - vmovdqa %xmm0,%xmm5 - vmovdqa %xmm1,%xmm6 - vmovdqa %xmm2,%xmm7 - vmovdqa %xmm3,%xmm8 - - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm0,%xmm0 - vmovdqu 32(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm1,%xmm1 - vmovdqu 48(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm2,%xmm2 - vmovdqu 64(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vpaddd %xmm4,%xmm3,%xmm3 - - vmovdqu 80(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 96(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 112(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 128(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc 
%xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 144(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 176(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 192(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 208(%rcx),%xmm12 - vaesenc %xmm12,%xmm5,%xmm5 - vaesenc %xmm12,%xmm6,%xmm6 - vaesenc %xmm12,%xmm7,%xmm7 - vaesenc %xmm12,%xmm8,%xmm8 - - vmovdqu 224(%rcx),%xmm12 - vaesenclast %xmm12,%xmm5,%xmm5 - vaesenclast %xmm12,%xmm6,%xmm6 - vaesenclast %xmm12,%xmm7,%xmm7 - vaesenclast %xmm12,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm5,%xmm5 - vpxor 16(%rdi),%xmm6,%xmm6 - vpxor 32(%rdi),%xmm7,%xmm7 - vpxor 48(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm5,0(%rsi) - vmovdqu %xmm6,16(%rsi) - vmovdqu %xmm7,32(%rsi) - vmovdqu %xmm8,48(%rsi) - - jne L$256_enc_msg_x4_loop1 - - addq $64,%rsi - addq $64,%rdi - -L$256_enc_msg_x4_check_remainder: - cmpq $0,%r10 - je L$256_enc_msg_x4_out - -L$256_enc_msg_x4_loop2: - - - - vmovdqa %xmm0,%xmm5 - vpaddd one(%rip),%xmm0,%xmm0 - vpxor (%rcx),%xmm5,%xmm5 - vaesenc 16(%rcx),%xmm5,%xmm5 - vaesenc 32(%rcx),%xmm5,%xmm5 - vaesenc 48(%rcx),%xmm5,%xmm5 - vaesenc 64(%rcx),%xmm5,%xmm5 - vaesenc 80(%rcx),%xmm5,%xmm5 - vaesenc 96(%rcx),%xmm5,%xmm5 - vaesenc 112(%rcx),%xmm5,%xmm5 - vaesenc 128(%rcx),%xmm5,%xmm5 - vaesenc 144(%rcx),%xmm5,%xmm5 - vaesenc 160(%rcx),%xmm5,%xmm5 - vaesenc 176(%rcx),%xmm5,%xmm5 - vaesenc 192(%rcx),%xmm5,%xmm5 - vaesenc 208(%rcx),%xmm5,%xmm5 - vaesenclast 224(%rcx),%xmm5,%xmm5 - - - vpxor (%rdi),%xmm5,%xmm5 - - vmovdqu %xmm5,(%rsi) - - addq $16,%rdi - addq $16,%rsi - - subq $1,%r10 - jne 
L$256_enc_msg_x4_loop2 - -L$256_enc_msg_x4_out: - .byte 0xf3,0xc3 - - -.globl _aes256gcmsiv_enc_msg_x8 -.private_extern _aes256gcmsiv_enc_msg_x8 - -.p2align 4 -_aes256gcmsiv_enc_msg_x8: - - testq %r8,%r8 - jnz L$256_enc_msg_x8_start - .byte 0xf3,0xc3 - -L$256_enc_msg_x8_start: - - movq %rsp,%r11 - subq $16,%r11 - andq $-64,%r11 - - movq %r8,%r10 - shrq $4,%r8 - shlq $60,%r10 - jz L$256_enc_msg_x8_start2 - addq $1,%r8 - -L$256_enc_msg_x8_start2: - movq %r8,%r10 - shlq $61,%r10 - shrq $61,%r10 - - - vmovdqa (%rdx),%xmm1 - vpor OR_MASK(%rip),%xmm1,%xmm1 - - - vpaddd seven(%rip),%xmm1,%xmm0 - vmovdqa %xmm0,(%r11) - vpaddd one(%rip),%xmm1,%xmm9 - vpaddd two(%rip),%xmm1,%xmm10 - vpaddd three(%rip),%xmm1,%xmm11 - vpaddd four(%rip),%xmm1,%xmm12 - vpaddd five(%rip),%xmm1,%xmm13 - vpaddd six(%rip),%xmm1,%xmm14 - vmovdqa %xmm1,%xmm0 - - shrq $3,%r8 - jz L$256_enc_msg_x8_check_remainder - - subq $128,%rsi - subq $128,%rdi - -L$256_enc_msg_x8_loop1: - addq $128,%rsi - addq $128,%rdi - - vmovdqa %xmm0,%xmm1 - vmovdqa %xmm9,%xmm2 - vmovdqa %xmm10,%xmm3 - vmovdqa %xmm11,%xmm4 - vmovdqa %xmm12,%xmm5 - vmovdqa %xmm13,%xmm6 - vmovdqa %xmm14,%xmm7 - - vmovdqa (%r11),%xmm8 - - vpxor (%rcx),%xmm1,%xmm1 - vpxor (%rcx),%xmm2,%xmm2 - vpxor (%rcx),%xmm3,%xmm3 - vpxor (%rcx),%xmm4,%xmm4 - vpxor (%rcx),%xmm5,%xmm5 - vpxor (%rcx),%xmm6,%xmm6 - vpxor (%rcx),%xmm7,%xmm7 - vpxor (%rcx),%xmm8,%xmm8 - - vmovdqu 16(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqa (%r11),%xmm14 - vpaddd eight(%rip),%xmm14,%xmm14 - vmovdqa %xmm14,(%r11) - vmovdqu 32(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - 
vpsubd one(%rip),%xmm14,%xmm14 - vmovdqu 48(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm0,%xmm0 - vmovdqu 64(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm9,%xmm9 - vmovdqu 80(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm10,%xmm10 - vmovdqu 96(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm11,%xmm11 - vmovdqu 112(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm12,%xmm12 - vmovdqu 128(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vpaddd eight(%rip),%xmm13,%xmm13 - vmovdqu 144(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 
- vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 160(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 176(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 192(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 208(%rcx),%xmm15 - vaesenc %xmm15,%xmm1,%xmm1 - vaesenc %xmm15,%xmm2,%xmm2 - vaesenc %xmm15,%xmm3,%xmm3 - vaesenc %xmm15,%xmm4,%xmm4 - vaesenc %xmm15,%xmm5,%xmm5 - vaesenc %xmm15,%xmm6,%xmm6 - vaesenc %xmm15,%xmm7,%xmm7 - vaesenc %xmm15,%xmm8,%xmm8 - - vmovdqu 224(%rcx),%xmm15 - vaesenclast %xmm15,%xmm1,%xmm1 - vaesenclast %xmm15,%xmm2,%xmm2 - vaesenclast %xmm15,%xmm3,%xmm3 - vaesenclast %xmm15,%xmm4,%xmm4 - vaesenclast %xmm15,%xmm5,%xmm5 - vaesenclast %xmm15,%xmm6,%xmm6 - vaesenclast %xmm15,%xmm7,%xmm7 - vaesenclast %xmm15,%xmm8,%xmm8 - - - - vpxor 0(%rdi),%xmm1,%xmm1 - vpxor 16(%rdi),%xmm2,%xmm2 - vpxor 32(%rdi),%xmm3,%xmm3 - vpxor 48(%rdi),%xmm4,%xmm4 - vpxor 64(%rdi),%xmm5,%xmm5 - vpxor 80(%rdi),%xmm6,%xmm6 - vpxor 96(%rdi),%xmm7,%xmm7 - vpxor 112(%rdi),%xmm8,%xmm8 - - subq $1,%r8 - - vmovdqu %xmm1,0(%rsi) - vmovdqu %xmm2,16(%rsi) - vmovdqu %xmm3,32(%rsi) - vmovdqu %xmm4,48(%rsi) - vmovdqu %xmm5,64(%rsi) - vmovdqu %xmm6,80(%rsi) - vmovdqu %xmm7,96(%rsi) - vmovdqu %xmm8,112(%rsi) - - jne L$256_enc_msg_x8_loop1 - - addq $128,%rsi - addq $128,%rdi - -L$256_enc_msg_x8_check_remainder: - cmpq $0,%r10 - je L$256_enc_msg_x8_out - 
-L$256_enc_msg_x8_loop2: - - - vmovdqa %xmm0,%xmm1 - vpaddd one(%rip),%xmm0,%xmm0 - - vpxor (%rcx),%xmm1,%xmm1 - vaesenc 16(%rcx),%xmm1,%xmm1 - vaesenc 32(%rcx),%xmm1,%xmm1 - vaesenc 48(%rcx),%xmm1,%xmm1 - vaesenc 64(%rcx),%xmm1,%xmm1 - vaesenc 80(%rcx),%xmm1,%xmm1 - vaesenc 96(%rcx),%xmm1,%xmm1 - vaesenc 112(%rcx),%xmm1,%xmm1 - vaesenc 128(%rcx),%xmm1,%xmm1 - vaesenc 144(%rcx),%xmm1,%xmm1 - vaesenc 160(%rcx),%xmm1,%xmm1 - vaesenc 176(%rcx),%xmm1,%xmm1 - vaesenc 192(%rcx),%xmm1,%xmm1 - vaesenc 208(%rcx),%xmm1,%xmm1 - vaesenclast 224(%rcx),%xmm1,%xmm1 - - - vpxor (%rdi),%xmm1,%xmm1 - - vmovdqu %xmm1,(%rsi) - - addq $16,%rdi - addq $16,%rsi - subq $1,%r10 - jnz L$256_enc_msg_x8_loop2 - -L$256_enc_msg_x8_out: - .byte 0xf3,0xc3 - - - -.globl _aes256gcmsiv_dec -.private_extern _aes256gcmsiv_dec - -.p2align 4 -_aes256gcmsiv_dec: - - testq $~15,%r9 - jnz L$256_dec_start - .byte 0xf3,0xc3 - -L$256_dec_start: - vzeroupper - vmovdqa (%rdx),%xmm0 - movq %rdx,%rax - - leaq 32(%rax),%rax - leaq 32(%rcx),%rcx - - - vmovdqu (%rdi,%r9,1),%xmm15 - vpor OR_MASK(%rip),%xmm15,%xmm15 - andq $~15,%r9 - - - cmpq $96,%r9 - jb L$256_dec_loop2 - - - subq $96,%r9 - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vpxor (%r8),%xmm7,%xmm7 - vpxor (%r8),%xmm8,%xmm8 - vpxor (%r8),%xmm9,%xmm9 - vpxor (%r8),%xmm10,%xmm10 - vpxor (%r8),%xmm11,%xmm11 - vpxor (%r8),%xmm12,%xmm12 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc 
%xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 176(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 192(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 208(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - 
vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 224(%r8),%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vaesenclast %xmm4,%xmm8,%xmm8 - vaesenclast %xmm4,%xmm9,%xmm9 - vaesenclast %xmm4,%xmm10,%xmm10 - vaesenclast %xmm4,%xmm11,%xmm11 - vaesenclast %xmm4,%xmm12,%xmm12 - - - vpxor 0(%rdi),%xmm7,%xmm7 - vpxor 16(%rdi),%xmm8,%xmm8 - vpxor 32(%rdi),%xmm9,%xmm9 - vpxor 48(%rdi),%xmm10,%xmm10 - vpxor 64(%rdi),%xmm11,%xmm11 - vpxor 80(%rdi),%xmm12,%xmm12 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - addq $96,%rdi - addq $96,%rsi - jmp L$256_dec_loop1 - - -.p2align 6 -L$256_dec_loop1: - cmpq $96,%r9 - jb L$256_dec_finish_96 - subq $96,%r9 - - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqa %xmm15,%xmm7 - vpaddd one(%rip),%xmm7,%xmm8 - vpaddd two(%rip),%xmm7,%xmm9 - vpaddd one(%rip),%xmm9,%xmm10 - vpaddd two(%rip),%xmm9,%xmm11 - vpaddd one(%rip),%xmm11,%xmm12 - vpaddd two(%rip),%xmm11,%xmm15 - - vmovdqa (%r8),%xmm4 - vpxor %xmm4,%xmm7,%xmm7 - vpxor %xmm4,%xmm8,%xmm8 - vpxor %xmm4,%xmm9,%xmm9 - vpxor %xmm4,%xmm10,%xmm10 - vpxor %xmm4,%xmm11,%xmm11 - vpxor %xmm4,%xmm12,%xmm12 - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor 
%xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 32(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 48(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 64(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 96(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - 
vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 112(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vmovdqa 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 128(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vmovdqu 144(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 160(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 176(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 192(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 208(%r8),%xmm4 - vaesenc %xmm4,%xmm7,%xmm7 - vaesenc %xmm4,%xmm8,%xmm8 - vaesenc %xmm4,%xmm9,%xmm9 - vaesenc %xmm4,%xmm10,%xmm10 - vaesenc %xmm4,%xmm11,%xmm11 - vaesenc %xmm4,%xmm12,%xmm12 - - vmovdqu 224(%r8),%xmm6 - 
vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor 0(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm7,%xmm7 - vpxor 16(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm8,%xmm8 - vpxor 32(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm9,%xmm9 - vpxor 48(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm10,%xmm10 - vpxor 64(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm11,%xmm11 - vpxor 80(%rdi),%xmm6,%xmm4 - vaesenclast %xmm4,%xmm12,%xmm12 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vmovdqu %xmm7,0(%rsi) - vmovdqu %xmm8,16(%rsi) - vmovdqu %xmm9,32(%rsi) - vmovdqu %xmm10,48(%rsi) - vmovdqu %xmm11,64(%rsi) - vmovdqu %xmm12,80(%rsi) - - vpxor %xmm5,%xmm0,%xmm0 - - leaq 96(%rdi),%rdi - leaq 96(%rsi),%rsi - jmp L$256_dec_loop1 - -L$256_dec_finish_96: - vmovdqa %xmm12,%xmm6 - vmovdqa %xmm11,16-32(%rax) - vmovdqa %xmm10,32-32(%rax) - vmovdqa %xmm9,48-32(%rax) - vmovdqa %xmm8,64-32(%rax) - vmovdqa %xmm7,80-32(%rax) - - vmovdqu 0-32(%rcx),%xmm4 - vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 - vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 - vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu -16(%rax),%xmm6 - vmovdqu -16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 0(%rax),%xmm6 - vmovdqu 0(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 16(%rax),%xmm6 - vmovdqu 16(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor 
%xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vmovdqu 32(%rax),%xmm6 - vmovdqu 32(%rcx),%xmm13 - - vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - - vmovdqu 80-32(%rax),%xmm6 - vpxor %xmm0,%xmm6,%xmm6 - vmovdqu 80-32(%rcx),%xmm5 - vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm2,%xmm5 - vpslldq $8,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm0 - - vmovdqa poly(%rip),%xmm3 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpalignr $8,%xmm0,%xmm0,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 - vpxor %xmm0,%xmm2,%xmm0 - - vpxor %xmm5,%xmm0,%xmm0 - -L$256_dec_loop2: - - - - cmpq $16,%r9 - jb L$256_dec_out - subq $16,%r9 - - vmovdqa %xmm15,%xmm2 - vpaddd one(%rip),%xmm15,%xmm15 - - vpxor 0(%r8),%xmm2,%xmm2 - vaesenc 16(%r8),%xmm2,%xmm2 - vaesenc 32(%r8),%xmm2,%xmm2 - vaesenc 48(%r8),%xmm2,%xmm2 - vaesenc 64(%r8),%xmm2,%xmm2 - vaesenc 80(%r8),%xmm2,%xmm2 - vaesenc 96(%r8),%xmm2,%xmm2 - vaesenc 112(%r8),%xmm2,%xmm2 - vaesenc 128(%r8),%xmm2,%xmm2 - vaesenc 144(%r8),%xmm2,%xmm2 - vaesenc 160(%r8),%xmm2,%xmm2 - vaesenc 176(%r8),%xmm2,%xmm2 - vaesenc 192(%r8),%xmm2,%xmm2 - vaesenc 208(%r8),%xmm2,%xmm2 - vaesenclast 224(%r8),%xmm2,%xmm2 - vpxor (%rdi),%xmm2,%xmm2 - vmovdqu %xmm2,(%rsi) - addq $16,%rdi - addq $16,%rsi - - vpxor %xmm2,%xmm0,%xmm0 - vmovdqa -32(%rcx),%xmm1 - call GFMUL - - jmp L$256_dec_loop2 - -L$256_dec_out: - vmovdqu %xmm0,(%rdx) - .byte 0xf3,0xc3 - - -.globl 
_aes256gcmsiv_kdf -.private_extern _aes256gcmsiv_kdf - -.p2align 4 -_aes256gcmsiv_kdf: - - - - - - vmovdqa (%rdx),%xmm1 - vmovdqa 0(%rdi),%xmm4 - vmovdqa and_mask(%rip),%xmm11 - vmovdqa one(%rip),%xmm8 - vpshufd $0x90,%xmm4,%xmm4 - vpand %xmm11,%xmm4,%xmm4 - vpaddd %xmm8,%xmm4,%xmm6 - vpaddd %xmm8,%xmm6,%xmm7 - vpaddd %xmm8,%xmm7,%xmm11 - vpaddd %xmm8,%xmm11,%xmm12 - vpaddd %xmm8,%xmm12,%xmm13 - - vpxor %xmm1,%xmm4,%xmm4 - vpxor %xmm1,%xmm6,%xmm6 - vpxor %xmm1,%xmm7,%xmm7 - vpxor %xmm1,%xmm11,%xmm11 - vpxor %xmm1,%xmm12,%xmm12 - vpxor %xmm1,%xmm13,%xmm13 - - vmovdqa 16(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 32(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 48(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 64(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 80(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 96(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 112(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 128(%rdx),%xmm2 - 
vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 144(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 160(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 176(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 192(%rdx),%xmm2 - vaesenc %xmm2,%xmm4,%xmm4 - vaesenc %xmm2,%xmm6,%xmm6 - vaesenc %xmm2,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vaesenc %xmm2,%xmm12,%xmm12 - vaesenc %xmm2,%xmm13,%xmm13 - - vmovdqa 208(%rdx),%xmm1 - vaesenc %xmm1,%xmm4,%xmm4 - vaesenc %xmm1,%xmm6,%xmm6 - vaesenc %xmm1,%xmm7,%xmm7 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - - vmovdqa 224(%rdx),%xmm2 - vaesenclast %xmm2,%xmm4,%xmm4 - vaesenclast %xmm2,%xmm6,%xmm6 - vaesenclast %xmm2,%xmm7,%xmm7 - vaesenclast %xmm2,%xmm11,%xmm11 - vaesenclast %xmm2,%xmm12,%xmm12 - vaesenclast %xmm2,%xmm13,%xmm13 - - - vmovdqa %xmm4,0(%rsi) - vmovdqa %xmm6,16(%rsi) - vmovdqa %xmm7,32(%rsi) - vmovdqa %xmm11,48(%rsi) - vmovdqa %xmm12,64(%rsi) - vmovdqa %xmm13,80(%rsi) - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S deleted file mode 100644 index e50227ae38..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S +++ /dev/null @@ -1,8985 +0,0 @@ -# This file is generated from 
a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -chacha20_poly1305_constants: - -.p2align 6 -.chacha20_consts: -.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' -.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' -.rol8: -.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 -.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 -.rol16: -.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 -.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 -.avx2_init: -.long 0,0,0,0 -.sse_inc: -.long 1,0,0,0 -.avx2_inc: -.long 2,0,0,0,2,0,0,0 -.clamp: -.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC -.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF -.p2align 4 -.and_masks: -.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 -.byte 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 -.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff - - -.p2align 6 -poly_hash_ad_internal: - - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - cmpq $13,%r8 - jne hash_ad_loop -poly_fast_tls_ad: - - movq (%rcx),%r10 - movq 5(%rcx),%r11 - shrq $24,%r11 - movq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - .byte 0xf3,0xc3 -hash_ad_loop: - - cmpq $16,%r8 - jb hash_ad_tail - addq 0(%rcx),%r10 - adcq 8+0(%rcx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq 
%r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rcx),%rcx - subq $16,%r8 - jmp hash_ad_loop -hash_ad_tail: - cmpq $0,%r8 - je 1f - - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - addq %r8,%rcx -hash_ad_tail_loop: - shldq $8,%r13,%r14 - shlq $8,%r13 - movzbq -1(%rcx),%r15 - xorq %r15,%r13 - decq %rcx - decq %r8 - jne hash_ad_tail_loop - - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -1: - .byte 0xf3,0xc3 - - - -.globl _chacha20_poly1305_open -.private_extern _chacha20_poly1305_open - -.p2align 6 -_chacha20_poly1305_open: - - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - - pushq %r9 - - subq $288 + 32,%rsp - - - - - - - - leaq 32(%rsp),%rbp - andq $-32,%rbp - movq %rdx,8+32(%rbp) - movq %r8,0+32(%rbp) - movq %rdx,%rbx - - movl _OPENSSL_ia32cap_P+8(%rip),%eax - andl $288,%eax - xorl $288,%eax - jz chacha20_poly1305_open_avx2 - -1: - cmpq $128,%rbx - jbe open_sse_128 - - movdqa .chacha20_consts(%rip),%xmm0 - movdqu 0(%r9),%xmm4 - movdqu 16(%r9),%xmm8 - movdqu 32(%r9),%xmm12 - movdqa %xmm12,%xmm7 - - movdqa %xmm4,48(%rbp) - movdqa %xmm8,64(%rbp) - movdqa %xmm12,96(%rbp) - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd 
%xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %r10 - jne 1b - - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - - pand .clamp(%rip),%xmm0 - movdqa %xmm0,0(%rbp) - movdqa %xmm4,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal -open_sse_main_loop: - cmpq $256,%rbx - jb 2f - - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd .sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - - - - movq $4,%rcx - movq %rsi,%r8 -1: - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd 
%xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - - leaq 16(%r8),%r8 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 
102,69,15,58,15,237,12 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - 
pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %rcx - jge 1b - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - cmpq $-6,%rcx - jg 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqa %xmm12,80(%rbp) - movdqu 0 + 0(%rsi),%xmm12 - pxor %xmm3,%xmm12 - movdqu %xmm12,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm12 - pxor %xmm7,%xmm12 - movdqu %xmm12,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm12 - pxor %xmm11,%xmm12 - movdqu %xmm12,32 + 0(%rdi) - movdqu 48 + 0(%rsi),%xmm12 - pxor %xmm15,%xmm12 - movdqu %xmm12,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - 
movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - movdqu 0 + 192(%rsi),%xmm3 - movdqu 16 + 192(%rsi),%xmm7 - movdqu 32 + 192(%rsi),%xmm11 - movdqu 48 + 192(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor 80(%rbp),%xmm15 - movdqu %xmm0,0 + 192(%rdi) - movdqu %xmm4,16 + 192(%rdi) - movdqu %xmm8,32 + 192(%rdi) - movdqu %xmm15,48 + 192(%rdi) - - leaq 256(%rsi),%rsi - leaq 256(%rdi),%rdi - subq $256,%rbx - jmp open_sse_main_loop -2: - - testq %rbx,%rbx - jz open_sse_finalize - cmpq $64,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa 96(%rbp),%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - - xorq %r8,%r8 - movq %rbx,%rcx - cmpq $16,%rcx - jb 2f -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx -2: - addq $16,%r8 - paddd 
%xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - cmpq $16,%rcx - jae 1b - cmpq $160,%r8 - jne 2b - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - jmp open_sse_tail_64_dec_loop -3: - cmpq $128,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa 96(%rbp),%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - - movq %rbx,%rcx - andq $-16,%rcx - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq 
$3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 
102,69,15,58,15,237,4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 0(%rdi) - movdqu %xmm5,16 + 0(%rdi) - movdqu %xmm9,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - - subq $64,%rbx - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - jmp open_sse_tail_64_dec_loop -3: - cmpq $192,%rbx - ja 3f - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa 96(%rbp),%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - - movq %rbx,%rcx - movq $160,%r8 - cmpq $160,%rcx - cmovgq %r8,%rcx - andq $-16,%rcx - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - paddd 
%xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - 
paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - cmpq $176,%rbx - jb 1f - addq 160(%rsi),%r10 - adcq 8+160(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - cmpq $192,%rbx - jb 1f - addq 176(%rsi),%r10 - adcq 8+176(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 
- movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -1: - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu %xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - subq $128,%rbx - leaq 128(%rsi),%rsi - leaq 128(%rdi),%rdi - jmp open_sse_tail_64_dec_loop -3: - - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd .sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - - xorq %r8,%r8 -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movdqa %xmm11,80(%rbp) - paddd %xmm4,%xmm0 - pxor 
%xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm4 - pxor %xmm11,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm4 - pxor %xmm11,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm5 - pxor %xmm11,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm5 - pxor %xmm11,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm6 - pxor %xmm11,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm6 - pxor %xmm11,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - movdqa 80(%rbp),%xmm11 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa %xmm9,80(%rbp) - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol16(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $12,%xmm9 - psrld $20,%xmm7 - pxor %xmm9,%xmm7 - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol8(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $7,%xmm9 - psrld $25,%xmm7 - pxor %xmm9,%xmm7 -.byte 102,15,58,15,255,4 -.byte 
102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 - movdqa 80(%rbp),%xmm9 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - movdqa %xmm11,80(%rbp) - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm4 - pxor %xmm11,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm4 - pxor %xmm11,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm5 - pxor %xmm11,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm5 - pxor %xmm11,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $12,%xmm11 - psrld $20,%xmm6 - pxor %xmm11,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm11 - pslld $7,%xmm11 - psrld $25,%xmm6 - pxor %xmm11,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - movdqa 80(%rbp),%xmm11 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - movdqa %xmm9,80(%rbp) - paddd 
%xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol16(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $12,%xmm9 - psrld $20,%xmm7 - pxor %xmm9,%xmm7 - paddd %xmm7,%xmm3 - pxor %xmm3,%xmm15 - pshufb .rol8(%rip),%xmm15 - paddd %xmm15,%xmm11 - pxor %xmm11,%xmm7 - movdqa %xmm7,%xmm9 - pslld $7,%xmm9 - psrld $25,%xmm7 - pxor %xmm9,%xmm7 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 - movdqa 80(%rbp),%xmm9 - - addq $16,%r8 - cmpq $160,%r8 - jb 1b - movq %rbx,%rcx - andq $-16,%rcx -1: - addq 0(%rsi,%r8), %r10 - adcq 8+0(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - addq $16,%r8 - cmpq %rcx,%r8 - jb 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqa %xmm12,80(%rbp) - movdqu 0 + 0(%rsi),%xmm12 - pxor %xmm3,%xmm12 - movdqu %xmm12,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm12 - pxor %xmm7,%xmm12 - movdqu %xmm12,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm12 - pxor %xmm11,%xmm12 - movdqu %xmm12,32 + 0(%rdi) - movdqu 48 + 
0(%rsi),%xmm12 - pxor %xmm15,%xmm12 - movdqu %xmm12,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - movdqa 80(%rbp),%xmm12 - subq $192,%rbx - leaq 192(%rsi),%rsi - leaq 192(%rdi),%rdi - - -open_sse_tail_64_dec_loop: - cmpq $16,%rbx - jb 1f - subq $16,%rbx - movdqu (%rsi),%xmm3 - pxor %xmm3,%xmm0 - movdqu %xmm0,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movdqa %xmm4,%xmm0 - movdqa %xmm8,%xmm4 - movdqa %xmm12,%xmm8 - jmp open_sse_tail_64_dec_loop -1: - movdqa %xmm0,%xmm1 - - -open_sse_tail_16: - testq %rbx,%rbx - jz open_sse_finalize - - - - pxor %xmm3,%xmm3 - leaq -1(%rsi,%rbx), %rsi - movq %rbx,%r8 -2: - pslldq $1,%xmm3 - pinsrb $0,(%rsi),%xmm3 - subq $1,%rsi - subq $1,%r8 - jnz 2b - -3: -.byte 102,73,15,126,221 - pextrq $1,%xmm3,%r14 - - pxor %xmm1,%xmm3 - - -2: - pextrb $0,%xmm3,(%rdi) - psrldq $1,%xmm3 - addq $1,%rdi - subq $1,%rbx - jne 2b - - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq 
$-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -open_sse_finalize: - addq 32(%rbp),%r10 - adcq 8+32(%rbp),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movq %r10,%r13 - movq %r11,%r14 - movq %r12,%r15 - subq $-5,%r10 - sbbq $-1,%r11 - sbbq $3,%r12 - cmovcq %r13,%r10 - cmovcq %r14,%r11 - cmovcq %r15,%r12 - - addq 0+16(%rbp),%r10 - adcq 8+16(%rbp),%r11 - - addq $288 + 32,%rsp - - popq %r9 - - movq %r10,(%r9) - movq %r11,8(%r9) - - popq %r15 - - popq %r14 - - popq %r13 - - popq %r12 - - popq %rbx - - popq %rbp - - .byte 0xf3,0xc3 - - -open_sse_128: - movdqu .chacha20_consts(%rip),%xmm0 - movdqa %xmm0,%xmm1 - movdqa %xmm0,%xmm2 - movdqu 0(%r9),%xmm4 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqu 16(%r9),%xmm8 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqu 32(%r9),%xmm12 - movdqa %xmm12,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa %xmm13,%xmm15 - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - 
movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - 
pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm0 - paddd .chacha20_consts(%rip),%xmm1 - paddd .chacha20_consts(%rip),%xmm2 - paddd %xmm7,%xmm4 - paddd %xmm7,%xmm5 - paddd %xmm7,%xmm6 - paddd %xmm11,%xmm9 - paddd %xmm11,%xmm10 - paddd %xmm15,%xmm13 - paddd .sse_inc(%rip),%xmm15 - paddd %xmm15,%xmm14 - - pand .clamp(%rip),%xmm0 - movdqa %xmm0,0(%rbp) - movdqa %xmm4,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal -1: - cmpq $16,%rbx - jb open_sse_tail_16 - subq $16,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - - - movdqu 0(%rsi),%xmm3 - pxor %xmm3,%xmm1 - movdqu %xmm1,0(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movdqa %xmm5,%xmm1 - movdqa %xmm9,%xmm5 - movdqa %xmm13,%xmm9 - movdqa %xmm2,%xmm13 - movdqa %xmm6,%xmm2 - movdqa %xmm10,%xmm6 - movdqa %xmm14,%xmm10 - jmp 1b - jmp open_sse_tail_16 - - - - - - -.globl _chacha20_poly1305_seal -.private_extern _chacha20_poly1305_seal - 
-.p2align 6 -_chacha20_poly1305_seal: - - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - - pushq %r9 - - subq $288 + 32,%rsp - - - - - - - - leaq 32(%rsp),%rbp - andq $-32,%rbp - movq 56(%r9),%rbx - addq %rdx,%rbx - movq %rbx,8+32(%rbp) - movq %r8,0+32(%rbp) - movq %rdx,%rbx - - movl _OPENSSL_ia32cap_P+8(%rip),%eax - andl $288,%eax - xorl $288,%eax - jz chacha20_poly1305_seal_avx2 - - cmpq $128,%rbx - jbe seal_sse_128 - - movdqa .chacha20_consts(%rip),%xmm0 - movdqu 0(%r9),%xmm4 - movdqu 16(%r9),%xmm8 - movdqu 32(%r9),%xmm12 - movdqa %xmm0,%xmm1 - movdqa %xmm0,%xmm2 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqa %xmm8,%xmm11 - movdqa %xmm12,%xmm15 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm14 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm13 - paddd .sse_inc(%rip),%xmm12 - - movdqa %xmm4,48(%rbp) - movdqa %xmm8,64(%rbp) - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - movq $10,%r10 -1: - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd 
%xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 
- paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - - pand .clamp(%rip),%xmm3 - movdqa %xmm3,0(%rbp) - movdqa %xmm7,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu 
%xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - cmpq $192,%rbx - ja 1f - movq $128,%rcx - subq $128,%rbx - leaq 128(%rsi),%rsi - jmp seal_sse_128_seal_hash -1: - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor %xmm12,%xmm15 - movdqu %xmm0,0 + 128(%rdi) - movdqu %xmm4,16 + 128(%rdi) - movdqu %xmm8,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - movq $192,%rcx - subq $192,%rbx - leaq 192(%rsi),%rsi - movq $2,%rcx - movq $8,%r8 - cmpq $64,%rbx - jbe seal_sse_tail_64 - cmpq $128,%rbx - jbe seal_sse_tail_128 - cmpq $192,%rbx - jbe seal_sse_tail_192 - -1: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa %xmm0,%xmm3 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa 96(%rbp),%xmm15 - paddd .sse_inc(%rip),%xmm15 - movdqa %xmm15,%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - movdqa %xmm15,144(%rbp) - -2: - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - 
paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - movdqa 80(%rbp),%xmm8 - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 -.byte 102,15,58,15,255,4 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,12 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 
-.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - movdqa %xmm8,80(%rbp) - movdqa .rol16(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $20,%xmm8 - pslld $32-20,%xmm4 - pxor %xmm8,%xmm4 - movdqa .rol8(%rip),%xmm8 - paddd %xmm7,%xmm3 - paddd %xmm6,%xmm2 - paddd %xmm5,%xmm1 - paddd %xmm4,%xmm0 - pxor %xmm3,%xmm15 - pxor %xmm2,%xmm14 - pxor %xmm1,%xmm13 - pxor %xmm0,%xmm12 -.byte 102,69,15,56,0,248 -.byte 102,69,15,56,0,240 -.byte 102,69,15,56,0,232 -.byte 102,69,15,56,0,224 - movdqa 80(%rbp),%xmm8 - paddd %xmm15,%xmm11 - paddd %xmm14,%xmm10 - paddd %xmm13,%xmm9 - paddd %xmm12,%xmm8 - pxor %xmm11,%xmm7 - pxor %xmm10,%xmm6 - pxor %xmm9,%xmm5 - pxor %xmm8,%xmm4 - movdqa %xmm8,80(%rbp) - movdqa %xmm7,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm7 - pxor %xmm8,%xmm7 - movdqa %xmm6,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm6 - pxor %xmm8,%xmm6 - movdqa %xmm5,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm5 - pxor %xmm8,%xmm5 - movdqa %xmm4,%xmm8 - psrld $25,%xmm8 - pslld $32-25,%xmm4 - pxor %xmm8,%xmm4 - 
movdqa 80(%rbp),%xmm8 -.byte 102,15,58,15,255,12 -.byte 102,69,15,58,15,219,8 -.byte 102,69,15,58,15,255,4 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - - leaq 16(%rdi),%rdi - decq %r8 - jge 2b - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - decq %rcx - jg 2b - paddd .chacha20_consts(%rip),%xmm3 - paddd 48(%rbp),%xmm7 - paddd 64(%rbp),%xmm11 - paddd 144(%rbp),%xmm15 - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - movdqa %xmm14,80(%rbp) - movdqa %xmm14,80(%rbp) - movdqu 0 + 0(%rsi),%xmm14 - pxor %xmm3,%xmm14 - movdqu %xmm14,0 + 0(%rdi) - movdqu 16 + 0(%rsi),%xmm14 - pxor %xmm7,%xmm14 - movdqu %xmm14,16 + 0(%rdi) - movdqu 32 + 0(%rsi),%xmm14 - pxor %xmm11,%xmm14 - movdqu %xmm14,32 + 0(%rdi) - movdqu 48 + 0(%rsi),%xmm14 - pxor %xmm15,%xmm14 - movdqu %xmm14,48 + 0(%rdi) - - movdqa 80(%rbp),%xmm14 - movdqu 0 + 64(%rsi),%xmm3 - movdqu 
16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 64(%rdi) - movdqu %xmm6,16 + 64(%rdi) - movdqu %xmm10,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - movdqu 0 + 128(%rsi),%xmm3 - movdqu 16 + 128(%rsi),%xmm7 - movdqu 32 + 128(%rsi),%xmm11 - movdqu 48 + 128(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 128(%rdi) - movdqu %xmm5,16 + 128(%rdi) - movdqu %xmm9,32 + 128(%rdi) - movdqu %xmm15,48 + 128(%rdi) - - cmpq $256,%rbx - ja 3f - - movq $192,%rcx - subq $192,%rbx - leaq 192(%rsi),%rsi - jmp seal_sse_128_seal_hash -3: - movdqu 0 + 192(%rsi),%xmm3 - movdqu 16 + 192(%rsi),%xmm7 - movdqu 32 + 192(%rsi),%xmm11 - movdqu 48 + 192(%rsi),%xmm15 - pxor %xmm3,%xmm0 - pxor %xmm7,%xmm4 - pxor %xmm11,%xmm8 - pxor %xmm12,%xmm15 - movdqu %xmm0,0 + 192(%rdi) - movdqu %xmm4,16 + 192(%rdi) - movdqu %xmm8,32 + 192(%rdi) - movdqu %xmm15,48 + 192(%rdi) - - leaq 256(%rsi),%rsi - subq $256,%rbx - movq $6,%rcx - movq $4,%r8 - cmpq $192,%rbx - jg 1b - movq %rbx,%rcx - testq %rbx,%rbx - je seal_sse_128_seal_hash - movq $6,%rcx - cmpq $64,%rbx - jg 3f - -seal_sse_tail_64: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa 96(%rbp),%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq 
$2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - - jmp seal_sse_128_seal -3: - cmpq $128,%rbx - jg 3f - -seal_sse_tail_128: - 
movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa 96(%rbp),%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 
0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor 
%xmm13,%xmm15 - movdqu %xmm1,0 + 0(%rdi) - movdqu %xmm5,16 + 0(%rdi) - movdqu %xmm9,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - - movq $64,%rcx - subq $64,%rbx - leaq 64(%rsi),%rsi - jmp seal_sse_128_seal_hash -3: - -seal_sse_tail_192: - movdqa .chacha20_consts(%rip),%xmm0 - movdqa 48(%rbp),%xmm4 - movdqa 64(%rbp),%xmm8 - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm5 - movdqa %xmm8,%xmm9 - movdqa %xmm0,%xmm2 - movdqa %xmm4,%xmm6 - movdqa %xmm8,%xmm10 - movdqa 96(%rbp),%xmm14 - paddd .sse_inc(%rip),%xmm14 - movdqa %xmm14,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm13,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,96(%rbp) - movdqa %xmm13,112(%rbp) - movdqa %xmm14,128(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld 
$12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd 
%xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - leaq 16(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - paddd .chacha20_consts(%rip),%xmm2 - paddd 48(%rbp),%xmm6 - paddd 64(%rbp),%xmm10 - paddd 128(%rbp),%xmm14 - paddd .chacha20_consts(%rip),%xmm1 - paddd 48(%rbp),%xmm5 - paddd 64(%rbp),%xmm9 - paddd 112(%rbp),%xmm13 - paddd .chacha20_consts(%rip),%xmm0 - paddd 48(%rbp),%xmm4 - paddd 64(%rbp),%xmm8 - paddd 96(%rbp),%xmm12 - movdqu 0 + 0(%rsi),%xmm3 - movdqu 16 + 0(%rsi),%xmm7 - movdqu 32 + 0(%rsi),%xmm11 - movdqu 48 + 0(%rsi),%xmm15 - pxor %xmm3,%xmm2 - pxor %xmm7,%xmm6 - pxor %xmm11,%xmm10 - pxor %xmm14,%xmm15 - movdqu %xmm2,0 + 0(%rdi) - movdqu %xmm6,16 + 0(%rdi) - movdqu %xmm10,32 + 0(%rdi) - movdqu %xmm15,48 + 0(%rdi) - movdqu 0 + 64(%rsi),%xmm3 - movdqu 16 + 64(%rsi),%xmm7 - movdqu 32 + 64(%rsi),%xmm11 - movdqu 48 + 64(%rsi),%xmm15 - pxor %xmm3,%xmm1 - pxor %xmm7,%xmm5 - pxor %xmm11,%xmm9 - pxor %xmm13,%xmm15 - movdqu %xmm1,0 + 64(%rdi) - movdqu %xmm5,16 + 64(%rdi) - movdqu %xmm9,32 + 64(%rdi) - movdqu %xmm15,48 + 64(%rdi) - - movq $128,%rcx - subq $128,%rbx - leaq 128(%rsi),%rsi - -seal_sse_128_seal_hash: - cmpq $16,%rcx - jb seal_sse_128_seal - addq 0(%rdi),%r10 - adcq 
8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx - leaq 16(%rdi),%rdi - jmp seal_sse_128_seal_hash - -seal_sse_128_seal: - cmpq $16,%rbx - jb seal_sse_tail_16 - subq $16,%rbx - - movdqu 0(%rsi),%xmm3 - pxor %xmm3,%xmm0 - movdqu %xmm0,0(%rdi) - - addq 0(%rdi),%r10 - adcq 8(%rdi),%r11 - adcq $1,%r12 - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movdqa %xmm4,%xmm0 - movdqa %xmm8,%xmm4 - movdqa %xmm12,%xmm8 - movdqa %xmm1,%xmm12 - movdqa %xmm5,%xmm1 - movdqa %xmm9,%xmm5 - movdqa %xmm13,%xmm9 - jmp seal_sse_128_seal - -seal_sse_tail_16: - testq %rbx,%rbx - jz process_blocks_of_extra_in - - movq %rbx,%r8 - movq %rbx,%rcx - leaq -1(%rsi,%rbx), %rsi - pxor %xmm15,%xmm15 -1: - pslldq $1,%xmm15 - pinsrb $0,(%rsi),%xmm15 - leaq 
-1(%rsi),%rsi - decq %rcx - jne 1b - - - pxor %xmm0,%xmm15 - - - movq %rbx,%rcx - movdqu %xmm15,%xmm0 -2: - pextrb $0,%xmm0,(%rdi) - psrldq $1,%xmm0 - addq $1,%rdi - subq $1,%rcx - jnz 2b - - - - - - - - - movq 288+32(%rsp),%r9 - movq 56(%r9),%r14 - movq 48(%r9),%r13 - testq %r14,%r14 - jz process_partial_block - - movq $16,%r15 - subq %rbx,%r15 - cmpq %r15,%r14 - - jge load_extra_in - movq %r14,%r15 - -load_extra_in: - - - leaq -1(%r13,%r15), %rsi - - - addq %r15,%r13 - subq %r15,%r14 - movq %r13,48(%r9) - movq %r14,56(%r9) - - - - addq %r15,%r8 - - - pxor %xmm11,%xmm11 -3: - pslldq $1,%xmm11 - pinsrb $0,(%rsi),%xmm11 - leaq -1(%rsi),%rsi - subq $1,%r15 - jnz 3b - - - - - movq %rbx,%r15 - -4: - pslldq $1,%xmm11 - subq $1,%r15 - jnz 4b - - - - - leaq .and_masks(%rip),%r15 - shlq $4,%rbx - pand -16(%r15,%rbx), %xmm15 - - - por %xmm11,%xmm15 - - - -.byte 102,77,15,126,253 - pextrq $1,%xmm15,%r14 - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -process_blocks_of_extra_in: - - movq 288+32(%rsp),%r9 - movq 48(%r9),%rsi - movq 56(%r9),%r8 - movq %r8,%rcx - shrq $4,%r8 - -5: - jz process_extra_in_trailer - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 
8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rsi),%rsi - subq $1,%r8 - jmp 5b - -process_extra_in_trailer: - andq $15,%rcx - movq %rcx,%rbx - jz do_length_block - leaq -1(%rsi,%rcx), %rsi - -6: - pslldq $1,%xmm15 - pinsrb $0,(%rsi),%xmm15 - leaq -1(%rsi),%rsi - subq $1,%rcx - jnz 6b - -process_partial_block: - - leaq .and_masks(%rip),%r15 - shlq $4,%rbx - pand -16(%r15,%rbx), %xmm15 -.byte 102,77,15,126,253 - pextrq $1,%xmm15,%r14 - addq %r13,%r10 - adcq %r14,%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - -do_length_block: - addq 32(%rbp),%r10 - adcq 8+32(%rbp),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq 
%rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - movq %r10,%r13 - movq %r11,%r14 - movq %r12,%r15 - subq $-5,%r10 - sbbq $-1,%r11 - sbbq $3,%r12 - cmovcq %r13,%r10 - cmovcq %r14,%r11 - cmovcq %r15,%r12 - - addq 0+16(%rbp),%r10 - adcq 8+16(%rbp),%r11 - - addq $288 + 32,%rsp - - popq %r9 - - movq %r10,0(%r9) - movq %r11,8(%r9) - - popq %r15 - - popq %r14 - - popq %r13 - - popq %r12 - - popq %rbx - - popq %rbp - - .byte 0xf3,0xc3 - - -seal_sse_128: - movdqu .chacha20_consts(%rip),%xmm0 - movdqa %xmm0,%xmm1 - movdqa %xmm0,%xmm2 - movdqu 0(%r9),%xmm4 - movdqa %xmm4,%xmm5 - movdqa %xmm4,%xmm6 - movdqu 16(%r9),%xmm8 - movdqa %xmm8,%xmm9 - movdqa %xmm8,%xmm10 - movdqu 32(%r9),%xmm14 - movdqa %xmm14,%xmm12 - paddd .sse_inc(%rip),%xmm12 - movdqa %xmm12,%xmm13 - paddd .sse_inc(%rip),%xmm13 - movdqa %xmm4,%xmm7 - movdqa %xmm8,%xmm11 - movdqa %xmm12,%xmm15 - movq $10,%r10 -1: - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,4 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,12 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,4 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,12 - 
paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,4 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,12 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol16(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm4 - pxor %xmm3,%xmm4 - paddd %xmm4,%xmm0 - pxor %xmm0,%xmm12 - pshufb .rol8(%rip),%xmm12 - paddd %xmm12,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,15,228,12 -.byte 102,69,15,58,15,192,8 -.byte 102,69,15,58,15,228,4 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol16(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm5 - pxor %xmm3,%xmm5 - paddd %xmm5,%xmm1 - pxor %xmm1,%xmm13 - pshufb .rol8(%rip),%xmm13 - paddd %xmm13,%xmm9 - pxor %xmm9,%xmm5 - movdqa %xmm5,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm5 - pxor %xmm3,%xmm5 -.byte 102,15,58,15,237,12 -.byte 102,69,15,58,15,201,8 -.byte 102,69,15,58,15,237,4 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol16(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $12,%xmm3 - psrld $20,%xmm6 - pxor %xmm3,%xmm6 - paddd %xmm6,%xmm2 - pxor %xmm2,%xmm14 - pshufb .rol8(%rip),%xmm14 - paddd %xmm14,%xmm10 - pxor %xmm10,%xmm6 - movdqa %xmm6,%xmm3 - pslld $7,%xmm3 - psrld $25,%xmm6 - pxor %xmm3,%xmm6 -.byte 102,15,58,15,246,12 -.byte 102,69,15,58,15,210,8 -.byte 102,69,15,58,15,246,4 - - decq %r10 - jnz 1b - paddd .chacha20_consts(%rip),%xmm0 - paddd .chacha20_consts(%rip),%xmm1 - paddd .chacha20_consts(%rip),%xmm2 - paddd %xmm7,%xmm4 - paddd %xmm7,%xmm5 - paddd 
%xmm7,%xmm6 - paddd %xmm11,%xmm8 - paddd %xmm11,%xmm9 - paddd %xmm15,%xmm12 - paddd .sse_inc(%rip),%xmm15 - paddd %xmm15,%xmm13 - - pand .clamp(%rip),%xmm2 - movdqa %xmm2,0(%rbp) - movdqa %xmm6,16(%rbp) - - movq %r8,%r8 - call poly_hash_ad_internal - jmp seal_sse_128_seal - - - - -.p2align 6 -chacha20_poly1305_open_avx2: - vzeroupper - vmovdqa .chacha20_consts(%rip),%ymm0 - vbroadcasti128 0(%r9),%ymm4 - vbroadcasti128 16(%r9),%ymm8 - vbroadcasti128 32(%r9),%ymm12 - vpaddd .avx2_init(%rip),%ymm12,%ymm12 - cmpq $192,%rbx - jbe open_avx2_192 - cmpq $320,%rbx - jbe open_avx2_320 - - vmovdqa %ymm4,64(%rbp) - vmovdqa %ymm8,96(%rbp) - vmovdqa %ymm12,160(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 
- vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - - movq %r8,%r8 - call poly_hash_ad_internal - xorq %rcx,%rcx - -1: - addq 0(%rsi,%rcx), %r10 - adcq 8+0(%rsi,%rcx), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - addq $16,%rcx - cmpq $64,%rcx - jne 1b - - vpxor 0(%rsi),%ymm0,%ymm0 - vpxor 32(%rsi),%ymm4,%ymm4 - vmovdqu %ymm0,0(%rdi) - vmovdqu %ymm4,32(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi - subq $64,%rbx -1: - - cmpq $512,%rbx - jb 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - xorq %rcx,%rcx -2: - addq 0*8(%rsi,%rcx), %r10 - adcq 8+0*8(%rsi,%rcx), %r11 - adcq $1,%r12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor 
%ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - addq %rax,%r15 - adcq %rdx,%r9 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - addq 2*8(%rsi,%rcx), %r10 - adcq 8+2*8(%rsi,%rcx), %r11 - adcq $1,%r12 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - 
movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - addq %rax,%r15 - adcq %rdx,%r9 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld 
$20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - addq 4*8(%rsi,%rcx), %r10 - adcq 8+4*8(%rsi,%rcx), %r11 - adcq $1,%r12 - - leaq 48(%rcx),%rcx - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - addq %rax,%r15 - adcq %rdx,%r9 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - 
movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - cmpq $60*8,%rcx - jne 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - addq 60*8(%rsi),%r10 - adcq 8+60*8(%rsi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 
- adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - addq 60*8+16(%rsi),%r10 - adcq 8+60*8+16(%rsi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 - vpxor 0+384(%rsi),%ymm3,%ymm3 - vpxor 32+384(%rsi),%ymm0,%ymm0 - vpxor 64+384(%rsi),%ymm4,%ymm4 - vpxor 96+384(%rsi),%ymm8,%ymm8 - vmovdqu %ymm3,0+384(%rdi) - vmovdqu %ymm0,32+384(%rdi) - vmovdqu %ymm4,64+384(%rdi) - vmovdqu %ymm8,96+384(%rdi) - - leaq 512(%rsi),%rsi - leaq 
512(%rdi),%rdi - subq $512,%rbx - jmp 1b -3: - testq %rbx,%rbx - vzeroupper - je open_sse_finalize -3: - cmpq $128,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - - xorq %r8,%r8 - movq %rbx,%rcx - andq $-16,%rcx - testq %rcx,%rcx - je 2f -1: - addq 0*8(%rsi,%r8), %r10 - adcq 8+0*8(%rsi,%r8), %r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -2: - addq $16,%r8 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - 
vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - cmpq %rcx,%r8 - jb 1b - cmpq $160,%r8 - jne 2b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - jmp open_avx2_tail_loop -3: - cmpq $256,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - - movq %rbx,128(%rbp) - movq %rbx,%rcx - subq $128,%rcx - shrq $4,%rcx - movq $10,%r8 - cmpq $10,%rcx - cmovgq %r8,%rcx - movq %rsi,%rbx - xorq %r8,%r8 -1: - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb 
.rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - - incq %r8 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd 
%ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - cmpq %rcx,%r8 - jb 1b - cmpq $10,%r8 - jne 2b - movq %rbx,%r8 - subq %rsi,%rbx - movq %rbx,%rcx - movq 128(%rbp),%rbx -1: - addq $16,%rcx - cmpq %rbx,%rcx - jg 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm1,%ymm1 - vpxor 64+0(%rsi),%ymm5,%ymm5 - vpxor 96+0(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm1,32+0(%rdi) - vmovdqu %ymm5,64+0(%rdi) - vmovdqu %ymm9,96+0(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 
$0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 128(%rsi),%rsi - leaq 128(%rdi),%rdi - subq $128,%rbx - jmp open_avx2_tail_loop -3: - cmpq $384,%rbx - ja 3f - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - - movq %rbx,128(%rbp) - movq %rbx,%rcx - subq $256,%rcx - shrq $4,%rcx - addq $6,%rcx - movq $10,%r8 - cmpq $10,%rcx - cmovgq %r8,%rcx - movq %rsi,%rbx - xorq %r8,%r8 -1: - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx -2: - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd 
%ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - addq 0(%rbx),%r10 - adcq 8+0(%rbx),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rbx),%rbx - incq %r8 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - 
vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - - cmpq %rcx,%r8 - jb 1b - cmpq $10,%r8 - jne 2b - movq %rbx,%r8 - subq %rsi,%rbx - movq %rbx,%rcx - movq 128(%rbp),%rbx -1: - addq $16,%rcx - cmpq %rbx,%rcx - jg 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq 
%r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm2,%ymm2 - vpxor 64+0(%rsi),%ymm6,%ymm6 - vpxor 96+0(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm2,32+0(%rdi) - vmovdqu %ymm6,64+0(%rdi) - vmovdqu %ymm10,96+0(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm1,%ymm1 - vpxor 64+128(%rsi),%ymm5,%ymm5 - vpxor 96+128(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm1,32+128(%rdi) - vmovdqu %ymm5,64+128(%rdi) - vmovdqu %ymm9,96+128(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 256(%rsi),%rsi - leaq 256(%rdi),%rdi - subq $256,%rbx - jmp open_avx2_tail_loop -3: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa 
.avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - xorq %rcx,%rcx - movq %rsi,%r8 -1: - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 -2: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 
.rol8(%rip),%ymm8 - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa 
%ymm8,128(%rbp) - addq 16(%r8),%r10 - adcq 8+16(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%r8),%r8 - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - 
vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - incq %rcx - cmpq $4,%rcx - jl 1b - cmpq $10,%rcx - jne 2b - movq %rbx,%rcx - subq $384,%rcx - andq $-16,%rcx -1: - testq %rcx,%rcx - je 1f - addq 0(%r8),%r10 - adcq 8+0(%r8),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%r8),%r8 - subq $16,%rcx - jmp 1b -1: - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 
64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - leaq 384(%rsi),%rsi - leaq 384(%rdi),%rdi - subq $384,%rbx -open_avx2_tail_loop: - cmpq $32,%rbx - jb open_avx2_tail - subq $32,%rbx - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - leaq 
32(%rdi),%rdi - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - jmp open_avx2_tail_loop -open_avx2_tail: - cmpq $16,%rbx - vmovdqa %xmm0,%xmm1 - jb 1f - subq $16,%rbx - - vpxor (%rsi),%xmm0,%xmm1 - vmovdqu %xmm1,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 - vmovdqa %xmm0,%xmm1 -1: - vzeroupper - jmp open_sse_tail_16 - -open_avx2_192: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vmovdqa %ymm12,%ymm11 - vmovdqa %ymm13,%ymm15 - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - 
vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - - decq %r10 - jne 1b - vpaddd %ymm2,%ymm0,%ymm0 - vpaddd %ymm2,%ymm1,%ymm1 - vpaddd %ymm6,%ymm4,%ymm4 - vpaddd %ymm6,%ymm5,%ymm5 - vpaddd %ymm10,%ymm8,%ymm8 - vpaddd %ymm10,%ymm9,%ymm9 - vpaddd %ymm11,%ymm12,%ymm12 - vpaddd %ymm15,%ymm13,%ymm13 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 -open_avx2_short: - movq %r8,%r8 - call poly_hash_ad_internal -open_avx2_hash_and_xor_loop: - cmpq $32,%rbx - jb open_avx2_short_tail_32 - subq $32,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq 
%r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rsi),%r10 - adcq 8+16(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - leaq 32(%rdi),%rdi - - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - vmovdqa %ymm1,%ymm12 - vmovdqa %ymm5,%ymm1 - vmovdqa %ymm9,%ymm5 - vmovdqa %ymm13,%ymm9 - vmovdqa %ymm2,%ymm13 - vmovdqa %ymm6,%ymm2 - jmp open_avx2_hash_and_xor_loop -open_avx2_short_tail_32: - cmpq $16,%rbx - vmovdqa %xmm0,%xmm1 - jb 1f - subq $16,%rbx - addq 0(%rsi),%r10 - adcq 8+0(%rsi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq 
%r9,%r11 - adcq $0,%r12 - - vpxor (%rsi),%xmm0,%xmm3 - vmovdqu %xmm3,(%rdi) - leaq 16(%rsi),%rsi - leaq 16(%rdi),%rdi - vextracti128 $1,%ymm0,%xmm1 -1: - vzeroupper - jmp open_sse_tail_16 - -open_avx2_320: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm14 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - 
vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - 
vpaddd %ymm7,%ymm4,%ymm4 - vpaddd %ymm7,%ymm5,%ymm5 - vpaddd %ymm7,%ymm6,%ymm6 - vpaddd %ymm11,%ymm8,%ymm8 - vpaddd %ymm11,%ymm9,%ymm9 - vpaddd %ymm11,%ymm10,%ymm10 - vpaddd 160(%rbp),%ymm12,%ymm12 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd 224(%rbp),%ymm14,%ymm14 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 - jmp open_avx2_short - - - - -.p2align 6 -chacha20_poly1305_seal_avx2: - vzeroupper - vmovdqa .chacha20_consts(%rip),%ymm0 - vbroadcasti128 0(%r9),%ymm4 - vbroadcasti128 16(%r9),%ymm8 - vbroadcasti128 32(%r9),%ymm12 - vpaddd .avx2_init(%rip),%ymm12,%ymm12 - cmpq $192,%rbx - jbe seal_avx2_192 - cmpq $320,%rbx - jbe seal_avx2_320 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm4,64(%rbp) - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm8,96(%rbp) - vmovdqa %ymm12,%ymm15 - vpaddd .avx2_inc(%rip),%ymm15,%ymm14 - vpaddd .avx2_inc(%rip),%ymm14,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm15,256(%rbp) - movq $10,%r10 -1: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 
128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - 
vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr 
$8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - decq %r10 - jnz 1b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 - vpand .clamp(%rip),%ymm15,%ymm15 - vmovdqa %ymm15,0(%rbp) - movq %r8,%r8 - call poly_hash_ad_internal - - vpxor 0(%rsi),%ymm3,%ymm3 - vpxor 32(%rsi),%ymm11,%ymm11 - vmovdqu %ymm3,0(%rdi) - vmovdqu %ymm11,32(%rdi) - vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+64(%rsi),%ymm15,%ymm15 - vpxor 32+64(%rsi),%ymm2,%ymm2 - vpxor 64+64(%rsi),%ymm6,%ymm6 - vpxor 96+64(%rsi),%ymm10,%ymm10 - vmovdqu %ymm15,0+64(%rdi) - vmovdqu %ymm2,32+64(%rdi) - vmovdqu %ymm6,64+64(%rdi) - vmovdqu %ymm10,96+64(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+192(%rsi),%ymm15,%ymm15 - vpxor 32+192(%rsi),%ymm1,%ymm1 - vpxor 64+192(%rsi),%ymm5,%ymm5 - vpxor 96+192(%rsi),%ymm9,%ymm9 - vmovdqu %ymm15,0+192(%rdi) - vmovdqu 
%ymm1,32+192(%rdi) - vmovdqu %ymm5,64+192(%rdi) - vmovdqu %ymm9,96+192(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm15,%ymm8 - - leaq 320(%rsi),%rsi - subq $320,%rbx - movq $320,%rcx - cmpq $128,%rbx - jbe seal_avx2_hash - vpxor 0(%rsi),%ymm0,%ymm0 - vpxor 32(%rsi),%ymm4,%ymm4 - vpxor 64(%rsi),%ymm8,%ymm8 - vpxor 96(%rsi),%ymm12,%ymm12 - vmovdqu %ymm0,320(%rdi) - vmovdqu %ymm4,352(%rdi) - vmovdqu %ymm8,384(%rdi) - vmovdqu %ymm12,416(%rdi) - leaq 128(%rsi),%rsi - subq $128,%rbx - movq $8,%rcx - movq $2,%r8 - cmpq $128,%rbx - jbe seal_avx2_tail_128 - cmpq $256,%rbx - jbe seal_avx2_tail_256 - cmpq $384,%rbx - jbe seal_avx2_tail_384 - cmpq $512,%rbx - jbe seal_avx2_tail_512 - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor 
%ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - 
vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - 
vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - - subq $16,%rdi - movq $9,%rcx - jmp 4f -1: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - - movq $10,%rcx -2: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd 
%ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - addq %rax,%r15 - adcq %rdx,%r9 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - -4: - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd 
%ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - addq %rax,%r15 - adcq %rdx,%r9 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - 
adcq $0,%r12 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - addq 32(%rdi),%r10 - adcq 8+32(%rdi),%r11 - adcq $1,%r12 - - leaq 48(%rdi),%rdi - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - addq %rax,%r15 - adcq %rdx,%r9 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - vpalignr 
$8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - decq %rcx - jne 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - leaq 32(%rdi),%rdi - vmovdqa %ymm0,128(%rbp) - addq -32(%rdi),%r10 - adcq 8+-32(%rdi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq 
%r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - addq -16(%rdi),%r10 - adcq 8+-16(%rdi),%r11 - adcq $1,%r12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 - vpxor 0+384(%rsi),%ymm3,%ymm3 - vpxor 32+384(%rsi),%ymm0,%ymm0 - vpxor 64+384(%rsi),%ymm4,%ymm4 - vpxor 
96+384(%rsi),%ymm8,%ymm8 - vmovdqu %ymm3,0+384(%rdi) - vmovdqu %ymm0,32+384(%rdi) - vmovdqu %ymm4,64+384(%rdi) - vmovdqu %ymm8,96+384(%rdi) - - leaq 512(%rsi),%rsi - subq $512,%rbx - cmpq $512,%rbx - jg 1b - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - movq $10,%rcx - xorq %r8,%r8 - cmpq $128,%rbx - ja 3f - -seal_avx2_tail_128: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq 
%rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld 
$20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - jmp seal_avx2_short_loop -3: - cmpq $256,%rbx - ja 3f - -seal_avx2_tail_256: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 
0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax 
- mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq 
$2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm1,%ymm1 - vpxor 64+0(%rsi),%ymm5,%ymm5 - vpxor 96+0(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm1,32+0(%rdi) - vmovdqu %ymm5,64+0(%rdi) - vmovdqu %ymm9,96+0(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $128,%rcx - leaq 128(%rsi),%rsi - subq $128,%rbx - jmp seal_avx2_hash -3: - cmpq $384,%rbx - ja seal_avx2_tail_512 - -seal_avx2_tail_384: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq 
%r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq 
%r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - 
vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+0(%rsi),%ymm3,%ymm3 - vpxor 32+0(%rsi),%ymm2,%ymm2 - vpxor 64+0(%rsi),%ymm6,%ymm6 - vpxor 96+0(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+0(%rdi) - vmovdqu %ymm2,32+0(%rdi) - vmovdqu %ymm6,64+0(%rdi) - vmovdqu %ymm10,96+0(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 
0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm1,%ymm1 - vpxor 64+128(%rsi),%ymm5,%ymm5 - vpxor 96+128(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm1,32+128(%rdi) - vmovdqu %ymm5,64+128(%rdi) - vmovdqu %ymm9,96+128(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $256,%rcx - leaq 256(%rsi),%rsi - subq $256,%rbx - jmp seal_avx2_hash - -seal_avx2_tail_512: - vmovdqa .chacha20_consts(%rip),%ymm0 - vmovdqa 64(%rbp),%ymm4 - vmovdqa 96(%rbp),%ymm8 - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm10 - vmovdqa %ymm0,%ymm3 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa .avx2_inc(%rip),%ymm12 - vpaddd 160(%rbp),%ymm12,%ymm15 - vpaddd %ymm15,%ymm12,%ymm14 - vpaddd %ymm14,%ymm12,%ymm13 - vpaddd %ymm13,%ymm12,%ymm12 - vmovdqa %ymm15,256(%rbp) - vmovdqa %ymm14,224(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm12,160(%rbp) - -1: - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - addq %rax,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi -2: - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb 
%ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 
128(%rbp),%ymm8 - vpalignr $4,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $12,%ymm15,%ymm15,%ymm15 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - addq %rax,%r15 - adcq %rdx,%r9 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vmovdqa %ymm8,128(%rbp) - vmovdqa .rol16(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $20,%ymm7,%ymm8 - vpslld $32-20,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $20,%ymm6,%ymm8 - vpslld $32-20,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $20,%ymm5,%ymm8 - vpslld $32-20,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $20,%ymm4,%ymm8 - vpslld $32-20,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - vmovdqa .rol8(%rip),%ymm8 - vpaddd %ymm7,%ymm3,%ymm3 - vpaddd %ymm6,%ymm2,%ymm2 - vpaddd %ymm5,%ymm1,%ymm1 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm3,%ymm15,%ymm15 - vpxor %ymm2,%ymm14,%ymm14 - vpxor %ymm1,%ymm13,%ymm13 - vpxor %ymm0,%ymm12,%ymm12 - 
vpshufb %ymm8,%ymm15,%ymm15 - vpshufb %ymm8,%ymm14,%ymm14 - vpshufb %ymm8,%ymm13,%ymm13 - vpshufb %ymm8,%ymm12,%ymm12 - vmovdqa 128(%rbp),%ymm8 - vpaddd %ymm15,%ymm11,%ymm11 - vpaddd %ymm14,%ymm10,%ymm10 - vpaddd %ymm13,%ymm9,%ymm9 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm11,%ymm7,%ymm7 - vpxor %ymm10,%ymm6,%ymm6 - movq 0+0(%rbp),%rdx - movq %rdx,%r15 - mulxq %r10,%r13,%r14 - mulxq %r11,%rax,%rdx - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - vpxor %ymm9,%ymm5,%ymm5 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa %ymm8,128(%rbp) - vpsrld $25,%ymm7,%ymm8 - vpslld $32-25,%ymm7,%ymm7 - vpxor %ymm8,%ymm7,%ymm7 - vpsrld $25,%ymm6,%ymm8 - vpslld $32-25,%ymm6,%ymm6 - vpxor %ymm8,%ymm6,%ymm6 - vpsrld $25,%ymm5,%ymm8 - vpslld $32-25,%ymm5,%ymm5 - vpxor %ymm8,%ymm5,%ymm5 - vpsrld $25,%ymm4,%ymm8 - vpslld $32-25,%ymm4,%ymm4 - vpxor %ymm8,%ymm4,%ymm4 - vmovdqa 128(%rbp),%ymm8 - vpalignr $12,%ymm7,%ymm7,%ymm7 - vpalignr $8,%ymm11,%ymm11,%ymm11 - vpalignr $4,%ymm15,%ymm15,%ymm15 - vpalignr $12,%ymm6,%ymm6,%ymm6 - movq 8+0(%rbp),%rdx - mulxq %r10,%r10,%rax - addq %r10,%r14 - mulxq %r11,%r11,%r9 - adcq %r11,%r15 - adcq $0,%r9 - imulq %r12,%rdx - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm12,%ymm12,%ymm12 - - - - - - - - - - - - - addq %rax,%r15 - adcq %rdx,%r9 - - - - - - - - - - - - - - - - - - - - - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - decq %rcx - jg 1b - decq %r8 - jge 2b - vpaddd .chacha20_consts(%rip),%ymm3,%ymm3 - vpaddd 64(%rbp),%ymm7,%ymm7 - vpaddd 96(%rbp),%ymm11,%ymm11 - vpaddd 256(%rbp),%ymm15,%ymm15 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 
- vpaddd 64(%rbp),%ymm6,%ymm6 - vpaddd 96(%rbp),%ymm10,%ymm10 - vpaddd 224(%rbp),%ymm14,%ymm14 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd 64(%rbp),%ymm5,%ymm5 - vpaddd 96(%rbp),%ymm9,%ymm9 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd 64(%rbp),%ymm4,%ymm4 - vpaddd 96(%rbp),%ymm8,%ymm8 - vpaddd 160(%rbp),%ymm12,%ymm12 - - vmovdqa %ymm0,128(%rbp) - vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 - vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 - vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 - vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 - vpxor 0+0(%rsi),%ymm0,%ymm0 - vpxor 32+0(%rsi),%ymm3,%ymm3 - vpxor 64+0(%rsi),%ymm7,%ymm7 - vpxor 96+0(%rsi),%ymm11,%ymm11 - vmovdqu %ymm0,0+0(%rdi) - vmovdqu %ymm3,32+0(%rdi) - vmovdqu %ymm7,64+0(%rdi) - vmovdqu %ymm11,96+0(%rdi) - - vmovdqa 128(%rbp),%ymm0 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 - vpxor 0+128(%rsi),%ymm3,%ymm3 - vpxor 32+128(%rsi),%ymm2,%ymm2 - vpxor 64+128(%rsi),%ymm6,%ymm6 - vpxor 96+128(%rsi),%ymm10,%ymm10 - vmovdqu %ymm3,0+128(%rdi) - vmovdqu %ymm2,32+128(%rdi) - vmovdqu %ymm6,64+128(%rdi) - vmovdqu %ymm10,96+128(%rdi) - vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 - vpxor 0+256(%rsi),%ymm3,%ymm3 - vpxor 32+256(%rsi),%ymm1,%ymm1 - vpxor 64+256(%rsi),%ymm5,%ymm5 - vpxor 96+256(%rsi),%ymm9,%ymm9 - vmovdqu %ymm3,0+256(%rdi) - vmovdqu %ymm1,32+256(%rdi) - vmovdqu %ymm5,64+256(%rdi) - vmovdqu %ymm9,96+256(%rdi) - vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 - vmovdqa %ymm3,%ymm8 - - movq $384,%rcx - leaq 384(%rsi),%rsi - subq $384,%rbx - jmp seal_avx2_hash - -seal_avx2_320: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa 
%ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vpaddd .avx2_inc(%rip),%ymm13,%ymm14 - vmovdqa %ymm4,%ymm7 - vmovdqa %ymm8,%ymm11 - vmovdqa %ymm12,160(%rbp) - vmovdqa %ymm13,192(%rbp) - vmovdqa %ymm14,224(%rbp) - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $12,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $4,%ymm6,%ymm6,%ymm6 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor 
%ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol16(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpsrld $20,%ymm6,%ymm3 - vpslld $12,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpaddd %ymm6,%ymm2,%ymm2 - vpxor %ymm2,%ymm14,%ymm14 - vpshufb .rol8(%rip),%ymm14,%ymm14 - vpaddd %ymm14,%ymm10,%ymm10 - vpxor %ymm10,%ymm6,%ymm6 - vpslld $7,%ymm6,%ymm3 - vpsrld $25,%ymm6,%ymm6 - vpxor %ymm3,%ymm6,%ymm6 - vpalignr $4,%ymm14,%ymm14,%ymm14 - vpalignr $8,%ymm10,%ymm10,%ymm10 - vpalignr $12,%ymm6,%ymm6,%ymm6 - - decq %r10 - jne 1b - vpaddd .chacha20_consts(%rip),%ymm0,%ymm0 - vpaddd .chacha20_consts(%rip),%ymm1,%ymm1 - vpaddd .chacha20_consts(%rip),%ymm2,%ymm2 - vpaddd %ymm7,%ymm4,%ymm4 - vpaddd %ymm7,%ymm5,%ymm5 - vpaddd %ymm7,%ymm6,%ymm6 - vpaddd %ymm11,%ymm8,%ymm8 - vpaddd %ymm11,%ymm9,%ymm9 - vpaddd %ymm11,%ymm10,%ymm10 - vpaddd 160(%rbp),%ymm12,%ymm12 - vpaddd 192(%rbp),%ymm13,%ymm13 - vpaddd 224(%rbp),%ymm14,%ymm14 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 
- vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 - vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 - vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 - vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 - vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 - jmp seal_avx2_short - -seal_avx2_192: - vmovdqa %ymm0,%ymm1 - vmovdqa %ymm0,%ymm2 - vmovdqa %ymm4,%ymm5 - vmovdqa %ymm4,%ymm6 - vmovdqa %ymm8,%ymm9 - vmovdqa %ymm8,%ymm10 - vpaddd .avx2_inc(%rip),%ymm12,%ymm13 - vmovdqa %ymm12,%ymm11 - vmovdqa %ymm13,%ymm15 - movq $10,%r10 -1: - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $12,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $4,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $12,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $4,%ymm5,%ymm5,%ymm5 - vpaddd %ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol16(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpsrld $20,%ymm4,%ymm3 - vpslld $12,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpaddd 
%ymm4,%ymm0,%ymm0 - vpxor %ymm0,%ymm12,%ymm12 - vpshufb .rol8(%rip),%ymm12,%ymm12 - vpaddd %ymm12,%ymm8,%ymm8 - vpxor %ymm8,%ymm4,%ymm4 - vpslld $7,%ymm4,%ymm3 - vpsrld $25,%ymm4,%ymm4 - vpxor %ymm3,%ymm4,%ymm4 - vpalignr $4,%ymm12,%ymm12,%ymm12 - vpalignr $8,%ymm8,%ymm8,%ymm8 - vpalignr $12,%ymm4,%ymm4,%ymm4 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol16(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpsrld $20,%ymm5,%ymm3 - vpslld $12,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpaddd %ymm5,%ymm1,%ymm1 - vpxor %ymm1,%ymm13,%ymm13 - vpshufb .rol8(%rip),%ymm13,%ymm13 - vpaddd %ymm13,%ymm9,%ymm9 - vpxor %ymm9,%ymm5,%ymm5 - vpslld $7,%ymm5,%ymm3 - vpsrld $25,%ymm5,%ymm5 - vpxor %ymm3,%ymm5,%ymm5 - vpalignr $4,%ymm13,%ymm13,%ymm13 - vpalignr $8,%ymm9,%ymm9,%ymm9 - vpalignr $12,%ymm5,%ymm5,%ymm5 - - decq %r10 - jne 1b - vpaddd %ymm2,%ymm0,%ymm0 - vpaddd %ymm2,%ymm1,%ymm1 - vpaddd %ymm6,%ymm4,%ymm4 - vpaddd %ymm6,%ymm5,%ymm5 - vpaddd %ymm10,%ymm8,%ymm8 - vpaddd %ymm10,%ymm9,%ymm9 - vpaddd %ymm11,%ymm12,%ymm12 - vpaddd %ymm15,%ymm13,%ymm13 - vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 - - vpand .clamp(%rip),%ymm3,%ymm3 - vmovdqa %ymm3,0(%rbp) - - vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 - vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 - vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 - vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 - vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 - vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 -seal_avx2_short: - movq %r8,%r8 - call poly_hash_ad_internal - xorq %rcx,%rcx -seal_avx2_hash: - cmpq $16,%rcx - jb seal_avx2_short_loop - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq 
%rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - subq $16,%rcx - addq $16,%rdi - jmp seal_avx2_hash -seal_avx2_short_loop: - cmpq $32,%rbx - jb seal_avx2_short_tail - subq $32,%rbx - - vpxor (%rsi),%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - leaq 32(%rsi),%rsi - - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - addq 16(%rdi),%r10 - adcq 8+16(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 32(%rdi),%rdi - - vmovdqa %ymm4,%ymm0 - vmovdqa %ymm8,%ymm4 - vmovdqa %ymm12,%ymm8 - vmovdqa %ymm1,%ymm12 - vmovdqa %ymm5,%ymm1 - vmovdqa 
%ymm9,%ymm5 - vmovdqa %ymm13,%ymm9 - vmovdqa %ymm2,%ymm13 - vmovdqa %ymm6,%ymm2 - jmp seal_avx2_short_loop -seal_avx2_short_tail: - cmpq $16,%rbx - jb 1f - subq $16,%rbx - vpxor (%rsi),%xmm0,%xmm3 - vmovdqu %xmm3,(%rdi) - leaq 16(%rsi),%rsi - addq 0(%rdi),%r10 - adcq 8+0(%rdi),%r11 - adcq $1,%r12 - movq 0+0(%rbp),%rax - movq %rax,%r15 - mulq %r10 - movq %rax,%r13 - movq %rdx,%r14 - movq 0+0(%rbp),%rax - mulq %r11 - imulq %r12,%r15 - addq %rax,%r14 - adcq %rdx,%r15 - movq 8+0(%rbp),%rax - movq %rax,%r9 - mulq %r10 - addq %rax,%r14 - adcq $0,%rdx - movq %rdx,%r10 - movq 8+0(%rbp),%rax - mulq %r11 - addq %rax,%r15 - adcq $0,%rdx - imulq %r12,%r9 - addq %r10,%r15 - adcq %rdx,%r9 - movq %r13,%r10 - movq %r14,%r11 - movq %r15,%r12 - andq $3,%r12 - movq %r15,%r13 - andq $-4,%r13 - movq %r9,%r14 - shrdq $2,%r9,%r15 - shrq $2,%r9 - addq %r13,%r10 - adcq %r14,%r11 - adcq $0,%r12 - addq %r15,%r10 - adcq %r9,%r11 - adcq $0,%r12 - - leaq 16(%rdi),%rdi - vextracti128 $1,%ymm0,%xmm0 -1: - vzeroupper - jmp seal_sse_tail_16 - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aes-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aes-x86_64.S deleted file mode 100644 index 8875d0abbb..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aes-x86_64.S +++ /dev/null @@ -1,2645 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.p2align 4 -_x86_64_AES_encrypt: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - - movl 240(%r15),%r13d - subl $1,%r13d - jmp L$enc_loop -.p2align 4 -L$enc_loop: - - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movl 0(%r14,%rsi,8),%r10d - movl 0(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r12d - - movzbl %bh,%esi - movzbl %ch,%edi - movzbl %dl,%ebp - xorl 3(%r14,%rsi,8),%r10d - xorl 3(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r8d - - movzbl %dh,%esi - shrl $16,%ecx - movzbl %ah,%ebp - xorl 3(%r14,%rsi,8),%r12d - shrl $16,%edx - xorl 3(%r14,%rbp,8),%r8d - - shrl $16,%ebx - leaq 16(%r15),%r15 - shrl $16,%eax - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - xorl 2(%r14,%rsi,8),%r10d - xorl 2(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r12d - - movzbl %dh,%esi - movzbl %ah,%edi - movzbl %bl,%ebp - xorl 1(%r14,%rsi,8),%r10d - xorl 1(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r8d - - movl 12(%r15),%edx - movzbl %bh,%edi - movzbl %ch,%ebp - movl 0(%r15),%eax - xorl 1(%r14,%rdi,8),%r12d - xorl 1(%r14,%rbp,8),%r8d - - movl 4(%r15),%ebx - movl 8(%r15),%ecx - xorl %r10d,%eax - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - subl $1,%r13d - jnz L$enc_loop - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movzbl 2(%r14,%rsi,8),%r10d - movzbl 2(%r14,%rdi,8),%r11d - movzbl 2(%r14,%rbp,8),%r12d - - movzbl %dl,%esi - movzbl %bh,%edi - movzbl %ch,%ebp - movzbl 2(%r14,%rsi,8),%r8d - movl 0(%r14,%rdi,8),%edi - movl 0(%r14,%rbp,8),%ebp - - andl $0x0000ff00,%edi - andl $0x0000ff00,%ebp - - xorl %edi,%r10d - xorl %ebp,%r11d - shrl $16,%ecx - - movzbl %dh,%esi - movzbl %ah,%edi - shrl $16,%edx - movl 0(%r14,%rsi,8),%esi - movl 0(%r14,%rdi,8),%edi - - andl $0x0000ff00,%esi - andl 
$0x0000ff00,%edi - shrl $16,%ebx - xorl %esi,%r12d - xorl %edi,%r8d - shrl $16,%eax - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - movl 0(%r14,%rsi,8),%esi - movl 0(%r14,%rdi,8),%edi - movl 0(%r14,%rbp,8),%ebp - - andl $0x00ff0000,%esi - andl $0x00ff0000,%edi - andl $0x00ff0000,%ebp - - xorl %esi,%r10d - xorl %edi,%r11d - xorl %ebp,%r12d - - movzbl %bl,%esi - movzbl %dh,%edi - movzbl %ah,%ebp - movl 0(%r14,%rsi,8),%esi - movl 2(%r14,%rdi,8),%edi - movl 2(%r14,%rbp,8),%ebp - - andl $0x00ff0000,%esi - andl $0xff000000,%edi - andl $0xff000000,%ebp - - xorl %esi,%r8d - xorl %edi,%r10d - xorl %ebp,%r11d - - movzbl %bh,%esi - movzbl %ch,%edi - movl 16+12(%r15),%edx - movl 2(%r14,%rsi,8),%esi - movl 2(%r14,%rdi,8),%edi - movl 16+0(%r15),%eax - - andl $0xff000000,%esi - andl $0xff000000,%edi - - xorl %esi,%r12d - xorl %edi,%r8d - - movl 16+4(%r15),%ebx - movl 16+8(%r15),%ecx - xorl %r10d,%eax - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx -.byte 0xf3,0xc3 - - -.p2align 4 -_x86_64_AES_encrypt_compact: - - leaq 128(%r14),%r8 - movl 0-128(%r8),%edi - movl 32-128(%r8),%ebp - movl 64-128(%r8),%r10d - movl 96-128(%r8),%r11d - movl 128-128(%r8),%edi - movl 160-128(%r8),%ebp - movl 192-128(%r8),%r10d - movl 224-128(%r8),%r11d - jmp L$enc_loop_compact -.p2align 4 -L$enc_loop_compact: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - leaq 16(%r15),%r15 - movzbl %al,%r10d - movzbl %bl,%r11d - movzbl %cl,%r12d - movzbl %dl,%r8d - movzbl %bh,%esi - movzbl %ch,%edi - shrl $16,%ecx - movzbl %dh,%ebp - movzbl (%r14,%r10,1),%r10d - movzbl (%r14,%r11,1),%r11d - movzbl (%r14,%r12,1),%r12d - movzbl (%r14,%r8,1),%r8d - - movzbl (%r14,%rsi,1),%r9d - movzbl %ah,%esi - movzbl (%r14,%rdi,1),%r13d - movzbl %cl,%edi - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - - shll $8,%r9d - shrl $16,%edx - shll $8,%r13d - xorl %r9d,%r10d - shrl $16,%eax - movzbl %dl,%r9d - shrl $16,%ebx - xorl %r13d,%r11d - shll $8,%ebp - movzbl %al,%r13d - movzbl 
(%r14,%rdi,1),%edi - xorl %ebp,%r12d - - shll $8,%esi - movzbl %bl,%ebp - shll $16,%edi - xorl %esi,%r8d - movzbl (%r14,%r9,1),%r9d - movzbl %dh,%esi - movzbl (%r14,%r13,1),%r13d - xorl %edi,%r10d - - shrl $8,%ecx - movzbl %ah,%edi - shll $16,%r9d - shrl $8,%ebx - shll $16,%r13d - xorl %r9d,%r11d - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rcx,1),%edx - movzbl (%r14,%rbx,1),%ecx - - shll $16,%ebp - xorl %r13d,%r12d - shll $24,%esi - xorl %ebp,%r8d - shll $24,%edi - xorl %esi,%r10d - shll $24,%edx - xorl %edi,%r11d - shll $24,%ecx - movl %r10d,%eax - movl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - cmpq 16(%rsp),%r15 - je L$enc_compact_done - movl $0x80808080,%r10d - movl $0x80808080,%r11d - andl %eax,%r10d - andl %ebx,%r11d - movl %r10d,%esi - movl %r11d,%edi - shrl $7,%r10d - leal (%rax,%rax,1),%r8d - shrl $7,%r11d - leal (%rbx,%rbx,1),%r9d - subl %r10d,%esi - subl %r11d,%edi - andl $0xfefefefe,%r8d - andl $0xfefefefe,%r9d - andl $0x1b1b1b1b,%esi - andl $0x1b1b1b1b,%edi - movl %eax,%r10d - movl %ebx,%r11d - xorl %esi,%r8d - xorl %edi,%r9d - - xorl %r8d,%eax - xorl %r9d,%ebx - movl $0x80808080,%r12d - roll $24,%eax - movl $0x80808080,%ebp - roll $24,%ebx - andl %ecx,%r12d - andl %edx,%ebp - xorl %r8d,%eax - xorl %r9d,%ebx - movl %r12d,%esi - rorl $16,%r10d - movl %ebp,%edi - rorl $16,%r11d - leal (%rcx,%rcx,1),%r8d - shrl $7,%r12d - xorl %r10d,%eax - shrl $7,%ebp - xorl %r11d,%ebx - rorl $8,%r10d - leal (%rdx,%rdx,1),%r9d - rorl $8,%r11d - subl %r12d,%esi - subl %ebp,%edi - xorl %r10d,%eax - xorl %r11d,%ebx - - andl $0xfefefefe,%r8d - andl $0xfefefefe,%r9d - andl $0x1b1b1b1b,%esi - andl $0x1b1b1b1b,%edi - movl %ecx,%r12d - movl %edx,%ebp - xorl %esi,%r8d - xorl %edi,%r9d - - rorl $16,%r12d - xorl %r8d,%ecx - rorl $16,%ebp - xorl %r9d,%edx - roll $24,%ecx - movl 0(%r14),%esi - roll $24,%edx - xorl %r8d,%ecx - movl 64(%r14),%edi - xorl %r9d,%edx - movl 128(%r14),%r8d - xorl %r12d,%ecx - rorl $8,%r12d - xorl 
%ebp,%edx - rorl $8,%ebp - xorl %r12d,%ecx - movl 192(%r14),%r9d - xorl %ebp,%edx - jmp L$enc_loop_compact -.p2align 4 -L$enc_compact_done: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx -.byte 0xf3,0xc3 - - -.p2align 4 -.globl _aes_nohw_encrypt -.private_extern _aes_nohw_encrypt - -.private_extern _aes_nohw_encrypt -_aes_nohw_encrypt: - - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - - leaq -63(%rdx),%rcx - andq $-64,%rsp - subq %rsp,%rcx - negq %rcx - andq $0x3c0,%rcx - subq %rcx,%rsp - subq $32,%rsp - - movq %rsi,16(%rsp) - movq %rax,24(%rsp) - -L$enc_prologue: - - movq %rdx,%r15 - movl 240(%r15),%r13d - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - - shll $4,%r13d - leaq (%r15,%r13,1),%rbp - movq %r15,(%rsp) - movq %rbp,8(%rsp) - - - leaq L$AES_Te+2048(%rip),%r14 - leaq 768(%rsp),%rbp - subq %r14,%rbp - andq $0x300,%rbp - leaq (%r14,%rbp,1),%r14 - - call _x86_64_AES_encrypt_compact - - movq 16(%rsp),%r9 - movq 24(%rsp),%rsi - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$enc_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 4 -_x86_64_AES_decrypt: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - - movl 240(%r15),%r13d - subl $1,%r13d - jmp L$dec_loop -.p2align 4 -L$dec_loop: - - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movl 0(%r14,%rsi,8),%r10d - movl 0(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r12d - - movzbl %dh,%esi - movzbl %ah,%edi - movzbl %dl,%ebp - xorl 3(%r14,%rsi,8),%r10d - xorl 3(%r14,%rdi,8),%r11d - movl 0(%r14,%rbp,8),%r8d - - movzbl %bh,%esi - shrl $16,%eax - movzbl %ch,%ebp - xorl 3(%r14,%rsi,8),%r12d - shrl $16,%edx - xorl 3(%r14,%rbp,8),%r8d - - shrl $16,%ebx - leaq 
16(%r15),%r15 - shrl $16,%ecx - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - xorl 2(%r14,%rsi,8),%r10d - xorl 2(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r12d - - movzbl %bh,%esi - movzbl %ch,%edi - movzbl %bl,%ebp - xorl 1(%r14,%rsi,8),%r10d - xorl 1(%r14,%rdi,8),%r11d - xorl 2(%r14,%rbp,8),%r8d - - movzbl %dh,%esi - movl 12(%r15),%edx - movzbl %ah,%ebp - xorl 1(%r14,%rsi,8),%r12d - movl 0(%r15),%eax - xorl 1(%r14,%rbp,8),%r8d - - xorl %r10d,%eax - movl 4(%r15),%ebx - movl 8(%r15),%ecx - xorl %r12d,%ecx - xorl %r11d,%ebx - xorl %r8d,%edx - subl $1,%r13d - jnz L$dec_loop - leaq 2048(%r14),%r14 - movzbl %al,%esi - movzbl %bl,%edi - movzbl %cl,%ebp - movzbl (%r14,%rsi,1),%r10d - movzbl (%r14,%rdi,1),%r11d - movzbl (%r14,%rbp,1),%r12d - - movzbl %dl,%esi - movzbl %dh,%edi - movzbl %ah,%ebp - movzbl (%r14,%rsi,1),%r8d - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll $8,%edi - shll $8,%ebp - - xorl %edi,%r10d - xorl %ebp,%r11d - shrl $16,%edx - - movzbl %bh,%esi - movzbl %ch,%edi - shrl $16,%eax - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - - shll $8,%esi - shll $8,%edi - shrl $16,%ebx - xorl %esi,%r12d - xorl %edi,%r8d - shrl $16,%ecx - - movzbl %cl,%esi - movzbl %dl,%edi - movzbl %al,%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll $16,%esi - shll $16,%edi - shll $16,%ebp - - xorl %esi,%r10d - xorl %edi,%r11d - xorl %ebp,%r12d - - movzbl %bl,%esi - movzbl %bh,%edi - movzbl %ch,%ebp - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movzbl (%r14,%rbp,1),%ebp - - shll $16,%esi - shll $24,%edi - shll $24,%ebp - - xorl %esi,%r8d - xorl %edi,%r10d - xorl %ebp,%r11d - - movzbl %dh,%esi - movzbl %ah,%edi - movl 16+12(%r15),%edx - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%edi - movl 16+0(%r15),%eax - - shll $24,%esi - shll $24,%edi - - xorl %esi,%r12d - xorl %edi,%r8d - - movl 16+4(%r15),%ebx - movl 16+8(%r15),%ecx - leaq -2048(%r14),%r14 - xorl %r10d,%eax - xorl %r11d,%ebx 
- xorl %r12d,%ecx - xorl %r8d,%edx -.byte 0xf3,0xc3 - - -.p2align 4 -_x86_64_AES_decrypt_compact: - - leaq 128(%r14),%r8 - movl 0-128(%r8),%edi - movl 32-128(%r8),%ebp - movl 64-128(%r8),%r10d - movl 96-128(%r8),%r11d - movl 128-128(%r8),%edi - movl 160-128(%r8),%ebp - movl 192-128(%r8),%r10d - movl 224-128(%r8),%r11d - jmp L$dec_loop_compact - -.p2align 4 -L$dec_loop_compact: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx - leaq 16(%r15),%r15 - movzbl %al,%r10d - movzbl %bl,%r11d - movzbl %cl,%r12d - movzbl %dl,%r8d - movzbl %dh,%esi - movzbl %ah,%edi - shrl $16,%edx - movzbl %bh,%ebp - movzbl (%r14,%r10,1),%r10d - movzbl (%r14,%r11,1),%r11d - movzbl (%r14,%r12,1),%r12d - movzbl (%r14,%r8,1),%r8d - - movzbl (%r14,%rsi,1),%r9d - movzbl %ch,%esi - movzbl (%r14,%rdi,1),%r13d - movzbl (%r14,%rbp,1),%ebp - movzbl (%r14,%rsi,1),%esi - - shrl $16,%ecx - shll $8,%r13d - shll $8,%r9d - movzbl %cl,%edi - shrl $16,%eax - xorl %r9d,%r10d - shrl $16,%ebx - movzbl %dl,%r9d - - shll $8,%ebp - xorl %r13d,%r11d - shll $8,%esi - movzbl %al,%r13d - movzbl (%r14,%rdi,1),%edi - xorl %ebp,%r12d - movzbl %bl,%ebp - - shll $16,%edi - xorl %esi,%r8d - movzbl (%r14,%r9,1),%r9d - movzbl %bh,%esi - movzbl (%r14,%rbp,1),%ebp - xorl %edi,%r10d - movzbl (%r14,%r13,1),%r13d - movzbl %ch,%edi - - shll $16,%ebp - shll $16,%r9d - shll $16,%r13d - xorl %ebp,%r8d - movzbl %dh,%ebp - xorl %r9d,%r11d - shrl $8,%eax - xorl %r13d,%r12d - - movzbl (%r14,%rsi,1),%esi - movzbl (%r14,%rdi,1),%ebx - movzbl (%r14,%rbp,1),%ecx - movzbl (%r14,%rax,1),%edx - - movl %r10d,%eax - shll $24,%esi - shll $24,%ebx - shll $24,%ecx - xorl %esi,%eax - shll $24,%edx - xorl %r11d,%ebx - xorl %r12d,%ecx - xorl %r8d,%edx - cmpq 16(%rsp),%r15 - je L$dec_compact_done - - movq 256+0(%r14),%rsi - shlq $32,%rbx - shlq $32,%rdx - movq 256+8(%r14),%rdi - orq %rbx,%rax - orq %rdx,%rcx - movq 256+16(%r14),%rbp - movq %rsi,%r9 - movq %rsi,%r12 - andq %rax,%r9 - andq %rcx,%r12 - movq %r9,%rbx - movq 
%r12,%rdx - shrq $7,%r9 - leaq (%rax,%rax,1),%r8 - shrq $7,%r12 - leaq (%rcx,%rcx,1),%r11 - subq %r9,%rbx - subq %r12,%rdx - andq %rdi,%r8 - andq %rdi,%r11 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r8 - xorq %rdx,%r11 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r8,%r10 - andq %r11,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - leaq (%r8,%r8,1),%r9 - shrq $7,%r13 - leaq (%r11,%r11,1),%r12 - subq %r10,%rbx - subq %r13,%rdx - andq %rdi,%r9 - andq %rdi,%r12 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r9 - xorq %rdx,%r12 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r9,%r10 - andq %r12,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - xorq %rax,%r8 - shrq $7,%r13 - xorq %rcx,%r11 - subq %r10,%rbx - subq %r13,%rdx - leaq (%r9,%r9,1),%r10 - leaq (%r12,%r12,1),%r13 - xorq %rax,%r9 - xorq %rcx,%r12 - andq %rdi,%r10 - andq %rdi,%r13 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r10 - xorq %rdx,%r13 - - xorq %r10,%rax - xorq %r13,%rcx - xorq %r10,%r8 - xorq %r13,%r11 - movq %rax,%rbx - movq %rcx,%rdx - xorq %r10,%r9 - shrq $32,%rbx - xorq %r13,%r12 - shrq $32,%rdx - xorq %r8,%r10 - roll $8,%eax - xorq %r11,%r13 - roll $8,%ecx - xorq %r9,%r10 - roll $8,%ebx - xorq %r12,%r13 - - roll $8,%edx - xorl %r10d,%eax - shrq $32,%r10 - xorl %r13d,%ecx - shrq $32,%r13 - xorl %r10d,%ebx - xorl %r13d,%edx - - movq %r8,%r10 - roll $24,%r8d - movq %r11,%r13 - roll $24,%r11d - shrq $32,%r10 - xorl %r8d,%eax - shrq $32,%r13 - xorl %r11d,%ecx - roll $24,%r10d - movq %r9,%r8 - roll $24,%r13d - movq %r12,%r11 - shrq $32,%r8 - xorl %r10d,%ebx - shrq $32,%r11 - xorl %r13d,%edx - - movq 0(%r14),%rsi - roll $16,%r9d - movq 64(%r14),%rdi - roll $16,%r12d - movq 128(%r14),%rbp - roll $16,%r8d - movq 192(%r14),%r10 - xorl %r9d,%eax - roll $16,%r11d - xorl %r12d,%ecx - movq 256(%r14),%r13 - xorl %r8d,%ebx - xorl %r11d,%edx - jmp L$dec_loop_compact -.p2align 4 -L$dec_compact_done: - xorl 0(%r15),%eax - xorl 4(%r15),%ebx - xorl 8(%r15),%ecx - xorl 12(%r15),%edx -.byte 0xf3,0xc3 - - 
-.p2align 4 -.globl _aes_nohw_decrypt -.private_extern _aes_nohw_decrypt - -.private_extern _aes_nohw_decrypt -_aes_nohw_decrypt: - - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - - leaq -63(%rdx),%rcx - andq $-64,%rsp - subq %rsp,%rcx - negq %rcx - andq $0x3c0,%rcx - subq %rcx,%rsp - subq $32,%rsp - - movq %rsi,16(%rsp) - movq %rax,24(%rsp) - -L$dec_prologue: - - movq %rdx,%r15 - movl 240(%r15),%r13d - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - - shll $4,%r13d - leaq (%r15,%r13,1),%rbp - movq %r15,(%rsp) - movq %rbp,8(%rsp) - - - leaq L$AES_Td+2048(%rip),%r14 - leaq 768(%rsp),%rbp - subq %r14,%rbp - andq $0x300,%rbp - leaq (%r14,%rbp,1),%r14 - shrq $3,%rbp - addq %rbp,%r14 - - call _x86_64_AES_decrypt_compact - - movq 16(%rsp),%r9 - movq 24(%rsp),%rsi - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$dec_epilogue: - .byte 0xf3,0xc3 - - -.p2align 4 -.globl _aes_nohw_set_encrypt_key -.private_extern _aes_nohw_set_encrypt_key - -_aes_nohw_set_encrypt_key: - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $8,%rsp - -L$enc_key_prologue: - - call _x86_64_AES_set_encrypt_key - - movq 40(%rsp),%rbp - - movq 48(%rsp),%rbx - - addq $56,%rsp - -L$enc_key_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 4 -_x86_64_AES_set_encrypt_key: - - movl %esi,%ecx - movq %rdi,%rsi - movq %rdx,%rdi - - testq $-1,%rsi - jz L$badpointer - testq $-1,%rdi - jz L$badpointer - - leaq L$AES_Te(%rip),%rbp - leaq 2048+128(%rbp),%rbp - - - movl 0-128(%rbp),%eax - movl 32-128(%rbp),%ebx - movl 64-128(%rbp),%r8d - movl 96-128(%rbp),%edx - movl 128-128(%rbp),%eax - movl 160-128(%rbp),%ebx - movl 192-128(%rbp),%r8d - movl 224-128(%rbp),%edx - - cmpl $128,%ecx 
- je L$10rounds - cmpl $192,%ecx - je L$12rounds - cmpl $256,%ecx - je L$14rounds - movq $-2,%rax - jmp L$exit - -L$10rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rdx - movq %rax,0(%rdi) - movq %rdx,8(%rdi) - - shrq $32,%rdx - xorl %ecx,%ecx - jmp L$10shortcut -.p2align 2 -L$10loop: - movl 0(%rdi),%eax - movl 12(%rdi),%edx -L$10shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,16(%rdi) - xorl 4(%rdi),%eax - movl %eax,20(%rdi) - xorl 8(%rdi),%eax - movl %eax,24(%rdi) - xorl 12(%rdi),%eax - movl %eax,28(%rdi) - addl $1,%ecx - leaq 16(%rdi),%rdi - cmpl $10,%ecx - jl L$10loop - - movl $10,80(%rdi) - xorq %rax,%rax - jmp L$exit - -L$12rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 16(%rsi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rdx,16(%rdi) - - shrq $32,%rdx - xorl %ecx,%ecx - jmp L$12shortcut -.p2align 2 -L$12loop: - movl 0(%rdi),%eax - movl 20(%rdi),%edx -L$12shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,24(%rdi) - xorl 4(%rdi),%eax - movl %eax,28(%rdi) - xorl 8(%rdi),%eax - movl %eax,32(%rdi) - xorl 12(%rdi),%eax - movl %eax,36(%rdi) - - cmpl $7,%ecx - je L$12break - addl $1,%ecx - - xorl 16(%rdi),%eax - movl %eax,40(%rdi) - xorl 20(%rdi),%eax - movl %eax,44(%rdi) - - leaq 24(%rdi),%rdi - jmp L$12loop -L$12break: - movl $12,72(%rdi) - xorq %rax,%rax - jmp L$exit - 
-L$14rounds: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 16(%rsi),%rcx - movq 24(%rsi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - - shrq $32,%rdx - xorl %ecx,%ecx - jmp L$14shortcut -.p2align 2 -L$14loop: - movl 0(%rdi),%eax - movl 28(%rdi),%edx -L$14shortcut: - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $24,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $8,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $16,%ebx - xorl %ebx,%eax - - xorl 1024-128(%rbp,%rcx,4),%eax - movl %eax,32(%rdi) - xorl 4(%rdi),%eax - movl %eax,36(%rdi) - xorl 8(%rdi),%eax - movl %eax,40(%rdi) - xorl 12(%rdi),%eax - movl %eax,44(%rdi) - - cmpl $6,%ecx - je L$14break - addl $1,%ecx - - movl %eax,%edx - movl 16(%rdi),%eax - movzbl %dl,%esi - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shrl $16,%edx - shll $8,%ebx - movzbl %dl,%esi - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - movzbl %dh,%esi - shll $16,%ebx - xorl %ebx,%eax - - movzbl -128(%rbp,%rsi,1),%ebx - shll $24,%ebx - xorl %ebx,%eax - - movl %eax,48(%rdi) - xorl 20(%rdi),%eax - movl %eax,52(%rdi) - xorl 24(%rdi),%eax - movl %eax,56(%rdi) - xorl 28(%rdi),%eax - movl %eax,60(%rdi) - - leaq 32(%rdi),%rdi - jmp L$14loop -L$14break: - movl $14,48(%rdi) - xorq %rax,%rax - jmp L$exit - -L$badpointer: - movq $-1,%rax -L$exit: -.byte 0xf3,0xc3 - - -.p2align 4 -.globl _aes_nohw_set_decrypt_key -.private_extern _aes_nohw_set_decrypt_key - -_aes_nohw_set_decrypt_key: - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - pushq %rdx - -L$dec_key_prologue: - - call _x86_64_AES_set_encrypt_key - movq (%rsp),%r8 - cmpl $0,%eax - jne L$abort - - movl 240(%r8),%r14d - xorq %rdi,%rdi - leaq (%rdi,%r14,4),%rcx - movq %r8,%rsi - leaq 
(%r8,%rcx,4),%rdi -.p2align 2 -L$invert: - movq 0(%rsi),%rax - movq 8(%rsi),%rbx - movq 0(%rdi),%rcx - movq 8(%rdi),%rdx - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,0(%rsi) - movq %rdx,8(%rsi) - leaq 16(%rsi),%rsi - leaq -16(%rdi),%rdi - cmpq %rsi,%rdi - jne L$invert - - leaq L$AES_Te+2048+1024(%rip),%rax - - movq 40(%rax),%rsi - movq 48(%rax),%rdi - movq 56(%rax),%rbp - - movq %r8,%r15 - subl $1,%r14d -.p2align 2 -L$permute: - leaq 16(%r15),%r15 - movq 0(%r15),%rax - movq 8(%r15),%rcx - movq %rsi,%r9 - movq %rsi,%r12 - andq %rax,%r9 - andq %rcx,%r12 - movq %r9,%rbx - movq %r12,%rdx - shrq $7,%r9 - leaq (%rax,%rax,1),%r8 - shrq $7,%r12 - leaq (%rcx,%rcx,1),%r11 - subq %r9,%rbx - subq %r12,%rdx - andq %rdi,%r8 - andq %rdi,%r11 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r8 - xorq %rdx,%r11 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r8,%r10 - andq %r11,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - leaq (%r8,%r8,1),%r9 - shrq $7,%r13 - leaq (%r11,%r11,1),%r12 - subq %r10,%rbx - subq %r13,%rdx - andq %rdi,%r9 - andq %rdi,%r12 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r9 - xorq %rdx,%r12 - movq %rsi,%r10 - movq %rsi,%r13 - - andq %r9,%r10 - andq %r12,%r13 - movq %r10,%rbx - movq %r13,%rdx - shrq $7,%r10 - xorq %rax,%r8 - shrq $7,%r13 - xorq %rcx,%r11 - subq %r10,%rbx - subq %r13,%rdx - leaq (%r9,%r9,1),%r10 - leaq (%r12,%r12,1),%r13 - xorq %rax,%r9 - xorq %rcx,%r12 - andq %rdi,%r10 - andq %rdi,%r13 - andq %rbp,%rbx - andq %rbp,%rdx - xorq %rbx,%r10 - xorq %rdx,%r13 - - xorq %r10,%rax - xorq %r13,%rcx - xorq %r10,%r8 - xorq %r13,%r11 - movq %rax,%rbx - movq %rcx,%rdx - xorq %r10,%r9 - shrq $32,%rbx - xorq %r13,%r12 - shrq $32,%rdx - xorq %r8,%r10 - roll $8,%eax - xorq %r11,%r13 - roll $8,%ecx - xorq %r9,%r10 - roll $8,%ebx - xorq %r12,%r13 - - roll $8,%edx - xorl %r10d,%eax - shrq $32,%r10 - xorl %r13d,%ecx - shrq $32,%r13 - xorl %r10d,%ebx - xorl %r13d,%edx - - movq %r8,%r10 - roll $24,%r8d - movq %r11,%r13 - roll $24,%r11d - shrq $32,%r10 - 
xorl %r8d,%eax - shrq $32,%r13 - xorl %r11d,%ecx - roll $24,%r10d - movq %r9,%r8 - roll $24,%r13d - movq %r12,%r11 - shrq $32,%r8 - xorl %r10d,%ebx - shrq $32,%r11 - xorl %r13d,%edx - - - roll $16,%r9d - - roll $16,%r12d - - roll $16,%r8d - - xorl %r9d,%eax - roll $16,%r11d - xorl %r12d,%ecx - - xorl %r8d,%ebx - xorl %r11d,%edx - movl %eax,0(%r15) - movl %ebx,4(%r15) - movl %ecx,8(%r15) - movl %edx,12(%r15) - subl $1,%r14d - jnz L$permute - - xorq %rax,%rax -L$abort: - movq 8(%rsp),%r15 - - movq 16(%rsp),%r14 - - movq 24(%rsp),%r13 - - movq 32(%rsp),%r12 - - movq 40(%rsp),%rbp - - movq 48(%rsp),%rbx - - addq $56,%rsp - -L$dec_key_epilogue: - .byte 0xf3,0xc3 - - -.p2align 4 -.globl _aes_nohw_cbc_encrypt -.private_extern _aes_nohw_cbc_encrypt - - -.private_extern _aes_nohw_cbc_encrypt -_aes_nohw_cbc_encrypt: - - cmpq $0,%rdx - je L$cbc_epilogue - pushfq - - - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$cbc_prologue: - - cld - movl %r9d,%r9d - - leaq L$AES_Te(%rip),%r14 - leaq L$AES_Td(%rip),%r10 - cmpq $0,%r9 - cmoveq %r10,%r14 - - - leaq _OPENSSL_ia32cap_P(%rip),%r10 - movl (%r10),%r10d - cmpq $512,%rdx - jb L$cbc_slow_prologue - testq $15,%rdx - jnz L$cbc_slow_prologue - btl $28,%r10d - jc L$cbc_slow_prologue - - - leaq -88-248(%rsp),%r15 - andq $-64,%r15 - - - movq %r14,%r10 - leaq 2304(%r14),%r11 - movq %r15,%r12 - andq $0xFFF,%r10 - andq $0xFFF,%r11 - andq $0xFFF,%r12 - - cmpq %r11,%r12 - jb L$cbc_te_break_out - subq %r11,%r12 - subq %r12,%r15 - jmp L$cbc_te_ok -L$cbc_te_break_out: - subq %r10,%r12 - andq $0xFFF,%r12 - addq $320,%r12 - subq %r12,%r15 -.p2align 2 -L$cbc_te_ok: - - xchgq %rsp,%r15 - - - movq %r15,16(%rsp) - -L$cbc_fast_body: - movq %rdi,24(%rsp) - movq %rsi,32(%rsp) - movq %rdx,40(%rsp) - movq %rcx,48(%rsp) - movq %r8,56(%rsp) - movl $0,80+240(%rsp) - movq %r8,%rbp - movq %r9,%rbx - movq %rsi,%r9 - movq %rdi,%r8 - movq %rcx,%r15 - - movl 240(%r15),%eax - - movq %r15,%r10 - subq %r14,%r10 - andq 
$0xfff,%r10 - cmpq $2304,%r10 - jb L$cbc_do_ecopy - cmpq $4096-248,%r10 - jb L$cbc_skip_ecopy -.p2align 2 -L$cbc_do_ecopy: - movq %r15,%rsi - leaq 80(%rsp),%rdi - leaq 80(%rsp),%r15 - movl $30,%ecx -.long 0x90A548F3 - movl %eax,(%rdi) -L$cbc_skip_ecopy: - movq %r15,0(%rsp) - - movl $18,%ecx -.p2align 2 -L$cbc_prefetch_te: - movq 0(%r14),%r10 - movq 32(%r14),%r11 - movq 64(%r14),%r12 - movq 96(%r14),%r13 - leaq 128(%r14),%r14 - subl $1,%ecx - jnz L$cbc_prefetch_te - leaq -2304(%r14),%r14 - - cmpq $0,%rbx - je L$FAST_DECRYPT - - - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - -.p2align 2 -L$cbc_fast_enc_loop: - xorl 0(%r8),%eax - xorl 4(%r8),%ebx - xorl 8(%r8),%ecx - xorl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_encrypt - - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - subq $16,%r10 - testq $-16,%r10 - movq %r10,40(%rsp) - jnz L$cbc_fast_enc_loop - movq 56(%rsp),%rbp - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - jmp L$cbc_fast_cleanup - - -.p2align 4 -L$FAST_DECRYPT: - cmpq %r8,%r9 - je L$cbc_fast_dec_in_place - - movq %rbp,64(%rsp) -.p2align 2 -L$cbc_fast_dec_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_decrypt - - movq 64(%rsp),%rbp - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - xorl 0(%rbp),%eax - xorl 4(%rbp),%ebx - xorl 8(%rbp),%ecx - xorl 12(%rbp),%edx - movq %r8,%rbp - - subq $16,%r10 - movq %r10,40(%rsp) - movq %rbp,64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - jnz L$cbc_fast_dec_loop - movq 56(%rsp),%r12 - movq 0(%rbp),%r10 - movq 8(%rbp),%r11 - movq %r10,0(%r12) - movq %r11,8(%r12) - jmp L$cbc_fast_cleanup - -.p2align 4 -L$cbc_fast_dec_in_place: - movq 0(%rbp),%r10 - 
movq 8(%rbp),%r11 - movq %r10,0+64(%rsp) - movq %r11,8+64(%rsp) -.p2align 2 -L$cbc_fast_dec_in_place_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - - call _x86_64_AES_decrypt - - movq 24(%rsp),%r8 - movq 40(%rsp),%r10 - xorl 0+64(%rsp),%eax - xorl 4+64(%rsp),%ebx - xorl 8+64(%rsp),%ecx - xorl 12+64(%rsp),%edx - - movq 0(%r8),%r11 - movq 8(%r8),%r12 - subq $16,%r10 - jz L$cbc_fast_dec_in_place_done - - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - movq %r10,40(%rsp) - jmp L$cbc_fast_dec_in_place_loop -L$cbc_fast_dec_in_place_done: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - -.p2align 2 -L$cbc_fast_cleanup: - cmpl $0,80+240(%rsp) - leaq 80(%rsp),%rdi - je L$cbc_exit - movl $30,%ecx - xorq %rax,%rax -.long 0x90AB48F3 - - jmp L$cbc_exit - - -.p2align 4 -L$cbc_slow_prologue: - - - leaq -88(%rsp),%rbp - andq $-64,%rbp - - leaq -88-63(%rcx),%r10 - subq %rbp,%r10 - negq %r10 - andq $0x3c0,%r10 - subq %r10,%rbp - - xchgq %rsp,%rbp - - - movq %rbp,16(%rsp) - -L$cbc_slow_body: - - - - - movq %r8,56(%rsp) - movq %r8,%rbp - movq %r9,%rbx - movq %rsi,%r9 - movq %rdi,%r8 - movq %rcx,%r15 - movq %rdx,%r10 - - movl 240(%r15),%eax - movq %r15,0(%rsp) - shll $4,%eax - leaq (%r15,%rax,1),%rax - movq %rax,8(%rsp) - - - leaq 2048(%r14),%r14 - leaq 768-8(%rsp),%rax - subq %r14,%rax - andq $0x300,%rax - leaq (%r14,%rax,1),%r14 - - cmpq $0,%rbx - je L$SLOW_DECRYPT - - - testq $-16,%r10 - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - jz L$cbc_slow_enc_tail - -.p2align 2 -L$cbc_slow_enc_loop: - xorl 0(%r8),%eax - xorl 4(%r8),%ebx - xorl 8(%r8),%ecx - xorl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - movq %r9,32(%rsp) - movq %r10,40(%rsp) - - call 
_x86_64_AES_encrypt_compact - - movq 24(%rsp),%r8 - movq 32(%rsp),%r9 - movq 40(%rsp),%r10 - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - subq $16,%r10 - testq $-16,%r10 - jnz L$cbc_slow_enc_loop - testq $15,%r10 - jnz L$cbc_slow_enc_tail - movq 56(%rsp),%rbp - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - jmp L$cbc_exit - -.p2align 2 -L$cbc_slow_enc_tail: - movq %rax,%r11 - movq %rcx,%r12 - movq %r10,%rcx - movq %r8,%rsi - movq %r9,%rdi -.long 0x9066A4F3 - movq $16,%rcx - subq %r10,%rcx - xorq %rax,%rax -.long 0x9066AAF3 - movq %r9,%r8 - movq $16,%r10 - movq %r11,%rax - movq %r12,%rcx - jmp L$cbc_slow_enc_loop - -.p2align 4 -L$SLOW_DECRYPT: - shrq $3,%rax - addq %rax,%r14 - - movq 0(%rbp),%r11 - movq 8(%rbp),%r12 - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - -.p2align 2 -L$cbc_slow_dec_loop: - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movq 0(%rsp),%r15 - movq %r8,24(%rsp) - movq %r9,32(%rsp) - movq %r10,40(%rsp) - - call _x86_64_AES_decrypt_compact - - movq 24(%rsp),%r8 - movq 32(%rsp),%r9 - movq 40(%rsp),%r10 - xorl 0+64(%rsp),%eax - xorl 4+64(%rsp),%ebx - xorl 8+64(%rsp),%ecx - xorl 12+64(%rsp),%edx - - movq 0(%r8),%r11 - movq 8(%r8),%r12 - subq $16,%r10 - jc L$cbc_slow_dec_partial - jz L$cbc_slow_dec_done - - movq %r11,0+64(%rsp) - movq %r12,8+64(%rsp) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - leaq 16(%r8),%r8 - leaq 16(%r9),%r9 - jmp L$cbc_slow_dec_loop -L$cbc_slow_dec_done: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0(%r9) - movl %ebx,4(%r9) - movl %ecx,8(%r9) - movl %edx,12(%r9) - - jmp L$cbc_exit - -.p2align 2 -L$cbc_slow_dec_partial: - movq 56(%rsp),%rdi - movq %r11,0(%rdi) - movq %r12,8(%rdi) - - movl %eax,0+64(%rsp) - movl %ebx,4+64(%rsp) - movl %ecx,8+64(%rsp) - movl %edx,12+64(%rsp) - - movq %r9,%rdi - leaq 64(%rsp),%rsi - leaq 
16(%r10),%rcx -.long 0x9066A4F3 - jmp L$cbc_exit - -.p2align 4 -L$cbc_exit: - movq 16(%rsp),%rsi - - movq (%rsi),%r15 - - movq 8(%rsi),%r14 - - movq 16(%rsi),%r13 - - movq 24(%rsi),%r12 - - movq 32(%rsi),%rbp - - movq 40(%rsi),%rbx - - leaq 48(%rsi),%rsp - -L$cbc_popfq: - popfq - - - -L$cbc_epilogue: - .byte 0xf3,0xc3 - - -.p2align 6 -L$AES_Te: -.long 0xa56363c6,0xa56363c6 -.long 0x847c7cf8,0x847c7cf8 -.long 0x997777ee,0x997777ee -.long 0x8d7b7bf6,0x8d7b7bf6 -.long 0x0df2f2ff,0x0df2f2ff -.long 0xbd6b6bd6,0xbd6b6bd6 -.long 0xb16f6fde,0xb16f6fde -.long 0x54c5c591,0x54c5c591 -.long 0x50303060,0x50303060 -.long 0x03010102,0x03010102 -.long 0xa96767ce,0xa96767ce -.long 0x7d2b2b56,0x7d2b2b56 -.long 0x19fefee7,0x19fefee7 -.long 0x62d7d7b5,0x62d7d7b5 -.long 0xe6abab4d,0xe6abab4d -.long 0x9a7676ec,0x9a7676ec -.long 0x45caca8f,0x45caca8f -.long 0x9d82821f,0x9d82821f -.long 0x40c9c989,0x40c9c989 -.long 0x877d7dfa,0x877d7dfa -.long 0x15fafaef,0x15fafaef -.long 0xeb5959b2,0xeb5959b2 -.long 0xc947478e,0xc947478e -.long 0x0bf0f0fb,0x0bf0f0fb -.long 0xecadad41,0xecadad41 -.long 0x67d4d4b3,0x67d4d4b3 -.long 0xfda2a25f,0xfda2a25f -.long 0xeaafaf45,0xeaafaf45 -.long 0xbf9c9c23,0xbf9c9c23 -.long 0xf7a4a453,0xf7a4a453 -.long 0x967272e4,0x967272e4 -.long 0x5bc0c09b,0x5bc0c09b -.long 0xc2b7b775,0xc2b7b775 -.long 0x1cfdfde1,0x1cfdfde1 -.long 0xae93933d,0xae93933d -.long 0x6a26264c,0x6a26264c -.long 0x5a36366c,0x5a36366c -.long 0x413f3f7e,0x413f3f7e -.long 0x02f7f7f5,0x02f7f7f5 -.long 0x4fcccc83,0x4fcccc83 -.long 0x5c343468,0x5c343468 -.long 0xf4a5a551,0xf4a5a551 -.long 0x34e5e5d1,0x34e5e5d1 -.long 0x08f1f1f9,0x08f1f1f9 -.long 0x937171e2,0x937171e2 -.long 0x73d8d8ab,0x73d8d8ab -.long 0x53313162,0x53313162 -.long 0x3f15152a,0x3f15152a -.long 0x0c040408,0x0c040408 -.long 0x52c7c795,0x52c7c795 -.long 0x65232346,0x65232346 -.long 0x5ec3c39d,0x5ec3c39d -.long 0x28181830,0x28181830 -.long 0xa1969637,0xa1969637 -.long 0x0f05050a,0x0f05050a -.long 0xb59a9a2f,0xb59a9a2f -.long 0x0907070e,0x0907070e 
-.long 0x36121224,0x36121224 -.long 0x9b80801b,0x9b80801b -.long 0x3de2e2df,0x3de2e2df -.long 0x26ebebcd,0x26ebebcd -.long 0x6927274e,0x6927274e -.long 0xcdb2b27f,0xcdb2b27f -.long 0x9f7575ea,0x9f7575ea -.long 0x1b090912,0x1b090912 -.long 0x9e83831d,0x9e83831d -.long 0x742c2c58,0x742c2c58 -.long 0x2e1a1a34,0x2e1a1a34 -.long 0x2d1b1b36,0x2d1b1b36 -.long 0xb26e6edc,0xb26e6edc -.long 0xee5a5ab4,0xee5a5ab4 -.long 0xfba0a05b,0xfba0a05b -.long 0xf65252a4,0xf65252a4 -.long 0x4d3b3b76,0x4d3b3b76 -.long 0x61d6d6b7,0x61d6d6b7 -.long 0xceb3b37d,0xceb3b37d -.long 0x7b292952,0x7b292952 -.long 0x3ee3e3dd,0x3ee3e3dd -.long 0x712f2f5e,0x712f2f5e -.long 0x97848413,0x97848413 -.long 0xf55353a6,0xf55353a6 -.long 0x68d1d1b9,0x68d1d1b9 -.long 0x00000000,0x00000000 -.long 0x2cededc1,0x2cededc1 -.long 0x60202040,0x60202040 -.long 0x1ffcfce3,0x1ffcfce3 -.long 0xc8b1b179,0xc8b1b179 -.long 0xed5b5bb6,0xed5b5bb6 -.long 0xbe6a6ad4,0xbe6a6ad4 -.long 0x46cbcb8d,0x46cbcb8d -.long 0xd9bebe67,0xd9bebe67 -.long 0x4b393972,0x4b393972 -.long 0xde4a4a94,0xde4a4a94 -.long 0xd44c4c98,0xd44c4c98 -.long 0xe85858b0,0xe85858b0 -.long 0x4acfcf85,0x4acfcf85 -.long 0x6bd0d0bb,0x6bd0d0bb -.long 0x2aefefc5,0x2aefefc5 -.long 0xe5aaaa4f,0xe5aaaa4f -.long 0x16fbfbed,0x16fbfbed -.long 0xc5434386,0xc5434386 -.long 0xd74d4d9a,0xd74d4d9a -.long 0x55333366,0x55333366 -.long 0x94858511,0x94858511 -.long 0xcf45458a,0xcf45458a -.long 0x10f9f9e9,0x10f9f9e9 -.long 0x06020204,0x06020204 -.long 0x817f7ffe,0x817f7ffe -.long 0xf05050a0,0xf05050a0 -.long 0x443c3c78,0x443c3c78 -.long 0xba9f9f25,0xba9f9f25 -.long 0xe3a8a84b,0xe3a8a84b -.long 0xf35151a2,0xf35151a2 -.long 0xfea3a35d,0xfea3a35d -.long 0xc0404080,0xc0404080 -.long 0x8a8f8f05,0x8a8f8f05 -.long 0xad92923f,0xad92923f -.long 0xbc9d9d21,0xbc9d9d21 -.long 0x48383870,0x48383870 -.long 0x04f5f5f1,0x04f5f5f1 -.long 0xdfbcbc63,0xdfbcbc63 -.long 0xc1b6b677,0xc1b6b677 -.long 0x75dadaaf,0x75dadaaf -.long 0x63212142,0x63212142 -.long 0x30101020,0x30101020 -.long 
0x1affffe5,0x1affffe5 -.long 0x0ef3f3fd,0x0ef3f3fd -.long 0x6dd2d2bf,0x6dd2d2bf -.long 0x4ccdcd81,0x4ccdcd81 -.long 0x140c0c18,0x140c0c18 -.long 0x35131326,0x35131326 -.long 0x2fececc3,0x2fececc3 -.long 0xe15f5fbe,0xe15f5fbe -.long 0xa2979735,0xa2979735 -.long 0xcc444488,0xcc444488 -.long 0x3917172e,0x3917172e -.long 0x57c4c493,0x57c4c493 -.long 0xf2a7a755,0xf2a7a755 -.long 0x827e7efc,0x827e7efc -.long 0x473d3d7a,0x473d3d7a -.long 0xac6464c8,0xac6464c8 -.long 0xe75d5dba,0xe75d5dba -.long 0x2b191932,0x2b191932 -.long 0x957373e6,0x957373e6 -.long 0xa06060c0,0xa06060c0 -.long 0x98818119,0x98818119 -.long 0xd14f4f9e,0xd14f4f9e -.long 0x7fdcdca3,0x7fdcdca3 -.long 0x66222244,0x66222244 -.long 0x7e2a2a54,0x7e2a2a54 -.long 0xab90903b,0xab90903b -.long 0x8388880b,0x8388880b -.long 0xca46468c,0xca46468c -.long 0x29eeeec7,0x29eeeec7 -.long 0xd3b8b86b,0xd3b8b86b -.long 0x3c141428,0x3c141428 -.long 0x79dedea7,0x79dedea7 -.long 0xe25e5ebc,0xe25e5ebc -.long 0x1d0b0b16,0x1d0b0b16 -.long 0x76dbdbad,0x76dbdbad -.long 0x3be0e0db,0x3be0e0db -.long 0x56323264,0x56323264 -.long 0x4e3a3a74,0x4e3a3a74 -.long 0x1e0a0a14,0x1e0a0a14 -.long 0xdb494992,0xdb494992 -.long 0x0a06060c,0x0a06060c -.long 0x6c242448,0x6c242448 -.long 0xe45c5cb8,0xe45c5cb8 -.long 0x5dc2c29f,0x5dc2c29f -.long 0x6ed3d3bd,0x6ed3d3bd -.long 0xefacac43,0xefacac43 -.long 0xa66262c4,0xa66262c4 -.long 0xa8919139,0xa8919139 -.long 0xa4959531,0xa4959531 -.long 0x37e4e4d3,0x37e4e4d3 -.long 0x8b7979f2,0x8b7979f2 -.long 0x32e7e7d5,0x32e7e7d5 -.long 0x43c8c88b,0x43c8c88b -.long 0x5937376e,0x5937376e -.long 0xb76d6dda,0xb76d6dda -.long 0x8c8d8d01,0x8c8d8d01 -.long 0x64d5d5b1,0x64d5d5b1 -.long 0xd24e4e9c,0xd24e4e9c -.long 0xe0a9a949,0xe0a9a949 -.long 0xb46c6cd8,0xb46c6cd8 -.long 0xfa5656ac,0xfa5656ac -.long 0x07f4f4f3,0x07f4f4f3 -.long 0x25eaeacf,0x25eaeacf -.long 0xaf6565ca,0xaf6565ca -.long 0x8e7a7af4,0x8e7a7af4 -.long 0xe9aeae47,0xe9aeae47 -.long 0x18080810,0x18080810 -.long 0xd5baba6f,0xd5baba6f -.long 0x887878f0,0x887878f0 
-.long 0x6f25254a,0x6f25254a -.long 0x722e2e5c,0x722e2e5c -.long 0x241c1c38,0x241c1c38 -.long 0xf1a6a657,0xf1a6a657 -.long 0xc7b4b473,0xc7b4b473 -.long 0x51c6c697,0x51c6c697 -.long 0x23e8e8cb,0x23e8e8cb -.long 0x7cdddda1,0x7cdddda1 -.long 0x9c7474e8,0x9c7474e8 -.long 0x211f1f3e,0x211f1f3e -.long 0xdd4b4b96,0xdd4b4b96 -.long 0xdcbdbd61,0xdcbdbd61 -.long 0x868b8b0d,0x868b8b0d -.long 0x858a8a0f,0x858a8a0f -.long 0x907070e0,0x907070e0 -.long 0x423e3e7c,0x423e3e7c -.long 0xc4b5b571,0xc4b5b571 -.long 0xaa6666cc,0xaa6666cc -.long 0xd8484890,0xd8484890 -.long 0x05030306,0x05030306 -.long 0x01f6f6f7,0x01f6f6f7 -.long 0x120e0e1c,0x120e0e1c -.long 0xa36161c2,0xa36161c2 -.long 0x5f35356a,0x5f35356a -.long 0xf95757ae,0xf95757ae -.long 0xd0b9b969,0xd0b9b969 -.long 0x91868617,0x91868617 -.long 0x58c1c199,0x58c1c199 -.long 0x271d1d3a,0x271d1d3a -.long 0xb99e9e27,0xb99e9e27 -.long 0x38e1e1d9,0x38e1e1d9 -.long 0x13f8f8eb,0x13f8f8eb -.long 0xb398982b,0xb398982b -.long 0x33111122,0x33111122 -.long 0xbb6969d2,0xbb6969d2 -.long 0x70d9d9a9,0x70d9d9a9 -.long 0x898e8e07,0x898e8e07 -.long 0xa7949433,0xa7949433 -.long 0xb69b9b2d,0xb69b9b2d -.long 0x221e1e3c,0x221e1e3c -.long 0x92878715,0x92878715 -.long 0x20e9e9c9,0x20e9e9c9 -.long 0x49cece87,0x49cece87 -.long 0xff5555aa,0xff5555aa -.long 0x78282850,0x78282850 -.long 0x7adfdfa5,0x7adfdfa5 -.long 0x8f8c8c03,0x8f8c8c03 -.long 0xf8a1a159,0xf8a1a159 -.long 0x80898909,0x80898909 -.long 0x170d0d1a,0x170d0d1a -.long 0xdabfbf65,0xdabfbf65 -.long 0x31e6e6d7,0x31e6e6d7 -.long 0xc6424284,0xc6424284 -.long 0xb86868d0,0xb86868d0 -.long 0xc3414182,0xc3414182 -.long 0xb0999929,0xb0999929 -.long 0x772d2d5a,0x772d2d5a -.long 0x110f0f1e,0x110f0f1e -.long 0xcbb0b07b,0xcbb0b07b -.long 0xfc5454a8,0xfc5454a8 -.long 0xd6bbbb6d,0xd6bbbb6d -.long 0x3a16162c,0x3a16162c -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -.long 0x00000001, 0x00000002, 0x00000004, 0x00000008 -.long 0x00000010, 0x00000020, 0x00000040, 0x00000080 -.long 0x0000001b, 
0x00000036, 0x80808080, 0x80808080 -.long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b -.p2align 6 -L$AES_Td: -.long 0x50a7f451,0x50a7f451 -.long 0x5365417e,0x5365417e -.long 0xc3a4171a,0xc3a4171a -.long 0x965e273a,0x965e273a -.long 0xcb6bab3b,0xcb6bab3b -.long 0xf1459d1f,0xf1459d1f -.long 0xab58faac,0xab58faac -.long 0x9303e34b,0x9303e34b -.long 0x55fa3020,0x55fa3020 -.long 0xf66d76ad,0xf66d76ad -.long 0x9176cc88,0x9176cc88 -.long 0x254c02f5,0x254c02f5 -.long 0xfcd7e54f,0xfcd7e54f -.long 0xd7cb2ac5,0xd7cb2ac5 -.long 0x80443526,0x80443526 -.long 0x8fa362b5,0x8fa362b5 -.long 0x495ab1de,0x495ab1de -.long 0x671bba25,0x671bba25 -.long 0x980eea45,0x980eea45 -.long 0xe1c0fe5d,0xe1c0fe5d -.long 0x02752fc3,0x02752fc3 -.long 0x12f04c81,0x12f04c81 -.long 0xa397468d,0xa397468d -.long 0xc6f9d36b,0xc6f9d36b -.long 0xe75f8f03,0xe75f8f03 -.long 0x959c9215,0x959c9215 -.long 0xeb7a6dbf,0xeb7a6dbf -.long 0xda595295,0xda595295 -.long 0x2d83bed4,0x2d83bed4 -.long 0xd3217458,0xd3217458 -.long 0x2969e049,0x2969e049 -.long 0x44c8c98e,0x44c8c98e -.long 0x6a89c275,0x6a89c275 -.long 0x78798ef4,0x78798ef4 -.long 0x6b3e5899,0x6b3e5899 -.long 0xdd71b927,0xdd71b927 -.long 0xb64fe1be,0xb64fe1be -.long 0x17ad88f0,0x17ad88f0 -.long 0x66ac20c9,0x66ac20c9 -.long 0xb43ace7d,0xb43ace7d -.long 0x184adf63,0x184adf63 -.long 0x82311ae5,0x82311ae5 -.long 0x60335197,0x60335197 -.long 0x457f5362,0x457f5362 -.long 0xe07764b1,0xe07764b1 -.long 0x84ae6bbb,0x84ae6bbb -.long 0x1ca081fe,0x1ca081fe -.long 0x942b08f9,0x942b08f9 -.long 0x58684870,0x58684870 -.long 0x19fd458f,0x19fd458f -.long 0x876cde94,0x876cde94 -.long 0xb7f87b52,0xb7f87b52 -.long 0x23d373ab,0x23d373ab -.long 0xe2024b72,0xe2024b72 -.long 0x578f1fe3,0x578f1fe3 -.long 0x2aab5566,0x2aab5566 -.long 0x0728ebb2,0x0728ebb2 -.long 0x03c2b52f,0x03c2b52f -.long 0x9a7bc586,0x9a7bc586 -.long 0xa50837d3,0xa50837d3 -.long 0xf2872830,0xf2872830 -.long 0xb2a5bf23,0xb2a5bf23 -.long 0xba6a0302,0xba6a0302 -.long 0x5c8216ed,0x5c8216ed -.long 0x2b1ccf8a,0x2b1ccf8a 
-.long 0x92b479a7,0x92b479a7 -.long 0xf0f207f3,0xf0f207f3 -.long 0xa1e2694e,0xa1e2694e -.long 0xcdf4da65,0xcdf4da65 -.long 0xd5be0506,0xd5be0506 -.long 0x1f6234d1,0x1f6234d1 -.long 0x8afea6c4,0x8afea6c4 -.long 0x9d532e34,0x9d532e34 -.long 0xa055f3a2,0xa055f3a2 -.long 0x32e18a05,0x32e18a05 -.long 0x75ebf6a4,0x75ebf6a4 -.long 0x39ec830b,0x39ec830b -.long 0xaaef6040,0xaaef6040 -.long 0x069f715e,0x069f715e -.long 0x51106ebd,0x51106ebd -.long 0xf98a213e,0xf98a213e -.long 0x3d06dd96,0x3d06dd96 -.long 0xae053edd,0xae053edd -.long 0x46bde64d,0x46bde64d -.long 0xb58d5491,0xb58d5491 -.long 0x055dc471,0x055dc471 -.long 0x6fd40604,0x6fd40604 -.long 0xff155060,0xff155060 -.long 0x24fb9819,0x24fb9819 -.long 0x97e9bdd6,0x97e9bdd6 -.long 0xcc434089,0xcc434089 -.long 0x779ed967,0x779ed967 -.long 0xbd42e8b0,0xbd42e8b0 -.long 0x888b8907,0x888b8907 -.long 0x385b19e7,0x385b19e7 -.long 0xdbeec879,0xdbeec879 -.long 0x470a7ca1,0x470a7ca1 -.long 0xe90f427c,0xe90f427c -.long 0xc91e84f8,0xc91e84f8 -.long 0x00000000,0x00000000 -.long 0x83868009,0x83868009 -.long 0x48ed2b32,0x48ed2b32 -.long 0xac70111e,0xac70111e -.long 0x4e725a6c,0x4e725a6c -.long 0xfbff0efd,0xfbff0efd -.long 0x5638850f,0x5638850f -.long 0x1ed5ae3d,0x1ed5ae3d -.long 0x27392d36,0x27392d36 -.long 0x64d90f0a,0x64d90f0a -.long 0x21a65c68,0x21a65c68 -.long 0xd1545b9b,0xd1545b9b -.long 0x3a2e3624,0x3a2e3624 -.long 0xb1670a0c,0xb1670a0c -.long 0x0fe75793,0x0fe75793 -.long 0xd296eeb4,0xd296eeb4 -.long 0x9e919b1b,0x9e919b1b -.long 0x4fc5c080,0x4fc5c080 -.long 0xa220dc61,0xa220dc61 -.long 0x694b775a,0x694b775a -.long 0x161a121c,0x161a121c -.long 0x0aba93e2,0x0aba93e2 -.long 0xe52aa0c0,0xe52aa0c0 -.long 0x43e0223c,0x43e0223c -.long 0x1d171b12,0x1d171b12 -.long 0x0b0d090e,0x0b0d090e -.long 0xadc78bf2,0xadc78bf2 -.long 0xb9a8b62d,0xb9a8b62d -.long 0xc8a91e14,0xc8a91e14 -.long 0x8519f157,0x8519f157 -.long 0x4c0775af,0x4c0775af -.long 0xbbdd99ee,0xbbdd99ee -.long 0xfd607fa3,0xfd607fa3 -.long 0x9f2601f7,0x9f2601f7 -.long 
0xbcf5725c,0xbcf5725c -.long 0xc53b6644,0xc53b6644 -.long 0x347efb5b,0x347efb5b -.long 0x7629438b,0x7629438b -.long 0xdcc623cb,0xdcc623cb -.long 0x68fcedb6,0x68fcedb6 -.long 0x63f1e4b8,0x63f1e4b8 -.long 0xcadc31d7,0xcadc31d7 -.long 0x10856342,0x10856342 -.long 0x40229713,0x40229713 -.long 0x2011c684,0x2011c684 -.long 0x7d244a85,0x7d244a85 -.long 0xf83dbbd2,0xf83dbbd2 -.long 0x1132f9ae,0x1132f9ae -.long 0x6da129c7,0x6da129c7 -.long 0x4b2f9e1d,0x4b2f9e1d -.long 0xf330b2dc,0xf330b2dc -.long 0xec52860d,0xec52860d -.long 0xd0e3c177,0xd0e3c177 -.long 0x6c16b32b,0x6c16b32b -.long 0x99b970a9,0x99b970a9 -.long 0xfa489411,0xfa489411 -.long 0x2264e947,0x2264e947 -.long 0xc48cfca8,0xc48cfca8 -.long 0x1a3ff0a0,0x1a3ff0a0 -.long 0xd82c7d56,0xd82c7d56 -.long 0xef903322,0xef903322 -.long 0xc74e4987,0xc74e4987 -.long 0xc1d138d9,0xc1d138d9 -.long 0xfea2ca8c,0xfea2ca8c -.long 0x360bd498,0x360bd498 -.long 0xcf81f5a6,0xcf81f5a6 -.long 0x28de7aa5,0x28de7aa5 -.long 0x268eb7da,0x268eb7da -.long 0xa4bfad3f,0xa4bfad3f -.long 0xe49d3a2c,0xe49d3a2c -.long 0x0d927850,0x0d927850 -.long 0x9bcc5f6a,0x9bcc5f6a -.long 0x62467e54,0x62467e54 -.long 0xc2138df6,0xc2138df6 -.long 0xe8b8d890,0xe8b8d890 -.long 0x5ef7392e,0x5ef7392e -.long 0xf5afc382,0xf5afc382 -.long 0xbe805d9f,0xbe805d9f -.long 0x7c93d069,0x7c93d069 -.long 0xa92dd56f,0xa92dd56f -.long 0xb31225cf,0xb31225cf -.long 0x3b99acc8,0x3b99acc8 -.long 0xa77d1810,0xa77d1810 -.long 0x6e639ce8,0x6e639ce8 -.long 0x7bbb3bdb,0x7bbb3bdb -.long 0x097826cd,0x097826cd -.long 0xf418596e,0xf418596e -.long 0x01b79aec,0x01b79aec -.long 0xa89a4f83,0xa89a4f83 -.long 0x656e95e6,0x656e95e6 -.long 0x7ee6ffaa,0x7ee6ffaa -.long 0x08cfbc21,0x08cfbc21 -.long 0xe6e815ef,0xe6e815ef -.long 0xd99be7ba,0xd99be7ba -.long 0xce366f4a,0xce366f4a -.long 0xd4099fea,0xd4099fea -.long 0xd67cb029,0xd67cb029 -.long 0xafb2a431,0xafb2a431 -.long 0x31233f2a,0x31233f2a -.long 0x3094a5c6,0x3094a5c6 -.long 0xc066a235,0xc066a235 -.long 0x37bc4e74,0x37bc4e74 -.long 0xa6ca82fc,0xa6ca82fc 
-.long 0xb0d090e0,0xb0d090e0 -.long 0x15d8a733,0x15d8a733 -.long 0x4a9804f1,0x4a9804f1 -.long 0xf7daec41,0xf7daec41 -.long 0x0e50cd7f,0x0e50cd7f -.long 0x2ff69117,0x2ff69117 -.long 0x8dd64d76,0x8dd64d76 -.long 0x4db0ef43,0x4db0ef43 -.long 0x544daacc,0x544daacc -.long 0xdf0496e4,0xdf0496e4 -.long 0xe3b5d19e,0xe3b5d19e -.long 0x1b886a4c,0x1b886a4c -.long 0xb81f2cc1,0xb81f2cc1 -.long 0x7f516546,0x7f516546 -.long 0x04ea5e9d,0x04ea5e9d -.long 0x5d358c01,0x5d358c01 -.long 0x737487fa,0x737487fa -.long 0x2e410bfb,0x2e410bfb -.long 0x5a1d67b3,0x5a1d67b3 -.long 0x52d2db92,0x52d2db92 -.long 0x335610e9,0x335610e9 -.long 0x1347d66d,0x1347d66d -.long 0x8c61d79a,0x8c61d79a -.long 0x7a0ca137,0x7a0ca137 -.long 0x8e14f859,0x8e14f859 -.long 0x893c13eb,0x893c13eb -.long 0xee27a9ce,0xee27a9ce -.long 0x35c961b7,0x35c961b7 -.long 0xede51ce1,0xede51ce1 -.long 0x3cb1477a,0x3cb1477a -.long 0x59dfd29c,0x59dfd29c -.long 0x3f73f255,0x3f73f255 -.long 0x79ce1418,0x79ce1418 -.long 0xbf37c773,0xbf37c773 -.long 0xeacdf753,0xeacdf753 -.long 0x5baafd5f,0x5baafd5f -.long 0x146f3ddf,0x146f3ddf -.long 0x86db4478,0x86db4478 -.long 0x81f3afca,0x81f3afca -.long 0x3ec468b9,0x3ec468b9 -.long 0x2c342438,0x2c342438 -.long 0x5f40a3c2,0x5f40a3c2 -.long 0x72c31d16,0x72c31d16 -.long 0x0c25e2bc,0x0c25e2bc -.long 0x8b493c28,0x8b493c28 -.long 0x41950dff,0x41950dff -.long 0x7101a839,0x7101a839 -.long 0xdeb30c08,0xdeb30c08 -.long 0x9ce4b4d8,0x9ce4b4d8 -.long 0x90c15664,0x90c15664 -.long 0x6184cb7b,0x6184cb7b -.long 0x70b632d5,0x70b632d5 -.long 0x745c6c48,0x745c6c48 -.long 0x4257b8d0,0x4257b8d0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 -.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d -.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe -.long 0x1b1b1b1b, 
0x1b1b1b1b, 0, 0 -.byte 65,69,83,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 6 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S deleted file mode 100644 index b08a2fbbf9..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S +++ /dev/null @@ -1,850 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.p2align 5 -_aesni_ctr32_ghash_6x: - - vmovdqu 32(%r11),%xmm2 - subq $6,%rdx - vpxor %xmm4,%xmm4,%xmm4 - vmovdqu 0-128(%rcx),%xmm15 - vpaddb %xmm2,%xmm1,%xmm10 - vpaddb %xmm2,%xmm10,%xmm11 - vpaddb %xmm2,%xmm11,%xmm12 - vpaddb %xmm2,%xmm12,%xmm13 - vpaddb %xmm2,%xmm13,%xmm14 - vpxor %xmm15,%xmm1,%xmm9 - vmovdqu %xmm4,16+8(%rsp) - jmp L$oop6x - -.p2align 5 -L$oop6x: - addl $100663296,%ebx - jc L$handle_ctr32 - vmovdqu 0-32(%r9),%xmm3 - vpaddb %xmm2,%xmm14,%xmm1 - vpxor %xmm15,%xmm10,%xmm10 - vpxor %xmm15,%xmm11,%xmm11 - -L$resume_ctr32: - vmovdqu %xmm1,(%r8) - vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 - vpxor %xmm15,%xmm12,%xmm12 - vmovups 16-128(%rcx),%xmm2 - vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 - - - - - - - - - - - - - - - - - - xorq %r12,%r12 - cmpq %r14,%r15 - - vaesenc %xmm2,%xmm9,%xmm9 - vmovdqu 48+8(%rsp),%xmm0 - vpxor %xmm15,%xmm13,%xmm13 - vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 - vaesenc %xmm2,%xmm10,%xmm10 - vpxor %xmm15,%xmm14,%xmm14 - setnc %r12b - vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 - vaesenc %xmm2,%xmm11,%xmm11 - vmovdqu 16-32(%r9),%xmm3 - negq %r12 - vaesenc 
%xmm2,%xmm12,%xmm12 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 - vpxor %xmm4,%xmm8,%xmm8 - vaesenc %xmm2,%xmm13,%xmm13 - vpxor %xmm5,%xmm1,%xmm4 - andq $0x60,%r12 - vmovups 32-128(%rcx),%xmm15 - vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 - vaesenc %xmm2,%xmm14,%xmm14 - - vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 - leaq (%r14,%r12,1),%r14 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor 16+8(%rsp),%xmm8,%xmm8 - vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 - vmovdqu 64+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 88(%r14),%r13 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 80(%r14),%r12 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,32+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,40+8(%rsp) - vmovdqu 48-32(%r9),%xmm5 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 48-128(%rcx),%xmm15 - vpxor %xmm1,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm2,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor %xmm3,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 - vaesenc %xmm15,%xmm11,%xmm11 - vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 - vmovdqu 80+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor %xmm1,%xmm4,%xmm4 - vmovdqu 64-32(%r9),%xmm1 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 64-128(%rcx),%xmm15 - vpxor %xmm2,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm3,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 72(%r14),%r13 - vpxor %xmm5,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 64(%r14),%r12 - vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 - vmovdqu 96+8(%rsp),%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,48+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,56+8(%rsp) - vpxor %xmm2,%xmm4,%xmm4 - vmovdqu 96-32(%r9),%xmm2 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 80-128(%rcx),%xmm15 - vpxor %xmm3,%xmm6,%xmm6 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 - vaesenc 
%xmm15,%xmm9,%xmm9 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 56(%r14),%r13 - vpxor %xmm1,%xmm7,%xmm7 - vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 - vpxor 112+8(%rsp),%xmm8,%xmm8 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 48(%r14),%r12 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,64+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,72+8(%rsp) - vpxor %xmm3,%xmm4,%xmm4 - vmovdqu 112-32(%r9),%xmm3 - vaesenc %xmm15,%xmm14,%xmm14 - - vmovups 96-128(%rcx),%xmm15 - vpxor %xmm5,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm1,%xmm6,%xmm6 - vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 - vaesenc %xmm15,%xmm10,%xmm10 - movbeq 40(%r14),%r13 - vpxor %xmm2,%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 32(%r14),%r12 - vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 - vaesenc %xmm15,%xmm12,%xmm12 - movq %r13,80+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - movq %r12,88+8(%rsp) - vpxor %xmm5,%xmm6,%xmm6 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor %xmm1,%xmm6,%xmm6 - - vmovups 112-128(%rcx),%xmm15 - vpslldq $8,%xmm6,%xmm5 - vpxor %xmm2,%xmm4,%xmm4 - vmovdqu 16(%r11),%xmm3 - - vaesenc %xmm15,%xmm9,%xmm9 - vpxor %xmm8,%xmm7,%xmm7 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor %xmm5,%xmm4,%xmm4 - movbeq 24(%r14),%r13 - vaesenc %xmm15,%xmm11,%xmm11 - movbeq 16(%r14),%r12 - vpalignr $8,%xmm4,%xmm4,%xmm0 - vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 - movq %r13,96+8(%rsp) - vaesenc %xmm15,%xmm12,%xmm12 - movq %r12,104+8(%rsp) - vaesenc %xmm15,%xmm13,%xmm13 - vmovups 128-128(%rcx),%xmm1 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vmovups 144-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm10,%xmm10 - vpsrldq $8,%xmm6,%xmm6 - vaesenc %xmm1,%xmm11,%xmm11 - vpxor %xmm6,%xmm7,%xmm7 - vaesenc %xmm1,%xmm12,%xmm12 - vpxor %xmm0,%xmm4,%xmm4 - movbeq 8(%r14),%r13 - vaesenc %xmm1,%xmm13,%xmm13 - movbeq 0(%r14),%r12 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 
160-128(%rcx),%xmm1 - cmpl $11,%ebp - jb L$enc_tail - - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - vmovups 176-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 192-128(%rcx),%xmm1 - je L$enc_tail - - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - - vaesenc %xmm1,%xmm9,%xmm9 - vaesenc %xmm1,%xmm10,%xmm10 - vaesenc %xmm1,%xmm11,%xmm11 - vaesenc %xmm1,%xmm12,%xmm12 - vaesenc %xmm1,%xmm13,%xmm13 - vmovups 208-128(%rcx),%xmm15 - vaesenc %xmm1,%xmm14,%xmm14 - vmovups 224-128(%rcx),%xmm1 - jmp L$enc_tail - -.p2align 5 -L$handle_ctr32: - vmovdqu (%r11),%xmm0 - vpshufb %xmm0,%xmm1,%xmm6 - vmovdqu 48(%r11),%xmm5 - vpaddd 64(%r11),%xmm6,%xmm10 - vpaddd %xmm5,%xmm6,%xmm11 - vmovdqu 0-32(%r9),%xmm3 - vpaddd %xmm5,%xmm10,%xmm12 - vpshufb %xmm0,%xmm10,%xmm10 - vpaddd %xmm5,%xmm11,%xmm13 - vpshufb %xmm0,%xmm11,%xmm11 - vpxor %xmm15,%xmm10,%xmm10 - vpaddd %xmm5,%xmm12,%xmm14 - vpshufb %xmm0,%xmm12,%xmm12 - vpxor %xmm15,%xmm11,%xmm11 - vpaddd %xmm5,%xmm13,%xmm1 - vpshufb %xmm0,%xmm13,%xmm13 - vpshufb %xmm0,%xmm14,%xmm14 - vpshufb %xmm0,%xmm1,%xmm1 - jmp L$resume_ctr32 - -.p2align 5 -L$enc_tail: - vaesenc %xmm15,%xmm9,%xmm9 - vmovdqu %xmm7,16+8(%rsp) - vpalignr $8,%xmm4,%xmm4,%xmm8 - vaesenc %xmm15,%xmm10,%xmm10 - vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 - vpxor 0(%rdi),%xmm1,%xmm2 - vaesenc %xmm15,%xmm11,%xmm11 - vpxor 16(%rdi),%xmm1,%xmm0 - vaesenc %xmm15,%xmm12,%xmm12 - vpxor 32(%rdi),%xmm1,%xmm5 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor 48(%rdi),%xmm1,%xmm6 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor 64(%rdi),%xmm1,%xmm7 - vpxor 80(%rdi),%xmm1,%xmm3 - vmovdqu 
(%r8),%xmm1 - - vaesenclast %xmm2,%xmm9,%xmm9 - vmovdqu 32(%r11),%xmm2 - vaesenclast %xmm0,%xmm10,%xmm10 - vpaddb %xmm2,%xmm1,%xmm0 - movq %r13,112+8(%rsp) - leaq 96(%rdi),%rdi - vaesenclast %xmm5,%xmm11,%xmm11 - vpaddb %xmm2,%xmm0,%xmm5 - movq %r12,120+8(%rsp) - leaq 96(%rsi),%rsi - vmovdqu 0-128(%rcx),%xmm15 - vaesenclast %xmm6,%xmm12,%xmm12 - vpaddb %xmm2,%xmm5,%xmm6 - vaesenclast %xmm7,%xmm13,%xmm13 - vpaddb %xmm2,%xmm6,%xmm7 - vaesenclast %xmm3,%xmm14,%xmm14 - vpaddb %xmm2,%xmm7,%xmm3 - - addq $0x60,%r10 - subq $0x6,%rdx - jc L$6x_done - - vmovups %xmm9,-96(%rsi) - vpxor %xmm15,%xmm1,%xmm9 - vmovups %xmm10,-80(%rsi) - vmovdqa %xmm0,%xmm10 - vmovups %xmm11,-64(%rsi) - vmovdqa %xmm5,%xmm11 - vmovups %xmm12,-48(%rsi) - vmovdqa %xmm6,%xmm12 - vmovups %xmm13,-32(%rsi) - vmovdqa %xmm7,%xmm13 - vmovups %xmm14,-16(%rsi) - vmovdqa %xmm3,%xmm14 - vmovdqu 32+8(%rsp),%xmm7 - jmp L$oop6x - -L$6x_done: - vpxor 16+8(%rsp),%xmm8,%xmm8 - vpxor %xmm4,%xmm8,%xmm8 - - .byte 0xf3,0xc3 - - -.globl _aesni_gcm_decrypt -.private_extern _aesni_gcm_decrypt - -.p2align 5 -_aesni_gcm_decrypt: - - xorq %r10,%r10 - - - - cmpq $0x60,%rdx - jb L$gcm_dec_abort - - leaq (%rsp),%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - vzeroupper - - vmovdqu (%r8),%xmm1 - addq $-128,%rsp - movl 12(%r8),%ebx - leaq L$bswap_mask(%rip),%r11 - leaq -128(%rcx),%r14 - movq $0xf80,%r15 - vmovdqu (%r9),%xmm8 - andq $-128,%rsp - vmovdqu (%r11),%xmm0 - leaq 128(%rcx),%rcx - leaq 32+32(%r9),%r9 - movl 240-128(%rcx),%ebp - vpshufb %xmm0,%xmm8,%xmm8 - - andq %r15,%r14 - andq %rsp,%r15 - subq %r14,%r15 - jc L$dec_no_key_aliasing - cmpq $768,%r15 - jnc L$dec_no_key_aliasing - subq %r15,%rsp -L$dec_no_key_aliasing: - - vmovdqu 80(%rdi),%xmm7 - leaq (%rdi),%r14 - vmovdqu 64(%rdi),%xmm4 - - - - - - - - leaq -192(%rdi,%rdx,1),%r15 - - vmovdqu 48(%rdi),%xmm5 - shrq $4,%rdx - xorq %r10,%r10 - vmovdqu 32(%rdi),%xmm6 - vpshufb %xmm0,%xmm7,%xmm7 - vmovdqu 16(%rdi),%xmm2 - vpshufb 
%xmm0,%xmm4,%xmm4 - vmovdqu (%rdi),%xmm3 - vpshufb %xmm0,%xmm5,%xmm5 - vmovdqu %xmm4,48(%rsp) - vpshufb %xmm0,%xmm6,%xmm6 - vmovdqu %xmm5,64(%rsp) - vpshufb %xmm0,%xmm2,%xmm2 - vmovdqu %xmm6,80(%rsp) - vpshufb %xmm0,%xmm3,%xmm3 - vmovdqu %xmm2,96(%rsp) - vmovdqu %xmm3,112(%rsp) - - call _aesni_ctr32_ghash_6x - - vmovups %xmm9,-96(%rsi) - vmovups %xmm10,-80(%rsi) - vmovups %xmm11,-64(%rsi) - vmovups %xmm12,-48(%rsi) - vmovups %xmm13,-32(%rsi) - vmovups %xmm14,-16(%rsi) - - vpshufb (%r11),%xmm8,%xmm8 - vmovdqu %xmm8,-64(%r9) - - vzeroupper - movq -48(%rax),%r15 - - movq -40(%rax),%r14 - - movq -32(%rax),%r13 - - movq -24(%rax),%r12 - - movq -16(%rax),%rbp - - movq -8(%rax),%rbx - - leaq (%rax),%rsp - -L$gcm_dec_abort: - movq %r10,%rax - .byte 0xf3,0xc3 - - - -.p2align 5 -_aesni_ctr32_6x: - - vmovdqu 0-128(%rcx),%xmm4 - vmovdqu 32(%r11),%xmm2 - leaq -1(%rbp),%r13 - vmovups 16-128(%rcx),%xmm15 - leaq 32-128(%rcx),%r12 - vpxor %xmm4,%xmm1,%xmm9 - addl $100663296,%ebx - jc L$handle_ctr32_2 - vpaddb %xmm2,%xmm1,%xmm10 - vpaddb %xmm2,%xmm10,%xmm11 - vpxor %xmm4,%xmm10,%xmm10 - vpaddb %xmm2,%xmm11,%xmm12 - vpxor %xmm4,%xmm11,%xmm11 - vpaddb %xmm2,%xmm12,%xmm13 - vpxor %xmm4,%xmm12,%xmm12 - vpaddb %xmm2,%xmm13,%xmm14 - vpxor %xmm4,%xmm13,%xmm13 - vpaddb %xmm2,%xmm14,%xmm1 - vpxor %xmm4,%xmm14,%xmm14 - jmp L$oop_ctr32 - -.p2align 4 -L$oop_ctr32: - vaesenc %xmm15,%xmm9,%xmm9 - vaesenc %xmm15,%xmm10,%xmm10 - vaesenc %xmm15,%xmm11,%xmm11 - vaesenc %xmm15,%xmm12,%xmm12 - vaesenc %xmm15,%xmm13,%xmm13 - vaesenc %xmm15,%xmm14,%xmm14 - vmovups (%r12),%xmm15 - leaq 16(%r12),%r12 - decl %r13d - jnz L$oop_ctr32 - - vmovdqu (%r12),%xmm3 - vaesenc %xmm15,%xmm9,%xmm9 - vpxor 0(%rdi),%xmm3,%xmm4 - vaesenc %xmm15,%xmm10,%xmm10 - vpxor 16(%rdi),%xmm3,%xmm5 - vaesenc %xmm15,%xmm11,%xmm11 - vpxor 32(%rdi),%xmm3,%xmm6 - vaesenc %xmm15,%xmm12,%xmm12 - vpxor 48(%rdi),%xmm3,%xmm8 - vaesenc %xmm15,%xmm13,%xmm13 - vpxor 64(%rdi),%xmm3,%xmm2 - vaesenc %xmm15,%xmm14,%xmm14 - vpxor 80(%rdi),%xmm3,%xmm3 
- leaq 96(%rdi),%rdi - - vaesenclast %xmm4,%xmm9,%xmm9 - vaesenclast %xmm5,%xmm10,%xmm10 - vaesenclast %xmm6,%xmm11,%xmm11 - vaesenclast %xmm8,%xmm12,%xmm12 - vaesenclast %xmm2,%xmm13,%xmm13 - vaesenclast %xmm3,%xmm14,%xmm14 - vmovups %xmm9,0(%rsi) - vmovups %xmm10,16(%rsi) - vmovups %xmm11,32(%rsi) - vmovups %xmm12,48(%rsi) - vmovups %xmm13,64(%rsi) - vmovups %xmm14,80(%rsi) - leaq 96(%rsi),%rsi - - .byte 0xf3,0xc3 -.p2align 5 -L$handle_ctr32_2: - vpshufb %xmm0,%xmm1,%xmm6 - vmovdqu 48(%r11),%xmm5 - vpaddd 64(%r11),%xmm6,%xmm10 - vpaddd %xmm5,%xmm6,%xmm11 - vpaddd %xmm5,%xmm10,%xmm12 - vpshufb %xmm0,%xmm10,%xmm10 - vpaddd %xmm5,%xmm11,%xmm13 - vpshufb %xmm0,%xmm11,%xmm11 - vpxor %xmm4,%xmm10,%xmm10 - vpaddd %xmm5,%xmm12,%xmm14 - vpshufb %xmm0,%xmm12,%xmm12 - vpxor %xmm4,%xmm11,%xmm11 - vpaddd %xmm5,%xmm13,%xmm1 - vpshufb %xmm0,%xmm13,%xmm13 - vpxor %xmm4,%xmm12,%xmm12 - vpshufb %xmm0,%xmm14,%xmm14 - vpxor %xmm4,%xmm13,%xmm13 - vpshufb %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm14,%xmm14 - jmp L$oop_ctr32 - - - -.globl _aesni_gcm_encrypt -.private_extern _aesni_gcm_encrypt - -.p2align 5 -_aesni_gcm_encrypt: - -#ifdef BORINGSSL_DISPATCH_TEST - - movb $1,_BORINGSSL_function_hit+2(%rip) -#endif - xorq %r10,%r10 - - - - - cmpq $288,%rdx - jb L$gcm_enc_abort - - leaq (%rsp),%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - vzeroupper - - vmovdqu (%r8),%xmm1 - addq $-128,%rsp - movl 12(%r8),%ebx - leaq L$bswap_mask(%rip),%r11 - leaq -128(%rcx),%r14 - movq $0xf80,%r15 - leaq 128(%rcx),%rcx - vmovdqu (%r11),%xmm0 - andq $-128,%rsp - movl 240-128(%rcx),%ebp - - andq %r15,%r14 - andq %rsp,%r15 - subq %r14,%r15 - jc L$enc_no_key_aliasing - cmpq $768,%r15 - jnc L$enc_no_key_aliasing - subq %r15,%rsp -L$enc_no_key_aliasing: - - leaq (%rsi),%r14 - - - - - - - - - leaq -192(%rsi,%rdx,1),%r15 - - shrq $4,%rdx - - call _aesni_ctr32_6x - vpshufb %xmm0,%xmm9,%xmm8 - vpshufb %xmm0,%xmm10,%xmm2 - vmovdqu %xmm8,112(%rsp) - vpshufb 
%xmm0,%xmm11,%xmm4 - vmovdqu %xmm2,96(%rsp) - vpshufb %xmm0,%xmm12,%xmm5 - vmovdqu %xmm4,80(%rsp) - vpshufb %xmm0,%xmm13,%xmm6 - vmovdqu %xmm5,64(%rsp) - vpshufb %xmm0,%xmm14,%xmm7 - vmovdqu %xmm6,48(%rsp) - - call _aesni_ctr32_6x - - vmovdqu (%r9),%xmm8 - leaq 32+32(%r9),%r9 - subq $12,%rdx - movq $192,%r10 - vpshufb %xmm0,%xmm8,%xmm8 - - call _aesni_ctr32_ghash_6x - vmovdqu 32(%rsp),%xmm7 - vmovdqu (%r11),%xmm0 - vmovdqu 0-32(%r9),%xmm3 - vpunpckhqdq %xmm7,%xmm7,%xmm1 - vmovdqu 32-32(%r9),%xmm15 - vmovups %xmm9,-96(%rsi) - vpshufb %xmm0,%xmm9,%xmm9 - vpxor %xmm7,%xmm1,%xmm1 - vmovups %xmm10,-80(%rsi) - vpshufb %xmm0,%xmm10,%xmm10 - vmovups %xmm11,-64(%rsi) - vpshufb %xmm0,%xmm11,%xmm11 - vmovups %xmm12,-48(%rsi) - vpshufb %xmm0,%xmm12,%xmm12 - vmovups %xmm13,-32(%rsi) - vpshufb %xmm0,%xmm13,%xmm13 - vmovups %xmm14,-16(%rsi) - vpshufb %xmm0,%xmm14,%xmm14 - vmovdqu %xmm9,16(%rsp) - vmovdqu 48(%rsp),%xmm6 - vmovdqu 16-32(%r9),%xmm0 - vpunpckhqdq %xmm6,%xmm6,%xmm2 - vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 - vpxor %xmm6,%xmm2,%xmm2 - vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 - - vmovdqu 64(%rsp),%xmm9 - vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 - vmovdqu 48-32(%r9),%xmm3 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm9,%xmm9,%xmm5 - vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 - vpxor %xmm9,%xmm5,%xmm5 - vpxor %xmm7,%xmm6,%xmm6 - vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 - vmovdqu 80-32(%r9),%xmm15 - vpxor %xmm1,%xmm2,%xmm2 - - vmovdqu 80(%rsp),%xmm1 - vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 - vmovdqu 64-32(%r9),%xmm0 - vpxor %xmm4,%xmm7,%xmm7 - vpunpckhqdq %xmm1,%xmm1,%xmm4 - vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpxor %xmm6,%xmm9,%xmm9 - vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 96(%rsp),%xmm2 - vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 - vmovdqu 96-32(%r9),%xmm3 - vpxor %xmm7,%xmm6,%xmm6 - vpunpckhqdq %xmm2,%xmm2,%xmm7 - vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 - vpxor %xmm2,%xmm7,%xmm7 - vpxor %xmm9,%xmm1,%xmm1 - vpclmulqdq 
$0x10,%xmm15,%xmm4,%xmm4 - vmovdqu 128-32(%r9),%xmm15 - vpxor %xmm5,%xmm4,%xmm4 - - vpxor 112(%rsp),%xmm8,%xmm8 - vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 - vmovdqu 112-32(%r9),%xmm0 - vpunpckhqdq %xmm8,%xmm8,%xmm9 - vpxor %xmm6,%xmm5,%xmm5 - vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 - vpxor %xmm8,%xmm9,%xmm9 - vpxor %xmm1,%xmm2,%xmm2 - vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 - vpxor %xmm4,%xmm7,%xmm4 - - vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 - vmovdqu 0-32(%r9),%xmm3 - vpunpckhqdq %xmm14,%xmm14,%xmm1 - vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 - vpxor %xmm14,%xmm1,%xmm1 - vpxor %xmm5,%xmm6,%xmm5 - vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 - vmovdqu 32-32(%r9),%xmm15 - vpxor %xmm2,%xmm8,%xmm7 - vpxor %xmm4,%xmm9,%xmm6 - - vmovdqu 16-32(%r9),%xmm0 - vpxor %xmm5,%xmm7,%xmm9 - vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 - vpxor %xmm9,%xmm6,%xmm6 - vpunpckhqdq %xmm13,%xmm13,%xmm2 - vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 - vpxor %xmm13,%xmm2,%xmm2 - vpslldq $8,%xmm6,%xmm9 - vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 - vpxor %xmm9,%xmm5,%xmm8 - vpsrldq $8,%xmm6,%xmm6 - vpxor %xmm6,%xmm7,%xmm7 - - vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 - vmovdqu 48-32(%r9),%xmm3 - vpxor %xmm4,%xmm5,%xmm5 - vpunpckhqdq %xmm12,%xmm12,%xmm9 - vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 - vpxor %xmm12,%xmm9,%xmm9 - vpxor %xmm14,%xmm13,%xmm13 - vpalignr $8,%xmm8,%xmm8,%xmm14 - vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 - vmovdqu 80-32(%r9),%xmm15 - vpxor %xmm1,%xmm2,%xmm2 - - vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 - vmovdqu 64-32(%r9),%xmm0 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm11,%xmm11,%xmm1 - vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 - vpxor %xmm11,%xmm1,%xmm1 - vpxor %xmm13,%xmm12,%xmm12 - vxorps 16(%rsp),%xmm7,%xmm7 - vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 - vpxor %xmm2,%xmm9,%xmm9 - - vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 - vxorps %xmm14,%xmm8,%xmm8 - - vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 - vmovdqu 96-32(%r9),%xmm3 - vpxor %xmm4,%xmm5,%xmm5 - vpunpckhqdq %xmm10,%xmm10,%xmm2 - vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 - vpxor %xmm10,%xmm2,%xmm2 - vpalignr 
$8,%xmm8,%xmm8,%xmm14 - vpxor %xmm12,%xmm11,%xmm11 - vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 - vmovdqu 128-32(%r9),%xmm15 - vpxor %xmm9,%xmm1,%xmm1 - - vxorps %xmm7,%xmm14,%xmm14 - vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 - vxorps %xmm14,%xmm8,%xmm8 - - vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 - vmovdqu 112-32(%r9),%xmm0 - vpxor %xmm5,%xmm4,%xmm4 - vpunpckhqdq %xmm8,%xmm8,%xmm9 - vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 - vpxor %xmm8,%xmm9,%xmm9 - vpxor %xmm11,%xmm10,%xmm10 - vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 - vpxor %xmm1,%xmm2,%xmm2 - - vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 - vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 - vpxor %xmm4,%xmm5,%xmm5 - vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 - vpxor %xmm10,%xmm7,%xmm7 - vpxor %xmm2,%xmm6,%xmm6 - - vpxor %xmm5,%xmm7,%xmm4 - vpxor %xmm4,%xmm6,%xmm6 - vpslldq $8,%xmm6,%xmm1 - vmovdqu 16(%r11),%xmm3 - vpsrldq $8,%xmm6,%xmm6 - vpxor %xmm1,%xmm5,%xmm8 - vpxor %xmm6,%xmm7,%xmm7 - - vpalignr $8,%xmm8,%xmm8,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 - vpxor %xmm2,%xmm8,%xmm8 - - vpalignr $8,%xmm8,%xmm8,%xmm2 - vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 - vpxor %xmm7,%xmm2,%xmm2 - vpxor %xmm2,%xmm8,%xmm8 - vpshufb (%r11),%xmm8,%xmm8 - vmovdqu %xmm8,-64(%r9) - - vzeroupper - movq -48(%rax),%r15 - - movq -40(%rax),%r14 - - movq -32(%rax),%r13 - - movq -24(%rax),%r12 - - movq -16(%rax),%rbp - - movq -8(%rax),%rbx - - leaq (%rax),%rsp - -L$gcm_enc_abort: - movq %r10,%rax - .byte 0xf3,0xc3 - - -.p2align 6 -L$bswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -L$poly: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -L$one_msb: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -L$two_lsb: -.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -L$one_lsb: -.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 6 -#endif diff --git 
a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S deleted file mode 100644 index 58e072ee1b..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S +++ /dev/null @@ -1,2503 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl _aes_hw_encrypt -.private_extern _aes_hw_encrypt - -.p2align 4 -_aes_hw_encrypt: - -#ifdef BORINGSSL_DISPATCH_TEST - - movb $1,_BORINGSSL_function_hit+1(%rip) -#endif - movups (%rdi),%xmm2 - movl 240(%rdx),%eax - movups (%rdx),%xmm0 - movups 16(%rdx),%xmm1 - leaq 32(%rdx),%rdx - xorps %xmm0,%xmm2 -L$oop_enc1_1: -.byte 102,15,56,220,209 - decl %eax - movups (%rdx),%xmm1 - leaq 16(%rdx),%rdx - jnz L$oop_enc1_1 -.byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 - - - -.globl _aes_hw_decrypt -.private_extern _aes_hw_decrypt - -.p2align 4 -_aes_hw_decrypt: - - movups (%rdi),%xmm2 - movl 240(%rdx),%eax - movups (%rdx),%xmm0 - movups 16(%rdx),%xmm1 - leaq 32(%rdx),%rdx - xorps %xmm0,%xmm2 -L$oop_dec1_2: -.byte 102,15,56,222,209 - decl %eax - movups (%rdx),%xmm1 - leaq 16(%rdx),%rdx - jnz L$oop_dec1_2 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_encrypt2: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -L$enc_loop2: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups 
(%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$enc_loop2 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_decrypt2: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -L$dec_loop2: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$dec_loop2 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_encrypt3: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -L$enc_loop3: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$enc_loop3 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_decrypt3: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax - addq $16,%rax - -L$dec_loop3: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$dec_loop3 - -.byte 
102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_encrypt4: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - xorps %xmm0,%xmm5 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 0x0f,0x1f,0x00 - addq $16,%rax - -L$enc_loop4: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$enc_loop4 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_decrypt4: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - xorps %xmm0,%xmm4 - xorps %xmm0,%xmm5 - movups 32(%rcx),%xmm0 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 0x0f,0x1f,0x00 - addq $16,%rax - -L$dec_loop4: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$dec_loop4 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_encrypt6: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,220,209 - leaq 32(%rcx,%rax,1),%rcx - 
negq %rax -.byte 102,15,56,220,217 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,220,225 - pxor %xmm0,%xmm7 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp L$enc_loop6_enter -.p2align 4 -L$enc_loop6: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -L$enc_loop6_enter: -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$enc_loop6 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_decrypt6: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 -.byte 102,15,56,222,209 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,222,217 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 -.byte 102,15,56,222,225 - pxor %xmm0,%xmm7 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp L$dec_loop6_enter -.p2align 4 -L$dec_loop6: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -L$dec_loop6_enter: -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$dec_loop6 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 
102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_encrypt8: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,220,209 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 -.byte 102,15,56,220,217 - pxor %xmm0,%xmm9 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp L$enc_loop8_inner -.p2align 4 -L$enc_loop8: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -L$enc_loop8_inner: -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 -L$enc_loop8_enter: - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$enc_loop8 - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 -.byte 102,15,56,221,224 -.byte 102,15,56,221,232 -.byte 102,15,56,221,240 -.byte 102,15,56,221,248 -.byte 102,68,15,56,221,192 -.byte 102,68,15,56,221,200 - .byte 0xf3,0xc3 - - - -.p2align 4 -_aesni_decrypt8: - - movups (%rcx),%xmm0 - shll $4,%eax - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm2 - xorps %xmm0,%xmm3 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - leaq 32(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,222,209 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm9 - movups (%rcx,%rax,1),%xmm0 - addq $16,%rax - jmp L$dec_loop8_inner -.p2align 4 -L$dec_loop8: -.byte 102,15,56,222,209 -.byte 
102,15,56,222,217 -L$dec_loop8_inner: -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 -L$dec_loop8_enter: - movups (%rcx,%rax,1),%xmm1 - addq $32,%rax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups -16(%rcx,%rax,1),%xmm0 - jnz L$dec_loop8 - -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 -.byte 102,15,56,223,224 -.byte 102,15,56,223,232 -.byte 102,15,56,223,240 -.byte 102,15,56,223,248 -.byte 102,68,15,56,223,192 -.byte 102,68,15,56,223,200 - .byte 0xf3,0xc3 - - -.globl _aes_hw_ecb_encrypt -.private_extern _aes_hw_ecb_encrypt - -.p2align 4 -_aes_hw_ecb_encrypt: - - andq $-16,%rdx - jz L$ecb_ret - - movl 240(%rcx),%eax - movups (%rcx),%xmm0 - movq %rcx,%r11 - movl %eax,%r10d - testl %r8d,%r8d - jz L$ecb_decrypt - - cmpq $0x80,%rdx - jb L$ecb_enc_tail - - movdqu (%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqu 32(%rdi),%xmm4 - movdqu 48(%rdi),%xmm5 - movdqu 64(%rdi),%xmm6 - movdqu 80(%rdi),%xmm7 - movdqu 96(%rdi),%xmm8 - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi - subq $0x80,%rdx - jmp L$ecb_enc_loop8_enter -.p2align 4 -L$ecb_enc_loop8: - movups %xmm2,(%rsi) - movq %r11,%rcx - movdqu (%rdi),%xmm2 - movl %r10d,%eax - movups %xmm3,16(%rsi) - movdqu 16(%rdi),%xmm3 - movups %xmm4,32(%rsi) - movdqu 32(%rdi),%xmm4 - movups %xmm5,48(%rsi) - movdqu 48(%rdi),%xmm5 - movups %xmm6,64(%rsi) - movdqu 64(%rdi),%xmm6 - movups %xmm7,80(%rsi) - movdqu 80(%rdi),%xmm7 - movups %xmm8,96(%rsi) - movdqu 96(%rdi),%xmm8 - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi 
-L$ecb_enc_loop8_enter: - - call _aesni_encrypt8 - - subq $0x80,%rdx - jnc L$ecb_enc_loop8 - - movups %xmm2,(%rsi) - movq %r11,%rcx - movups %xmm3,16(%rsi) - movl %r10d,%eax - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - movups %xmm8,96(%rsi) - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - addq $0x80,%rdx - jz L$ecb_ret - -L$ecb_enc_tail: - movups (%rdi),%xmm2 - cmpq $0x20,%rdx - jb L$ecb_enc_one - movups 16(%rdi),%xmm3 - je L$ecb_enc_two - movups 32(%rdi),%xmm4 - cmpq $0x40,%rdx - jb L$ecb_enc_three - movups 48(%rdi),%xmm5 - je L$ecb_enc_four - movups 64(%rdi),%xmm6 - cmpq $0x60,%rdx - jb L$ecb_enc_five - movups 80(%rdi),%xmm7 - je L$ecb_enc_six - movdqu 96(%rdi),%xmm8 - xorps %xmm9,%xmm9 - call _aesni_encrypt8 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - movups %xmm8,96(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_one: - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -L$oop_enc1_3: -.byte 102,15,56,220,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_enc1_3 -.byte 102,15,56,221,209 - movups %xmm2,(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_two: - call _aesni_encrypt2 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_three: - call _aesni_encrypt3 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_four: - call _aesni_encrypt4 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_five: - xorps %xmm7,%xmm7 - call _aesni_encrypt6 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups %xmm5,48(%rsi) - movups %xmm6,64(%rsi) - jmp L$ecb_ret -.p2align 4 -L$ecb_enc_six: - call _aesni_encrypt6 - movups %xmm2,(%rsi) - movups %xmm3,16(%rsi) - movups %xmm4,32(%rsi) - movups 
%xmm5,48(%rsi) - movups %xmm6,64(%rsi) - movups %xmm7,80(%rsi) - jmp L$ecb_ret - -.p2align 4 -L$ecb_decrypt: - cmpq $0x80,%rdx - jb L$ecb_dec_tail - - movdqu (%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqu 32(%rdi),%xmm4 - movdqu 48(%rdi),%xmm5 - movdqu 64(%rdi),%xmm6 - movdqu 80(%rdi),%xmm7 - movdqu 96(%rdi),%xmm8 - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi - subq $0x80,%rdx - jmp L$ecb_dec_loop8_enter -.p2align 4 -L$ecb_dec_loop8: - movups %xmm2,(%rsi) - movq %r11,%rcx - movdqu (%rdi),%xmm2 - movl %r10d,%eax - movups %xmm3,16(%rsi) - movdqu 16(%rdi),%xmm3 - movups %xmm4,32(%rsi) - movdqu 32(%rdi),%xmm4 - movups %xmm5,48(%rsi) - movdqu 48(%rdi),%xmm5 - movups %xmm6,64(%rsi) - movdqu 64(%rdi),%xmm6 - movups %xmm7,80(%rsi) - movdqu 80(%rdi),%xmm7 - movups %xmm8,96(%rsi) - movdqu 96(%rdi),%xmm8 - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - movdqu 112(%rdi),%xmm9 - leaq 128(%rdi),%rdi -L$ecb_dec_loop8_enter: - - call _aesni_decrypt8 - - movups (%r11),%xmm0 - subq $0x80,%rdx - jnc L$ecb_dec_loop8 - - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movq %r11,%rcx - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movl %r10d,%eax - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - movups %xmm8,96(%rsi) - pxor %xmm8,%xmm8 - movups %xmm9,112(%rsi) - pxor %xmm9,%xmm9 - leaq 128(%rsi),%rsi - addq $0x80,%rdx - jz L$ecb_ret - -L$ecb_dec_tail: - movups (%rdi),%xmm2 - cmpq $0x20,%rdx - jb L$ecb_dec_one - movups 16(%rdi),%xmm3 - je L$ecb_dec_two - movups 32(%rdi),%xmm4 - cmpq $0x40,%rdx - jb L$ecb_dec_three - movups 48(%rdi),%xmm5 - je L$ecb_dec_four - movups 64(%rdi),%xmm6 - cmpq $0x60,%rdx - jb L$ecb_dec_five - movups 80(%rdi),%xmm7 - je L$ecb_dec_six - movups 96(%rdi),%xmm8 - movups (%rcx),%xmm0 - xorps %xmm9,%xmm9 - call _aesni_decrypt8 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - 
movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - movups %xmm8,96(%rsi) - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_one: - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -L$oop_dec1_4: -.byte 102,15,56,222,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_dec1_4 -.byte 102,15,56,223,209 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_two: - call _aesni_decrypt2 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_three: - call _aesni_decrypt3 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_four: - call _aesni_decrypt4 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_five: - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - jmp L$ecb_ret -.p2align 4 -L$ecb_dec_six: - call _aesni_decrypt6 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - movups %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movups %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movups %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - movups %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - movups %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - -L$ecb_ret: - xorps %xmm0,%xmm0 - pxor %xmm1,%xmm1 - .byte 0xf3,0xc3 - - -.globl _aes_hw_ctr32_encrypt_blocks -.private_extern _aes_hw_ctr32_encrypt_blocks - -.p2align 4 -_aes_hw_ctr32_encrypt_blocks: - -#ifdef BORINGSSL_DISPATCH_TEST - movb 
$1,_BORINGSSL_function_hit(%rip) -#endif - cmpq $1,%rdx - jne L$ctr32_bulk - - - - movups (%r8),%xmm2 - movups (%rdi),%xmm3 - movl 240(%rcx),%edx - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -L$oop_enc1_5: -.byte 102,15,56,220,209 - decl %edx - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_enc1_5 -.byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - xorps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movups %xmm2,(%rsi) - xorps %xmm2,%xmm2 - jmp L$ctr32_epilogue - -.p2align 4 -L$ctr32_bulk: - leaq (%rsp),%r11 - - pushq %rbp - - subq $128,%rsp - andq $-16,%rsp - - - - - movdqu (%r8),%xmm2 - movdqu (%rcx),%xmm0 - movl 12(%r8),%r8d - pxor %xmm0,%xmm2 - movl 12(%rcx),%ebp - movdqa %xmm2,0(%rsp) - bswapl %r8d - movdqa %xmm2,%xmm3 - movdqa %xmm2,%xmm4 - movdqa %xmm2,%xmm5 - movdqa %xmm2,64(%rsp) - movdqa %xmm2,80(%rsp) - movdqa %xmm2,96(%rsp) - movq %rdx,%r10 - movdqa %xmm2,112(%rsp) - - leaq 1(%r8),%rax - leaq 2(%r8),%rdx - bswapl %eax - bswapl %edx - xorl %ebp,%eax - xorl %ebp,%edx -.byte 102,15,58,34,216,3 - leaq 3(%r8),%rax - movdqa %xmm3,16(%rsp) -.byte 102,15,58,34,226,3 - bswapl %eax - movq %r10,%rdx - leaq 4(%r8),%r10 - movdqa %xmm4,32(%rsp) - xorl %ebp,%eax - bswapl %r10d -.byte 102,15,58,34,232,3 - xorl %ebp,%r10d - movdqa %xmm5,48(%rsp) - leaq 5(%r8),%r9 - movl %r10d,64+12(%rsp) - bswapl %r9d - leaq 6(%r8),%r10 - movl 240(%rcx),%eax - xorl %ebp,%r9d - bswapl %r10d - movl %r9d,80+12(%rsp) - xorl %ebp,%r10d - leaq 7(%r8),%r9 - movl %r10d,96+12(%rsp) - bswapl %r9d - leaq _OPENSSL_ia32cap_P(%rip),%r10 - movl 4(%r10),%r10d - xorl %ebp,%r9d - andl $71303168,%r10d - movl %r9d,112+12(%rsp) - - movups 16(%rcx),%xmm1 - - movdqa 64(%rsp),%xmm6 - movdqa 80(%rsp),%xmm7 - - cmpq $8,%rdx - jb L$ctr32_tail - - subq $6,%rdx - cmpl $4194304,%r10d - je L$ctr32_6x - - leaq 128(%rcx),%rcx - subq $2,%rdx - jmp L$ctr32_loop8 - -.p2align 4 -L$ctr32_6x: - shll $4,%eax - movl $48,%r10d - bswapl %ebp - leaq 32(%rcx,%rax,1),%rcx - subq 
%rax,%r10 - jmp L$ctr32_loop6 - -.p2align 4 -L$ctr32_loop6: - addl $6,%r8d - movups -48(%rcx,%r10,1),%xmm0 -.byte 102,15,56,220,209 - movl %r8d,%eax - xorl %ebp,%eax -.byte 102,15,56,220,217 -.byte 0x0f,0x38,0xf1,0x44,0x24,12 - leal 1(%r8),%eax -.byte 102,15,56,220,225 - xorl %ebp,%eax -.byte 0x0f,0x38,0xf1,0x44,0x24,28 -.byte 102,15,56,220,233 - leal 2(%r8),%eax - xorl %ebp,%eax -.byte 102,15,56,220,241 -.byte 0x0f,0x38,0xf1,0x44,0x24,44 - leal 3(%r8),%eax -.byte 102,15,56,220,249 - movups -32(%rcx,%r10,1),%xmm1 - xorl %ebp,%eax - -.byte 102,15,56,220,208 -.byte 0x0f,0x38,0xf1,0x44,0x24,60 - leal 4(%r8),%eax -.byte 102,15,56,220,216 - xorl %ebp,%eax -.byte 0x0f,0x38,0xf1,0x44,0x24,76 -.byte 102,15,56,220,224 - leal 5(%r8),%eax - xorl %ebp,%eax -.byte 102,15,56,220,232 -.byte 0x0f,0x38,0xf1,0x44,0x24,92 - movq %r10,%rax -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 - movups -16(%rcx,%r10,1),%xmm0 - - call L$enc_loop6 - - movdqu (%rdi),%xmm8 - movdqu 16(%rdi),%xmm9 - movdqu 32(%rdi),%xmm10 - movdqu 48(%rdi),%xmm11 - movdqu 64(%rdi),%xmm12 - movdqu 80(%rdi),%xmm13 - leaq 96(%rdi),%rdi - movups -64(%rcx,%r10,1),%xmm1 - pxor %xmm2,%xmm8 - movaps 0(%rsp),%xmm2 - pxor %xmm3,%xmm9 - movaps 16(%rsp),%xmm3 - pxor %xmm4,%xmm10 - movaps 32(%rsp),%xmm4 - pxor %xmm5,%xmm11 - movaps 48(%rsp),%xmm5 - pxor %xmm6,%xmm12 - movaps 64(%rsp),%xmm6 - pxor %xmm7,%xmm13 - movaps 80(%rsp),%xmm7 - movdqu %xmm8,(%rsi) - movdqu %xmm9,16(%rsi) - movdqu %xmm10,32(%rsi) - movdqu %xmm11,48(%rsi) - movdqu %xmm12,64(%rsi) - movdqu %xmm13,80(%rsi) - leaq 96(%rsi),%rsi - - subq $6,%rdx - jnc L$ctr32_loop6 - - addq $6,%rdx - jz L$ctr32_done - - leal -48(%r10),%eax - leaq -80(%rcx,%r10,1),%rcx - negl %eax - shrl $4,%eax - jmp L$ctr32_tail - -.p2align 5 -L$ctr32_loop8: - addl $8,%r8d - movdqa 96(%rsp),%xmm8 -.byte 102,15,56,220,209 - movl %r8d,%r9d - movdqa 112(%rsp),%xmm9 -.byte 102,15,56,220,217 - bswapl %r9d - movups 32-128(%rcx),%xmm0 -.byte 102,15,56,220,225 - xorl %ebp,%r9d - nop -.byte 
102,15,56,220,233 - movl %r9d,0+12(%rsp) - leaq 1(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 48-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movl %r9d,16+12(%rsp) - leaq 2(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 64-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,32+12(%rsp) - leaq 3(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 80-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movl %r9d,48+12(%rsp) - leaq 4(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 96-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,64+12(%rsp) - leaq 5(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 112-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 - movl %r9d,80+12(%rsp) - leaq 6(%r8),%r9 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 128-128(%rcx),%xmm0 - bswapl %r9d -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - xorl %ebp,%r9d -.byte 0x66,0x90 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movl %r9d,96+12(%rsp) - leaq 
7(%r8),%r9 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 144-128(%rcx),%xmm1 - bswapl %r9d -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 - xorl %ebp,%r9d - movdqu 0(%rdi),%xmm10 -.byte 102,15,56,220,232 - movl %r9d,112+12(%rsp) - cmpl $11,%eax -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 160-128(%rcx),%xmm0 - - jb L$ctr32_enc_done - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 176-128(%rcx),%xmm1 - -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 192-128(%rcx),%xmm0 - je L$ctr32_enc_done - -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movups 208-128(%rcx),%xmm1 - -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 -.byte 102,15,56,220,224 -.byte 102,15,56,220,232 -.byte 102,15,56,220,240 -.byte 102,15,56,220,248 -.byte 102,68,15,56,220,192 -.byte 102,68,15,56,220,200 - movups 224-128(%rcx),%xmm0 - jmp L$ctr32_enc_done - -.p2align 4 -L$ctr32_enc_done: - movdqu 16(%rdi),%xmm11 - pxor %xmm0,%xmm10 - movdqu 32(%rdi),%xmm12 - pxor %xmm0,%xmm11 - movdqu 48(%rdi),%xmm13 - pxor %xmm0,%xmm12 - movdqu 64(%rdi),%xmm14 - pxor %xmm0,%xmm13 - movdqu 80(%rdi),%xmm15 - pxor %xmm0,%xmm14 - pxor %xmm0,%xmm15 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 -.byte 102,68,15,56,220,201 - movdqu 
96(%rdi),%xmm1 - leaq 128(%rdi),%rdi - -.byte 102,65,15,56,221,210 - pxor %xmm0,%xmm1 - movdqu 112-128(%rdi),%xmm10 -.byte 102,65,15,56,221,219 - pxor %xmm0,%xmm10 - movdqa 0(%rsp),%xmm11 -.byte 102,65,15,56,221,228 -.byte 102,65,15,56,221,237 - movdqa 16(%rsp),%xmm12 - movdqa 32(%rsp),%xmm13 -.byte 102,65,15,56,221,246 -.byte 102,65,15,56,221,255 - movdqa 48(%rsp),%xmm14 - movdqa 64(%rsp),%xmm15 -.byte 102,68,15,56,221,193 - movdqa 80(%rsp),%xmm0 - movups 16-128(%rcx),%xmm1 -.byte 102,69,15,56,221,202 - - movups %xmm2,(%rsi) - movdqa %xmm11,%xmm2 - movups %xmm3,16(%rsi) - movdqa %xmm12,%xmm3 - movups %xmm4,32(%rsi) - movdqa %xmm13,%xmm4 - movups %xmm5,48(%rsi) - movdqa %xmm14,%xmm5 - movups %xmm6,64(%rsi) - movdqa %xmm15,%xmm6 - movups %xmm7,80(%rsi) - movdqa %xmm0,%xmm7 - movups %xmm8,96(%rsi) - movups %xmm9,112(%rsi) - leaq 128(%rsi),%rsi - - subq $8,%rdx - jnc L$ctr32_loop8 - - addq $8,%rdx - jz L$ctr32_done - leaq -128(%rcx),%rcx - -L$ctr32_tail: - - - leaq 16(%rcx),%rcx - cmpq $4,%rdx - jb L$ctr32_loop3 - je L$ctr32_loop4 - - - shll $4,%eax - movdqa 96(%rsp),%xmm8 - pxor %xmm9,%xmm9 - - movups 16(%rcx),%xmm0 -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - leaq 32-16(%rcx,%rax,1),%rcx - negq %rax -.byte 102,15,56,220,225 - addq $16,%rax - movups (%rdi),%xmm10 -.byte 102,15,56,220,233 -.byte 102,15,56,220,241 - movups 16(%rdi),%xmm11 - movups 32(%rdi),%xmm12 -.byte 102,15,56,220,249 -.byte 102,68,15,56,220,193 - - call L$enc_loop8_enter - - movdqu 48(%rdi),%xmm13 - pxor %xmm10,%xmm2 - movdqu 64(%rdi),%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm10,%xmm6 - movdqu %xmm5,48(%rsi) - movdqu %xmm6,64(%rsi) - cmpq $6,%rdx - jb L$ctr32_done - - movups 80(%rdi),%xmm11 - xorps %xmm11,%xmm7 - movups %xmm7,80(%rsi) - je L$ctr32_done - - movups 96(%rdi),%xmm12 - xorps %xmm12,%xmm8 - movups %xmm8,96(%rsi) - jmp L$ctr32_done - -.p2align 5 -L$ctr32_loop4: -.byte 
102,15,56,220,209 - leaq 16(%rcx),%rcx - decl %eax -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 -.byte 102,15,56,220,233 - movups (%rcx),%xmm1 - jnz L$ctr32_loop4 -.byte 102,15,56,221,209 -.byte 102,15,56,221,217 - movups (%rdi),%xmm10 - movups 16(%rdi),%xmm11 -.byte 102,15,56,221,225 -.byte 102,15,56,221,233 - movups 32(%rdi),%xmm12 - movups 48(%rdi),%xmm13 - - xorps %xmm10,%xmm2 - movups %xmm2,(%rsi) - xorps %xmm11,%xmm3 - movups %xmm3,16(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm4,32(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm5,48(%rsi) - jmp L$ctr32_done - -.p2align 5 -L$ctr32_loop3: -.byte 102,15,56,220,209 - leaq 16(%rcx),%rcx - decl %eax -.byte 102,15,56,220,217 -.byte 102,15,56,220,225 - movups (%rcx),%xmm1 - jnz L$ctr32_loop3 -.byte 102,15,56,221,209 -.byte 102,15,56,221,217 -.byte 102,15,56,221,225 - - movups (%rdi),%xmm10 - xorps %xmm10,%xmm2 - movups %xmm2,(%rsi) - cmpq $2,%rdx - jb L$ctr32_done - - movups 16(%rdi),%xmm11 - xorps %xmm11,%xmm3 - movups %xmm3,16(%rsi) - je L$ctr32_done - - movups 32(%rdi),%xmm12 - xorps %xmm12,%xmm4 - movups %xmm4,32(%rsi) - -L$ctr32_done: - xorps %xmm0,%xmm0 - xorl %ebp,%ebp - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - movaps %xmm0,0(%rsp) - pxor %xmm8,%xmm8 - movaps %xmm0,16(%rsp) - pxor %xmm9,%xmm9 - movaps %xmm0,32(%rsp) - pxor %xmm10,%xmm10 - movaps %xmm0,48(%rsp) - pxor %xmm11,%xmm11 - movaps %xmm0,64(%rsp) - pxor %xmm12,%xmm12 - movaps %xmm0,80(%rsp) - pxor %xmm13,%xmm13 - movaps %xmm0,96(%rsp) - pxor %xmm14,%xmm14 - movaps %xmm0,112(%rsp) - pxor %xmm15,%xmm15 - movq -8(%r11),%rbp - - leaq (%r11),%rsp - -L$ctr32_epilogue: - .byte 0xf3,0xc3 - - -.globl _aes_hw_cbc_encrypt -.private_extern _aes_hw_cbc_encrypt - -.p2align 4 -_aes_hw_cbc_encrypt: - - testq %rdx,%rdx - jz L$cbc_ret - - movl 240(%rcx),%r10d - movq %rcx,%r11 - testl %r9d,%r9d - jz L$cbc_decrypt - - movups (%r8),%xmm2 - movl %r10d,%eax - cmpq $16,%rdx - jb 
L$cbc_enc_tail - subq $16,%rdx - jmp L$cbc_enc_loop -.p2align 4 -L$cbc_enc_loop: - movups (%rdi),%xmm3 - leaq 16(%rdi),%rdi - - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - xorps %xmm0,%xmm3 - leaq 32(%rcx),%rcx - xorps %xmm3,%xmm2 -L$oop_enc1_6: -.byte 102,15,56,220,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_enc1_6 -.byte 102,15,56,221,209 - movl %r10d,%eax - movq %r11,%rcx - movups %xmm2,0(%rsi) - leaq 16(%rsi),%rsi - subq $16,%rdx - jnc L$cbc_enc_loop - addq $16,%rdx - jnz L$cbc_enc_tail - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movups %xmm2,(%r8) - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - jmp L$cbc_ret - -L$cbc_enc_tail: - movq %rdx,%rcx - xchgq %rdi,%rsi -.long 0x9066A4F3 - movl $16,%ecx - subq %rdx,%rcx - xorl %eax,%eax -.long 0x9066AAF3 - leaq -16(%rdi),%rdi - movl %r10d,%eax - movq %rdi,%rsi - movq %r11,%rcx - xorq %rdx,%rdx - jmp L$cbc_enc_loop - -.p2align 4 -L$cbc_decrypt: - cmpq $16,%rdx - jne L$cbc_decrypt_bulk - - - - movdqu (%rdi),%xmm2 - movdqu (%r8),%xmm3 - movdqa %xmm2,%xmm4 - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -L$oop_dec1_7: -.byte 102,15,56,222,209 - decl %r10d - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_dec1_7 -.byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movdqu %xmm4,(%r8) - xorps %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp L$cbc_ret -.p2align 4 -L$cbc_decrypt_bulk: - leaq (%rsp),%r11 - - pushq %rbp - - subq $16,%rsp - andq $-16,%rsp - movq %rcx,%rbp - movups (%r8),%xmm10 - movl %r10d,%eax - cmpq $0x50,%rdx - jbe L$cbc_dec_tail - - movups (%rcx),%xmm0 - movdqu 0(%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqa %xmm2,%xmm11 - movdqu 32(%rdi),%xmm4 - movdqa %xmm3,%xmm12 - movdqu 48(%rdi),%xmm5 - movdqa %xmm4,%xmm13 - movdqu 64(%rdi),%xmm6 - movdqa %xmm5,%xmm14 - movdqu 80(%rdi),%xmm7 - movdqa %xmm6,%xmm15 - leaq _OPENSSL_ia32cap_P(%rip),%r9 - movl 4(%r9),%r9d - cmpq $0x70,%rdx - jbe L$cbc_dec_six_or_seven - - andl 
$71303168,%r9d - subq $0x50,%rdx - cmpl $4194304,%r9d - je L$cbc_dec_loop6_enter - subq $0x20,%rdx - leaq 112(%rcx),%rcx - jmp L$cbc_dec_loop8_enter -.p2align 4 -L$cbc_dec_loop8: - movups %xmm9,(%rsi) - leaq 16(%rsi),%rsi -L$cbc_dec_loop8_enter: - movdqu 96(%rdi),%xmm8 - pxor %xmm0,%xmm2 - movdqu 112(%rdi),%xmm9 - pxor %xmm0,%xmm3 - movups 16-112(%rcx),%xmm1 - pxor %xmm0,%xmm4 - movq $-1,%rbp - cmpq $0x70,%rdx - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 - pxor %xmm0,%xmm8 - -.byte 102,15,56,222,209 - pxor %xmm0,%xmm9 - movups 32-112(%rcx),%xmm0 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 - adcq $0,%rbp - andq $128,%rbp -.byte 102,68,15,56,222,201 - addq %rdi,%rbp - movups 48-112(%rcx),%xmm1 -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 64-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 80-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 96-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 112-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 
128-112(%rcx),%xmm0 - nop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 144-112(%rcx),%xmm1 - cmpl $11,%eax -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 160-112(%rcx),%xmm0 - jb L$cbc_dec_done -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 176-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 192-112(%rcx),%xmm0 - je L$cbc_dec_done -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movups 208-112(%rcx),%xmm1 - nop -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 -.byte 102,15,56,222,224 -.byte 102,15,56,222,232 -.byte 102,15,56,222,240 -.byte 102,15,56,222,248 -.byte 102,68,15,56,222,192 -.byte 102,68,15,56,222,200 - movups 224-112(%rcx),%xmm0 - jmp L$cbc_dec_done -.p2align 4 -L$cbc_dec_done: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - pxor %xmm0,%xmm10 - pxor %xmm0,%xmm11 -.byte 102,15,56,222,225 -.byte 102,15,56,222,233 - pxor %xmm0,%xmm12 - pxor %xmm0,%xmm13 -.byte 102,15,56,222,241 -.byte 102,15,56,222,249 - pxor %xmm0,%xmm14 - pxor %xmm0,%xmm15 -.byte 102,68,15,56,222,193 -.byte 102,68,15,56,222,201 - movdqu 80(%rdi),%xmm1 - -.byte 102,65,15,56,223,210 - movdqu 96(%rdi),%xmm10 - pxor %xmm0,%xmm1 -.byte 
102,65,15,56,223,219 - pxor %xmm0,%xmm10 - movdqu 112(%rdi),%xmm0 -.byte 102,65,15,56,223,228 - leaq 128(%rdi),%rdi - movdqu 0(%rbp),%xmm11 -.byte 102,65,15,56,223,237 -.byte 102,65,15,56,223,246 - movdqu 16(%rbp),%xmm12 - movdqu 32(%rbp),%xmm13 -.byte 102,65,15,56,223,255 -.byte 102,68,15,56,223,193 - movdqu 48(%rbp),%xmm14 - movdqu 64(%rbp),%xmm15 -.byte 102,69,15,56,223,202 - movdqa %xmm0,%xmm10 - movdqu 80(%rbp),%xmm1 - movups -112(%rcx),%xmm0 - - movups %xmm2,(%rsi) - movdqa %xmm11,%xmm2 - movups %xmm3,16(%rsi) - movdqa %xmm12,%xmm3 - movups %xmm4,32(%rsi) - movdqa %xmm13,%xmm4 - movups %xmm5,48(%rsi) - movdqa %xmm14,%xmm5 - movups %xmm6,64(%rsi) - movdqa %xmm15,%xmm6 - movups %xmm7,80(%rsi) - movdqa %xmm1,%xmm7 - movups %xmm8,96(%rsi) - leaq 112(%rsi),%rsi - - subq $0x80,%rdx - ja L$cbc_dec_loop8 - - movaps %xmm9,%xmm2 - leaq -112(%rcx),%rcx - addq $0x70,%rdx - jle L$cbc_dec_clear_tail_collected - movups %xmm9,(%rsi) - leaq 16(%rsi),%rsi - cmpq $0x50,%rdx - jbe L$cbc_dec_tail - - movaps %xmm11,%xmm2 -L$cbc_dec_six_or_seven: - cmpq $0x60,%rdx - ja L$cbc_dec_seven - - movaps %xmm7,%xmm8 - call _aesni_decrypt6 - pxor %xmm10,%xmm2 - movaps %xmm8,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - pxor %xmm15,%xmm7 - movdqu %xmm6,64(%rsi) - pxor %xmm6,%xmm6 - leaq 80(%rsi),%rsi - movdqa %xmm7,%xmm2 - pxor %xmm7,%xmm7 - jmp L$cbc_dec_tail_collected - -.p2align 4 -L$cbc_dec_seven: - movups 96(%rdi),%xmm8 - xorps %xmm9,%xmm9 - call _aesni_decrypt8 - movups 80(%rdi),%xmm9 - pxor %xmm10,%xmm2 - movups 96(%rdi),%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - pxor %xmm15,%xmm7 - movdqu 
%xmm6,64(%rsi) - pxor %xmm6,%xmm6 - pxor %xmm9,%xmm8 - movdqu %xmm7,80(%rsi) - pxor %xmm7,%xmm7 - leaq 96(%rsi),%rsi - movdqa %xmm8,%xmm2 - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 - jmp L$cbc_dec_tail_collected - -.p2align 4 -L$cbc_dec_loop6: - movups %xmm7,(%rsi) - leaq 16(%rsi),%rsi - movdqu 0(%rdi),%xmm2 - movdqu 16(%rdi),%xmm3 - movdqa %xmm2,%xmm11 - movdqu 32(%rdi),%xmm4 - movdqa %xmm3,%xmm12 - movdqu 48(%rdi),%xmm5 - movdqa %xmm4,%xmm13 - movdqu 64(%rdi),%xmm6 - movdqa %xmm5,%xmm14 - movdqu 80(%rdi),%xmm7 - movdqa %xmm6,%xmm15 -L$cbc_dec_loop6_enter: - leaq 96(%rdi),%rdi - movdqa %xmm7,%xmm8 - - call _aesni_decrypt6 - - pxor %xmm10,%xmm2 - movdqa %xmm8,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm14,%xmm6 - movq %rbp,%rcx - movdqu %xmm5,48(%rsi) - pxor %xmm15,%xmm7 - movl %r10d,%eax - movdqu %xmm6,64(%rsi) - leaq 80(%rsi),%rsi - subq $0x60,%rdx - ja L$cbc_dec_loop6 - - movdqa %xmm7,%xmm2 - addq $0x50,%rdx - jle L$cbc_dec_clear_tail_collected - movups %xmm7,(%rsi) - leaq 16(%rsi),%rsi - -L$cbc_dec_tail: - movups (%rdi),%xmm2 - subq $0x10,%rdx - jbe L$cbc_dec_one - - movups 16(%rdi),%xmm3 - movaps %xmm2,%xmm11 - subq $0x10,%rdx - jbe L$cbc_dec_two - - movups 32(%rdi),%xmm4 - movaps %xmm3,%xmm12 - subq $0x10,%rdx - jbe L$cbc_dec_three - - movups 48(%rdi),%xmm5 - movaps %xmm4,%xmm13 - subq $0x10,%rdx - jbe L$cbc_dec_four - - movups 64(%rdi),%xmm6 - movaps %xmm5,%xmm14 - movaps %xmm6,%xmm15 - xorps %xmm7,%xmm7 - call _aesni_decrypt6 - pxor %xmm10,%xmm2 - movaps %xmm15,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - pxor %xmm14,%xmm6 - movdqu %xmm5,48(%rsi) - pxor %xmm5,%xmm5 - leaq 64(%rsi),%rsi - movdqa %xmm6,%xmm2 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - subq $0x10,%rdx - jmp L$cbc_dec_tail_collected - -.p2align 4 -L$cbc_dec_one: - movaps 
%xmm2,%xmm11 - movups (%rcx),%xmm0 - movups 16(%rcx),%xmm1 - leaq 32(%rcx),%rcx - xorps %xmm0,%xmm2 -L$oop_dec1_8: -.byte 102,15,56,222,209 - decl %eax - movups (%rcx),%xmm1 - leaq 16(%rcx),%rcx - jnz L$oop_dec1_8 -.byte 102,15,56,223,209 - xorps %xmm10,%xmm2 - movaps %xmm11,%xmm10 - jmp L$cbc_dec_tail_collected -.p2align 4 -L$cbc_dec_two: - movaps %xmm3,%xmm12 - call _aesni_decrypt2 - pxor %xmm10,%xmm2 - movaps %xmm12,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - movdqa %xmm3,%xmm2 - pxor %xmm3,%xmm3 - leaq 16(%rsi),%rsi - jmp L$cbc_dec_tail_collected -.p2align 4 -L$cbc_dec_three: - movaps %xmm4,%xmm13 - call _aesni_decrypt3 - pxor %xmm10,%xmm2 - movaps %xmm13,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - movdqa %xmm4,%xmm2 - pxor %xmm4,%xmm4 - leaq 32(%rsi),%rsi - jmp L$cbc_dec_tail_collected -.p2align 4 -L$cbc_dec_four: - movaps %xmm5,%xmm14 - call _aesni_decrypt4 - pxor %xmm10,%xmm2 - movaps %xmm14,%xmm10 - pxor %xmm11,%xmm3 - movdqu %xmm2,(%rsi) - pxor %xmm12,%xmm4 - movdqu %xmm3,16(%rsi) - pxor %xmm3,%xmm3 - pxor %xmm13,%xmm5 - movdqu %xmm4,32(%rsi) - pxor %xmm4,%xmm4 - movdqa %xmm5,%xmm2 - pxor %xmm5,%xmm5 - leaq 48(%rsi),%rsi - jmp L$cbc_dec_tail_collected - -.p2align 4 -L$cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 -L$cbc_dec_tail_collected: - movups %xmm10,(%r8) - andq $15,%rdx - jnz L$cbc_dec_tail_partial - movups %xmm2,(%rsi) - pxor %xmm2,%xmm2 - jmp L$cbc_dec_ret -.p2align 4 -L$cbc_dec_tail_partial: - movaps %xmm2,(%rsp) - pxor %xmm2,%xmm2 - movq $16,%rcx - movq %rsi,%rdi - subq %rdx,%rcx - leaq (%rsp),%rsi -.long 0x9066A4F3 - movdqa %xmm2,(%rsp) - -L$cbc_dec_ret: - xorps %xmm0,%xmm0 - pxor %xmm1,%xmm1 - movq -8(%r11),%rbp - - leaq (%r11),%rsp - -L$cbc_ret: - .byte 0xf3,0xc3 - - -.globl _aes_hw_set_decrypt_key -.private_extern _aes_hw_set_decrypt_key - -.p2align 4 
-_aes_hw_set_decrypt_key: - -.byte 0x48,0x83,0xEC,0x08 - - call __aesni_set_encrypt_key - shll $4,%esi - testl %eax,%eax - jnz L$dec_key_ret - leaq 16(%rdx,%rsi,1),%rdi - - movups (%rdx),%xmm0 - movups (%rdi),%xmm1 - movups %xmm0,(%rdi) - movups %xmm1,(%rdx) - leaq 16(%rdx),%rdx - leaq -16(%rdi),%rdi - -L$dec_key_inverse: - movups (%rdx),%xmm0 - movups (%rdi),%xmm1 -.byte 102,15,56,219,192 -.byte 102,15,56,219,201 - leaq 16(%rdx),%rdx - leaq -16(%rdi),%rdi - movups %xmm0,16(%rdi) - movups %xmm1,-16(%rdx) - cmpq %rdx,%rdi - ja L$dec_key_inverse - - movups (%rdx),%xmm0 -.byte 102,15,56,219,192 - pxor %xmm1,%xmm1 - movups %xmm0,(%rdi) - pxor %xmm0,%xmm0 -L$dec_key_ret: - addq $8,%rsp - - .byte 0xf3,0xc3 - -L$SEH_end_set_decrypt_key: - -.globl _aes_hw_set_encrypt_key -.private_extern _aes_hw_set_encrypt_key - -.p2align 4 -_aes_hw_set_encrypt_key: -__aesni_set_encrypt_key: - -#ifdef BORINGSSL_DISPATCH_TEST - movb $1,_BORINGSSL_function_hit+3(%rip) -#endif -.byte 0x48,0x83,0xEC,0x08 - - movq $-1,%rax - testq %rdi,%rdi - jz L$enc_key_ret - testq %rdx,%rdx - jz L$enc_key_ret - - movups (%rdi),%xmm0 - xorps %xmm4,%xmm4 - leaq _OPENSSL_ia32cap_P(%rip),%r10 - movl 4(%r10),%r10d - andl $268437504,%r10d - leaq 16(%rdx),%rax - cmpl $256,%esi - je L$14rounds - cmpl $192,%esi - je L$12rounds - cmpl $128,%esi - jne L$bad_keybits - -L$10rounds: - movl $9,%esi - cmpl $268435456,%r10d - je L$10rounds_alt - - movups %xmm0,(%rdx) -.byte 102,15,58,223,200,1 - call L$key_expansion_128_cold -.byte 102,15,58,223,200,2 - call L$key_expansion_128 -.byte 102,15,58,223,200,4 - call L$key_expansion_128 -.byte 102,15,58,223,200,8 - call L$key_expansion_128 -.byte 102,15,58,223,200,16 - call L$key_expansion_128 -.byte 102,15,58,223,200,32 - call L$key_expansion_128 -.byte 102,15,58,223,200,64 - call L$key_expansion_128 -.byte 102,15,58,223,200,128 - call L$key_expansion_128 -.byte 102,15,58,223,200,27 - call L$key_expansion_128 -.byte 102,15,58,223,200,54 - call L$key_expansion_128 - movups 
%xmm0,(%rax) - movl %esi,80(%rax) - xorl %eax,%eax - jmp L$enc_key_ret - -.p2align 4 -L$10rounds_alt: - movdqa L$key_rotate(%rip),%xmm5 - movl $8,%r10d - movdqa L$key_rcon1(%rip),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,(%rdx) - jmp L$oop_key128 - -.p2align 4 -L$oop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leaq 16(%rax),%rax - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%rax) - movdqa %xmm0,%xmm2 - - decl %r10d - jnz L$oop_key128 - - movdqa L$key_rcon1b(%rip),%xmm4 - -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,(%rax) - - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%rax) - - movl %esi,96(%rax) - xorl %eax,%eax - jmp L$enc_key_ret - -.p2align 4 -L$12rounds: - movq 16(%rdi),%xmm2 - movl $11,%esi - cmpl $268435456,%r10d - je L$12rounds_alt - - movups %xmm0,(%rdx) -.byte 102,15,58,223,202,1 - call L$key_expansion_192a_cold -.byte 102,15,58,223,202,2 - call L$key_expansion_192b -.byte 102,15,58,223,202,4 - call L$key_expansion_192a -.byte 102,15,58,223,202,8 - call L$key_expansion_192b -.byte 102,15,58,223,202,16 - call L$key_expansion_192a -.byte 102,15,58,223,202,32 - call L$key_expansion_192b -.byte 102,15,58,223,202,64 - call L$key_expansion_192a -.byte 102,15,58,223,202,128 - call L$key_expansion_192b - movups %xmm0,(%rax) - movl %esi,48(%rax) - xorq %rax,%rax - jmp L$enc_key_ret - -.p2align 4 -L$12rounds_alt: - movdqa L$key_rotate192(%rip),%xmm5 - movdqa L$key_rcon1(%rip),%xmm4 - movl $8,%r10d - movdqu %xmm0,(%rdx) - jmp 
L$oop_key192 - -.p2align 4 -L$oop_key192: - movq %xmm2,0(%rax) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leaq 24(%rax),%rax - - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - - pshufd $0xff,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%rax) - - decl %r10d - jnz L$oop_key192 - - movl %esi,32(%rax) - xorl %eax,%eax - jmp L$enc_key_ret - -.p2align 4 -L$14rounds: - movups 16(%rdi),%xmm2 - movl $13,%esi - leaq 16(%rax),%rax - cmpl $268435456,%r10d - je L$14rounds_alt - - movups %xmm0,(%rdx) - movups %xmm2,16(%rdx) -.byte 102,15,58,223,202,1 - call L$key_expansion_256a_cold -.byte 102,15,58,223,200,1 - call L$key_expansion_256b -.byte 102,15,58,223,202,2 - call L$key_expansion_256a -.byte 102,15,58,223,200,2 - call L$key_expansion_256b -.byte 102,15,58,223,202,4 - call L$key_expansion_256a -.byte 102,15,58,223,200,4 - call L$key_expansion_256b -.byte 102,15,58,223,202,8 - call L$key_expansion_256a -.byte 102,15,58,223,200,8 - call L$key_expansion_256b -.byte 102,15,58,223,202,16 - call L$key_expansion_256a -.byte 102,15,58,223,200,16 - call L$key_expansion_256b -.byte 102,15,58,223,202,32 - call L$key_expansion_256a -.byte 102,15,58,223,200,32 - call L$key_expansion_256b -.byte 102,15,58,223,202,64 - call L$key_expansion_256a - movups %xmm0,(%rax) - movl %esi,16(%rax) - xorq %rax,%rax - jmp L$enc_key_ret - -.p2align 4 -L$14rounds_alt: - movdqa L$key_rotate(%rip),%xmm5 - movdqa L$key_rcon1(%rip),%xmm4 - movl $7,%r10d - movdqu %xmm0,0(%rdx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,16(%rdx) - jmp L$oop_key256 - -.p2align 4 -L$oop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - - pxor %xmm2,%xmm0 - movdqu 
%xmm0,(%rax) - - decl %r10d - jz L$done_key256 - - pshufd $0xff,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%rax) - leaq 32(%rax),%rax - movdqa %xmm2,%xmm1 - - jmp L$oop_key256 - -L$done_key256: - movl %esi,16(%rax) - xorl %eax,%eax - jmp L$enc_key_ret - -.p2align 4 -L$bad_keybits: - movq $-2,%rax -L$enc_key_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - addq $8,%rsp - - .byte 0xf3,0xc3 - -L$SEH_end_set_encrypt_key: - -.p2align 4 -L$key_expansion_128: - movups %xmm0,(%rax) - leaq 16(%rax),%rax -L$key_expansion_128_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - .byte 0xf3,0xc3 - -.p2align 4 -L$key_expansion_192a: - movups %xmm0,(%rax) - leaq 16(%rax),%rax -L$key_expansion_192a_cold: - movaps %xmm2,%xmm5 -L$key_expansion_192b_warm: - shufps $16,%xmm0,%xmm4 - movdqa %xmm2,%xmm3 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - pslldq $4,%xmm3 - xorps %xmm4,%xmm0 - pshufd $85,%xmm1,%xmm1 - pxor %xmm3,%xmm2 - pxor %xmm1,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm3,%xmm2 - .byte 0xf3,0xc3 - -.p2align 4 -L$key_expansion_192b: - movaps %xmm0,%xmm3 - shufps $68,%xmm0,%xmm5 - movups %xmm5,(%rax) - shufps $78,%xmm2,%xmm3 - movups %xmm3,16(%rax) - leaq 32(%rax),%rax - jmp L$key_expansion_192b_warm - -.p2align 4 -L$key_expansion_256a: - movups %xmm2,(%rax) - leaq 16(%rax),%rax -L$key_expansion_256a_cold: - shufps $16,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $140,%xmm0,%xmm4 - xorps %xmm4,%xmm0 - shufps $255,%xmm1,%xmm1 - xorps %xmm1,%xmm0 - .byte 0xf3,0xc3 - -.p2align 4 -L$key_expansion_256b: - movups %xmm0,(%rax) - leaq 16(%rax),%rax - - shufps $16,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - shufps $140,%xmm2,%xmm4 - xorps %xmm4,%xmm2 - 
shufps $170,%xmm1,%xmm1 - xorps %xmm1,%xmm2 - .byte 0xf3,0xc3 - - -.p2align 6 -L$bswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -L$increment32: -.long 6,6,6,0 -L$increment64: -.long 1,0,0,0 -L$xts_magic: -.long 0x87,0,1,0 -L$increment1: -.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -L$key_rotate: -.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -L$key_rotate192: -.long 0x04070605,0x04070605,0x04070605,0x04070605 -L$key_rcon1: -.long 1,1,1,1 -L$key_rcon1b: -.long 0x1b,0x1b,0x1b,0x1b - -.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 6 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S deleted file mode 100644 index 1b9129f2dd..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S +++ /dev/null @@ -1,426 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - - -.globl _gcm_gmult_ssse3 -.private_extern _gcm_gmult_ssse3 -.p2align 4 -_gcm_gmult_ssse3: - -L$gmult_seh_begin: - movdqu (%rdi),%xmm0 - movdqa L$reverse_bytes(%rip),%xmm10 - movdqa L$low4_mask(%rip),%xmm2 - - -.byte 102,65,15,56,0,194 - - - movdqa %xmm2,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm2,%xmm0 - - - - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -L$oop_row_1: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_1 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -L$oop_row_2: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_2 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $6,%rax -L$oop_row_3: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - 
movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_3 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - -.byte 102,65,15,56,0,210 - movdqu %xmm2,(%rdi) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 -L$gmult_seh_end: - - - - - - - - -.globl _gcm_ghash_ssse3 -.private_extern _gcm_ghash_ssse3 -.p2align 4 -_gcm_ghash_ssse3: -L$ghash_seh_begin: - - movdqu (%rdi),%xmm0 - movdqa L$reverse_bytes(%rip),%xmm10 - movdqa L$low4_mask(%rip),%xmm11 - - - andq $-16,%rcx - - - -.byte 102,65,15,56,0,194 - - - pxor %xmm3,%xmm3 -L$oop_ghash: - - movdqu (%rdx),%xmm1 -.byte 102,65,15,56,0,202 - pxor %xmm1,%xmm0 - - - movdqa %xmm11,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm11,%xmm0 - - - - - pxor %xmm2,%xmm2 - - movq $5,%rax -L$oop_row_4: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_4 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $5,%rax -L$oop_row_5: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq 
$1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_5 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movq $6,%rax -L$oop_row_6: - movdqa (%rsi),%xmm4 - leaq 16(%rsi),%rsi - - - movdqa %xmm2,%xmm6 -.byte 102,15,58,15,243,1 - movdqa %xmm6,%xmm3 - psrldq $1,%xmm2 - - - - - movdqa %xmm4,%xmm5 -.byte 102,15,56,0,224 -.byte 102,15,56,0,233 - - - pxor %xmm5,%xmm2 - - - - movdqa %xmm4,%xmm5 - psllq $60,%xmm5 - movdqa %xmm5,%xmm6 - pslldq $8,%xmm6 - pxor %xmm6,%xmm3 - - - psrldq $8,%xmm5 - pxor %xmm5,%xmm2 - psrlq $4,%xmm4 - pxor %xmm4,%xmm2 - - subq $1,%rax - jnz L$oop_row_6 - - - - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $1,%xmm3 - pxor %xmm3,%xmm2 - psrlq $5,%xmm3 - pxor %xmm3,%xmm2 - pxor %xmm3,%xmm3 - movdqa %xmm2,%xmm0 - - - leaq -256(%rsi),%rsi - - - leaq 16(%rdx),%rdx - subq $16,%rcx - jnz L$oop_ghash - - -.byte 102,65,15,56,0,194 - movdqu %xmm0,(%rdi) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 -L$ghash_seh_end: - - - -.p2align 4 - - -L$reverse_bytes: -.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - -L$low4_mask: -.quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S deleted file mode 100644 index d7dcf5d61f..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S +++ /dev/null @@ -1,1858 +0,0 @@ -# This file is generated from a similarly-named Perl script in 
the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.globl _gcm_gmult_4bit -.private_extern _gcm_gmult_4bit - -.p2align 4 -_gcm_gmult_4bit: - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $280,%rsp - -L$gmult_prologue: - - movzbq 15(%rdi),%r8 - leaq L$rem_4bit(%rip),%r11 - xorq %rax,%rax - xorq %rbx,%rbx - movb %r8b,%al - movb %r8b,%bl - shlb $4,%al - movq $14,%rcx - movq 8(%rsi,%rax,1),%r8 - movq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - movq %r8,%rdx - jmp L$oop1 - -.p2align 4 -L$oop1: - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - movb (%rdi,%rcx,1),%al - shrq $4,%r9 - xorq 8(%rsi,%rbx,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rbx,1),%r9 - movb %al,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - shlb $4,%al - xorq %r10,%r8 - decq %rcx - js L$break1 - - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rax,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - xorq %r10,%r8 - jmp L$oop1 - -.p2align 4 -L$break1: - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rax,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rax,1),%r9 - andb $0xf0,%bl - xorq (%r11,%rdx,8),%r9 - movq %r8,%rdx - xorq %r10,%r8 - - shrq $4,%r8 - andq $0xf,%rdx - movq %r9,%r10 - shrq $4,%r9 - xorq 8(%rsi,%rbx,1),%r8 - shlq $60,%r10 - xorq (%rsi,%rbx,1),%r9 - xorq %r10,%r8 - xorq (%r11,%rdx,8),%r9 - - bswapq %r8 - bswapq %r9 - movq %r8,8(%rdi) - movq %r9,(%rdi) - - leaq 280+48(%rsp),%rsi - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$gmult_epilogue: - .byte 0xf3,0xc3 - - -.globl _gcm_ghash_4bit -.private_extern _gcm_ghash_4bit - -.p2align 4 -_gcm_ghash_4bit: - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - 
- pushq %r14 - - pushq %r15 - - subq $280,%rsp - -L$ghash_prologue: - movq %rdx,%r14 - movq %rcx,%r15 - subq $-128,%rsi - leaq 16+128(%rsp),%rbp - xorl %edx,%edx - movq 0+0-128(%rsi),%r8 - movq 0+8-128(%rsi),%rax - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq 16+0-128(%rsi),%r9 - shlb $4,%dl - movq 16+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,0(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,0(%rbp) - movq 32+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,0-128(%rbp) - movq 32+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,1(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,8(%rbp) - movq 48+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,8-128(%rbp) - movq 48+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,2(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,16(%rbp) - movq 64+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,16-128(%rbp) - movq 64+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,3(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,24(%rbp) - movq 80+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,24-128(%rbp) - movq 80+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,4(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,32(%rbp) - movq 96+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,32-128(%rbp) - movq 96+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,5(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,40(%rbp) - movq 112+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,40-128(%rbp) - movq 112+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,6(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,48(%rbp) - movq 128+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,48-128(%rbp) - movq 128+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,7(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - 
shrq $4,%r8 - movq %r9,56(%rbp) - movq 144+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,56-128(%rbp) - movq 144+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,8(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,64(%rbp) - movq 160+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,64-128(%rbp) - movq 160+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,9(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,72(%rbp) - movq 176+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,72-128(%rbp) - movq 176+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,10(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,80(%rbp) - movq 192+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,80-128(%rbp) - movq 192+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,11(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,88(%rbp) - movq 208+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,88-128(%rbp) - movq 208+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,12(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,96(%rbp) - movq 224+0-128(%rsi),%r8 - shlb $4,%dl - movq %rax,96-128(%rbp) - movq 224+8-128(%rsi),%rax - shlq $60,%r10 - movb %dl,13(%rsp) - orq %r10,%rbx - movb %al,%dl - shrq $4,%rax - movq %r8,%r10 - shrq $4,%r8 - movq %r9,104(%rbp) - movq 240+0-128(%rsi),%r9 - shlb $4,%dl - movq %rbx,104-128(%rbp) - movq 240+8-128(%rsi),%rbx - shlq $60,%r10 - movb %dl,14(%rsp) - orq %r10,%rax - movb %bl,%dl - shrq $4,%rbx - movq %r9,%r10 - shrq $4,%r9 - movq %r8,112(%rbp) - shlb $4,%dl - movq %rax,112-128(%rbp) - shlq $60,%r10 - movb %dl,15(%rsp) - orq %r10,%rbx - movq %r9,120(%rbp) - movq %rbx,120-128(%rbp) - addq $-128,%rsi - movq 8(%rdi),%r8 - movq 0(%rdi),%r9 - addq %r14,%r15 - leaq L$rem_8bit(%rip),%r11 - jmp L$outer_loop -.p2align 4 -L$outer_loop: - xorq (%r14),%r9 - movq 8(%r14),%rdx - leaq 16(%r14),%r14 - xorq %r8,%rdx - movq %r9,(%rdi) - 
movq %rdx,8(%rdi) - shrq $32,%rdx - xorq %rax,%rax - roll $8,%edx - movb %dl,%al - movzbl %dl,%ebx - shlb $4,%al - shrl $4,%ebx - roll $8,%edx - movq 8(%rsi,%rax,1),%r8 - movq (%rsi,%rax,1),%r9 - movb %dl,%al - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - xorq %r8,%r12 - movq %r9,%r10 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 8(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - 
xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 4(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - 
xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl 0(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - shrl $4,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r12,2),%r12 - movzbl %dl,%ebx - shlb $4,%al - movzbq (%rsp,%rcx,1),%r13 - shrl $4,%ebx - shlq $48,%r12 - xorq %r8,%r13 - movq %r9,%r10 - xorq %r12,%r9 - shrq $8,%r8 - movzbq %r13b,%r13 - shrq $8,%r9 - xorq -128(%rbp,%rcx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rcx,8),%r9 - roll $8,%edx - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - movb %dl,%al - xorq %r10,%r8 - movzwq (%r11,%r13,2),%r13 - movzbl %dl,%ecx - shlb $4,%al - movzbq (%rsp,%rbx,1),%r12 - andl $240,%ecx - shlq $48,%r13 - xorq %r8,%r12 - movq %r9,%r10 - xorq %r13,%r9 - shrq $8,%r8 - movzbq %r12b,%r12 - movl -4(%rdi),%edx - shrq $8,%r9 - xorq -128(%rbp,%rbx,8),%r8 - shlq $56,%r10 - xorq (%rbp,%rbx,8),%r9 - movzwq (%r11,%r12,2),%r12 - xorq 8(%rsi,%rax,1),%r8 - xorq (%rsi,%rax,1),%r9 - shlq $48,%r12 - xorq %r10,%r8 - xorq %r12,%r9 - 
movzbq %r8b,%r13 - shrq $4,%r8 - movq %r9,%r10 - shlb $4,%r13b - shrq $4,%r9 - xorq 8(%rsi,%rcx,1),%r8 - movzwq (%r11,%r13,2),%r13 - shlq $60,%r10 - xorq (%rsi,%rcx,1),%r9 - xorq %r10,%r8 - shlq $48,%r13 - bswapq %r8 - xorq %r13,%r9 - bswapq %r9 - cmpq %r15,%r14 - jb L$outer_loop - movq %r8,8(%rdi) - movq %r9,(%rdi) - - leaq 280+48(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq 0(%rsi),%rsp - -L$ghash_epilogue: - .byte 0xf3,0xc3 - - -.globl _gcm_init_clmul -.private_extern _gcm_init_clmul - -.p2align 4 -_gcm_init_clmul: - -L$_init_clmul: - movdqu (%rsi),%xmm2 - pshufd $78,%xmm2,%xmm2 - - - pshufd $255,%xmm2,%xmm4 - movdqa %xmm2,%xmm3 - psllq $1,%xmm2 - pxor %xmm5,%xmm5 - psrlq $63,%xmm3 - pcmpgtd %xmm4,%xmm5 - pslldq $8,%xmm3 - por %xmm3,%xmm2 - - - pand L$0x1c2_polynomial(%rip),%xmm5 - pxor %xmm5,%xmm2 - - - pshufd $78,%xmm2,%xmm6 - movdqa %xmm2,%xmm0 - pxor %xmm2,%xmm6 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm2,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm2,%xmm3 - movdqu %xmm2,0(%rdi) - pxor %xmm0,%xmm4 - movdqu %xmm0,16(%rdi) -.byte 102,15,58,15,227,8 - movdqu %xmm4,32(%rdi) - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor 
%xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - movdqa %xmm0,%xmm5 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,222,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - pshufd $78,%xmm5,%xmm3 - pshufd $78,%xmm0,%xmm4 - pxor %xmm5,%xmm3 - movdqu %xmm5,48(%rdi) - pxor %xmm0,%xmm4 - movdqu %xmm0,64(%rdi) -.byte 102,15,58,15,227,8 - movdqu %xmm4,80(%rdi) - .byte 0xf3,0xc3 - - -.globl _gcm_gmult_clmul -.private_extern _gcm_gmult_clmul - -.p2align 4 -_gcm_gmult_clmul: - -L$_gmult_clmul: - movdqu (%rdi),%xmm0 - movdqa L$bswap_mask(%rip),%xmm5 - movdqu (%rsi),%xmm2 - movdqu 32(%rsi),%xmm4 -.byte 102,15,56,0,197 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,220,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - 
psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,197 - movdqu %xmm0,(%rdi) - .byte 0xf3,0xc3 - - -.globl _gcm_ghash_clmul -.private_extern _gcm_ghash_clmul - -.p2align 5 -_gcm_ghash_clmul: - -L$_ghash_clmul: - movdqa L$bswap_mask(%rip),%xmm10 - - movdqu (%rdi),%xmm0 - movdqu (%rsi),%xmm2 - movdqu 32(%rsi),%xmm7 -.byte 102,65,15,56,0,194 - - subq $0x10,%rcx - jz L$odd_tail - - movdqu 16(%rsi),%xmm6 - leaq _OPENSSL_ia32cap_P(%rip),%rax - movl 4(%rax),%eax - cmpq $0x30,%rcx - jb L$skip4x - - andl $71303168,%eax - cmpl $4194304,%eax - je L$skip4x - - subq $0x30,%rcx - movq $0xA040608020C0E000,%rax - movdqu 48(%rsi),%xmm14 - movdqu 64(%rsi),%xmm15 - - - - - movdqu 48(%rdx),%xmm3 - movdqu 32(%rdx),%xmm11 -.byte 102,65,15,56,0,218 -.byte 102,69,15,56,0,218 - movdqa %xmm3,%xmm5 - pshufd $78,%xmm3,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,68,218,0 -.byte 102,15,58,68,234,17 -.byte 102,15,58,68,231,0 - - movdqa %xmm11,%xmm13 - pshufd $78,%xmm11,%xmm12 - pxor %xmm11,%xmm12 -.byte 102,68,15,58,68,222,0 -.byte 102,68,15,58,68,238,17 -.byte 102,68,15,58,68,231,16 - xorps %xmm11,%xmm3 - xorps %xmm13,%xmm5 - movups 80(%rsi),%xmm7 - xorps %xmm12,%xmm4 - - movdqu 16(%rdx),%xmm11 - movdqu 0(%rdx),%xmm8 -.byte 102,69,15,56,0,218 -.byte 102,69,15,56,0,194 - movdqa %xmm11,%xmm13 - pshufd $78,%xmm11,%xmm12 - pxor %xmm8,%xmm0 - pxor %xmm11,%xmm12 -.byte 102,69,15,58,68,222,0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm8 - pxor %xmm0,%xmm8 -.byte 102,69,15,58,68,238,17 -.byte 102,68,15,58,68,231,0 - xorps %xmm11,%xmm3 - xorps %xmm13,%xmm5 - - leaq 64(%rdx),%rdx - subq $0x40,%rcx - jc L$tail4x - - jmp L$mod4_loop -.p2align 5 -L$mod4_loop: -.byte 102,65,15,58,68,199,0 - xorps %xmm12,%xmm4 - movdqu 48(%rdx),%xmm11 -.byte 
102,69,15,56,0,218 -.byte 102,65,15,58,68,207,17 - xorps %xmm3,%xmm0 - movdqu 32(%rdx),%xmm3 - movdqa %xmm11,%xmm13 -.byte 102,68,15,58,68,199,16 - pshufd $78,%xmm11,%xmm12 - xorps %xmm5,%xmm1 - pxor %xmm11,%xmm12 -.byte 102,65,15,56,0,218 - movups 32(%rsi),%xmm7 - xorps %xmm4,%xmm8 -.byte 102,68,15,58,68,218,0 - pshufd $78,%xmm3,%xmm4 - - pxor %xmm0,%xmm8 - movdqa %xmm3,%xmm5 - pxor %xmm1,%xmm8 - pxor %xmm3,%xmm4 - movdqa %xmm8,%xmm9 -.byte 102,68,15,58,68,234,17 - pslldq $8,%xmm8 - psrldq $8,%xmm9 - pxor %xmm8,%xmm0 - movdqa L$7_mask(%rip),%xmm8 - pxor %xmm9,%xmm1 -.byte 102,76,15,110,200 - - pand %xmm0,%xmm8 -.byte 102,69,15,56,0,200 - pxor %xmm0,%xmm9 -.byte 102,68,15,58,68,231,0 - psllq $57,%xmm9 - movdqa %xmm9,%xmm8 - pslldq $8,%xmm9 -.byte 102,15,58,68,222,0 - psrldq $8,%xmm8 - pxor %xmm9,%xmm0 - pxor %xmm8,%xmm1 - movdqu 0(%rdx),%xmm8 - - movdqa %xmm0,%xmm9 - psrlq $1,%xmm0 -.byte 102,15,58,68,238,17 - xorps %xmm11,%xmm3 - movdqu 16(%rdx),%xmm11 -.byte 102,69,15,56,0,218 -.byte 102,15,58,68,231,16 - xorps %xmm13,%xmm5 - movups 80(%rsi),%xmm7 -.byte 102,69,15,56,0,194 - pxor %xmm9,%xmm1 - pxor %xmm0,%xmm9 - psrlq $5,%xmm0 - - movdqa %xmm11,%xmm13 - pxor %xmm12,%xmm4 - pshufd $78,%xmm11,%xmm12 - pxor %xmm9,%xmm0 - pxor %xmm8,%xmm1 - pxor %xmm11,%xmm12 -.byte 102,69,15,58,68,222,0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - movdqa %xmm0,%xmm1 -.byte 102,69,15,58,68,238,17 - xorps %xmm11,%xmm3 - pshufd $78,%xmm0,%xmm8 - pxor %xmm0,%xmm8 - -.byte 102,68,15,58,68,231,0 - xorps %xmm13,%xmm5 - - leaq 64(%rdx),%rdx - subq $0x40,%rcx - jnc L$mod4_loop - -L$tail4x: -.byte 102,65,15,58,68,199,0 -.byte 102,65,15,58,68,207,17 -.byte 102,68,15,58,68,199,16 - xorps %xmm12,%xmm4 - xorps %xmm3,%xmm0 - xorps %xmm5,%xmm1 - pxor %xmm0,%xmm1 - pxor %xmm4,%xmm8 - - pxor %xmm1,%xmm8 - pxor %xmm0,%xmm1 - - movdqa %xmm8,%xmm9 - psrldq $8,%xmm8 - pslldq $8,%xmm9 - pxor %xmm8,%xmm1 - pxor %xmm9,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq 
$1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - addq $0x40,%rcx - jz L$done - movdqu 32(%rsi),%xmm7 - subq $0x10,%rcx - jz L$odd_tail -L$skip4x: - - - - - - movdqu (%rdx),%xmm8 - movdqu 16(%rdx),%xmm3 -.byte 102,69,15,56,0,194 -.byte 102,65,15,56,0,218 - pxor %xmm8,%xmm0 - - movdqa %xmm3,%xmm5 - pshufd $78,%xmm3,%xmm4 - pxor %xmm3,%xmm4 -.byte 102,15,58,68,218,0 -.byte 102,15,58,68,234,17 -.byte 102,15,58,68,231,0 - - leaq 32(%rdx),%rdx - nop - subq $0x20,%rcx - jbe L$even_tail - nop - jmp L$mod_loop - -.p2align 5 -L$mod_loop: - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm8 - pshufd $78,%xmm0,%xmm4 - pxor %xmm0,%xmm4 - -.byte 102,15,58,68,198,0 -.byte 102,15,58,68,206,17 -.byte 102,15,58,68,231,16 - - pxor %xmm3,%xmm0 - pxor %xmm5,%xmm1 - movdqu (%rdx),%xmm9 - pxor %xmm0,%xmm8 -.byte 102,69,15,56,0,202 - movdqu 16(%rdx),%xmm3 - - pxor %xmm1,%xmm8 - pxor %xmm9,%xmm1 - pxor %xmm8,%xmm4 -.byte 102,65,15,56,0,218 - movdqa %xmm4,%xmm8 - psrldq $8,%xmm8 - pslldq $8,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm3,%xmm5 - - movdqa %xmm0,%xmm9 - movdqa %xmm0,%xmm8 - psllq $5,%xmm0 - pxor %xmm0,%xmm8 -.byte 102,15,58,68,218,0 - psllq $1,%xmm0 - pxor %xmm8,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm8 - pslldq $8,%xmm0 - psrldq $8,%xmm8 - pxor %xmm9,%xmm0 - pshufd $78,%xmm5,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm5,%xmm4 - - movdqa %xmm0,%xmm9 - psrlq $1,%xmm0 -.byte 102,15,58,68,234,17 - pxor %xmm9,%xmm1 - pxor %xmm0,%xmm9 - psrlq $5,%xmm0 - pxor %xmm9,%xmm0 - leaq 32(%rdx),%rdx - psrlq $1,%xmm0 -.byte 102,15,58,68,231,0 - pxor %xmm1,%xmm0 - - subq $0x20,%rcx - ja L$mod_loop - -L$even_tail: - movdqa %xmm0,%xmm1 - movdqa %xmm4,%xmm8 - pshufd $78,%xmm0,%xmm4 - pxor %xmm0,%xmm4 - -.byte 102,15,58,68,198,0 -.byte 102,15,58,68,206,17 
-.byte 102,15,58,68,231,16 - - pxor %xmm3,%xmm0 - pxor %xmm5,%xmm1 - pxor %xmm0,%xmm8 - pxor %xmm1,%xmm8 - pxor %xmm8,%xmm4 - movdqa %xmm4,%xmm8 - psrldq $8,%xmm8 - pslldq $8,%xmm4 - pxor %xmm8,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 - testq %rcx,%rcx - jnz L$done - -L$odd_tail: - movdqu (%rdx),%xmm8 -.byte 102,69,15,56,0,194 - pxor %xmm8,%xmm0 - movdqa %xmm0,%xmm1 - pshufd $78,%xmm0,%xmm3 - pxor %xmm0,%xmm3 -.byte 102,15,58,68,194,0 -.byte 102,15,58,68,202,17 -.byte 102,15,58,68,223,0 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 - - movdqa %xmm3,%xmm4 - psrldq $8,%xmm3 - pslldq $8,%xmm4 - pxor %xmm3,%xmm1 - pxor %xmm4,%xmm0 - - movdqa %xmm0,%xmm4 - movdqa %xmm0,%xmm3 - psllq $5,%xmm0 - pxor %xmm0,%xmm3 - psllq $1,%xmm0 - pxor %xmm3,%xmm0 - psllq $57,%xmm0 - movdqa %xmm0,%xmm3 - pslldq $8,%xmm0 - psrldq $8,%xmm3 - pxor %xmm4,%xmm0 - pxor %xmm3,%xmm1 - - - movdqa %xmm0,%xmm4 - psrlq $1,%xmm0 - pxor %xmm4,%xmm1 - pxor %xmm0,%xmm4 - psrlq $5,%xmm0 - pxor %xmm4,%xmm0 - psrlq $1,%xmm0 - pxor %xmm1,%xmm0 -L$done: -.byte 102,65,15,56,0,194 - movdqu %xmm0,(%rdi) - .byte 0xf3,0xc3 - - -.globl _gcm_init_avx -.private_extern _gcm_init_avx - -.p2align 5 -_gcm_init_avx: - - vzeroupper - - vmovdqu (%rsi),%xmm2 - vpshufd $78,%xmm2,%xmm2 - - - vpshufd $255,%xmm2,%xmm4 - vpsrlq $63,%xmm2,%xmm3 - vpsllq $1,%xmm2,%xmm2 - vpxor %xmm5,%xmm5,%xmm5 - vpcmpgtd %xmm4,%xmm5,%xmm5 - vpslldq $8,%xmm3,%xmm3 - vpor %xmm3,%xmm2,%xmm2 - - - vpand L$0x1c2_polynomial(%rip),%xmm5,%xmm5 - vpxor %xmm5,%xmm2,%xmm2 - - vpunpckhqdq %xmm2,%xmm2,%xmm6 - vmovdqa %xmm2,%xmm0 - vpxor %xmm2,%xmm6,%xmm6 - movq $4,%r10 - jmp L$init_start_avx -.p2align 5 
-L$init_loop_avx: - vpalignr $8,%xmm3,%xmm4,%xmm5 - vmovdqu %xmm5,-16(%rdi) - vpunpckhqdq %xmm0,%xmm0,%xmm3 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 - vpxor %xmm0,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm0,%xmm0 - vpxor %xmm3,%xmm1,%xmm1 - vpsllq $57,%xmm0,%xmm3 - vpsllq $62,%xmm0,%xmm4 - vpxor %xmm3,%xmm4,%xmm4 - vpsllq $63,%xmm0,%xmm3 - vpxor %xmm3,%xmm4,%xmm4 - vpslldq $8,%xmm4,%xmm3 - vpsrldq $8,%xmm4,%xmm4 - vpxor %xmm3,%xmm0,%xmm0 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrlq $1,%xmm0,%xmm4 - vpxor %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $5,%xmm4,%xmm4 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $1,%xmm0,%xmm0 - vpxor %xmm1,%xmm0,%xmm0 -L$init_start_avx: - vmovdqa %xmm0,%xmm5 - vpunpckhqdq %xmm0,%xmm0,%xmm3 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 - vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 - vpxor %xmm0,%xmm1,%xmm4 - vpxor %xmm4,%xmm3,%xmm3 - - vpslldq $8,%xmm3,%xmm4 - vpsrldq $8,%xmm3,%xmm3 - vpxor %xmm4,%xmm0,%xmm0 - vpxor %xmm3,%xmm1,%xmm1 - vpsllq $57,%xmm0,%xmm3 - vpsllq $62,%xmm0,%xmm4 - vpxor %xmm3,%xmm4,%xmm4 - vpsllq $63,%xmm0,%xmm3 - vpxor %xmm3,%xmm4,%xmm4 - vpslldq $8,%xmm4,%xmm3 - vpsrldq $8,%xmm4,%xmm4 - vpxor %xmm3,%xmm0,%xmm0 - vpxor %xmm4,%xmm1,%xmm1 - - vpsrlq $1,%xmm0,%xmm4 - vpxor %xmm0,%xmm1,%xmm1 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $5,%xmm4,%xmm4 - vpxor %xmm4,%xmm0,%xmm0 - vpsrlq $1,%xmm0,%xmm0 - vpxor %xmm1,%xmm0,%xmm0 - vpshufd $78,%xmm5,%xmm3 - vpshufd $78,%xmm0,%xmm4 - vpxor %xmm5,%xmm3,%xmm3 - vmovdqu %xmm5,0(%rdi) - vpxor %xmm0,%xmm4,%xmm4 - vmovdqu %xmm0,16(%rdi) - leaq 48(%rdi),%rdi - subq $1,%r10 - jnz L$init_loop_avx - - vpalignr $8,%xmm4,%xmm3,%xmm5 - vmovdqu %xmm5,-16(%rdi) - - vzeroupper - .byte 0xf3,0xc3 - - -.globl _gcm_gmult_avx -.private_extern _gcm_gmult_avx - -.p2align 5 -_gcm_gmult_avx: - - jmp L$_gmult_clmul - 
- -.globl _gcm_ghash_avx -.private_extern _gcm_ghash_avx - -.p2align 5 -_gcm_ghash_avx: - - vzeroupper - - vmovdqu (%rdi),%xmm10 - leaq L$0x1c2_polynomial(%rip),%r10 - leaq 64(%rsi),%rsi - vmovdqu L$bswap_mask(%rip),%xmm13 - vpshufb %xmm13,%xmm10,%xmm10 - cmpq $0x80,%rcx - jb L$short_avx - subq $0x80,%rcx - - vmovdqu 112(%rdx),%xmm14 - vmovdqu 0-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm14 - vmovdqu 32-64(%rsi),%xmm7 - - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vmovdqu 96(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm14,%xmm9,%xmm9 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 16-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vmovdqu 80(%rdx),%xmm14 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 48-64(%rsi),%xmm6 - vpxor %xmm14,%xmm9,%xmm9 - vmovdqu 64(%rdx),%xmm15 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 80-64(%rsi),%xmm7 - - vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vmovdqu 48(%rdx),%xmm14 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 96-64(%rsi),%xmm6 - vpxor %xmm5,%xmm2,%xmm2 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 128-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu 32(%rdx),%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 112-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpunpckhqdq 
%xmm15,%xmm15,%xmm8 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - - vmovdqu 16(%rdx),%xmm14 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm4,%xmm1,%xmm1 - vpshufb %xmm13,%xmm14,%xmm14 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 144-64(%rsi),%xmm6 - vpxor %xmm5,%xmm2,%xmm2 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 176-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu (%rdx),%xmm15 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm1,%xmm4,%xmm4 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 160-64(%rsi),%xmm6 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 - - leaq 128(%rdx),%rdx - cmpq $0x80,%rcx - jb L$tail_avx - - vpxor %xmm10,%xmm15,%xmm15 - subq $0x80,%rcx - jmp L$oop8x_avx - -.p2align 5 -L$oop8x_avx: - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vmovdqu 112(%rdx),%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpxor %xmm15,%xmm8,%xmm8 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 - vmovdqu 0-64(%rsi),%xmm6 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 - vmovdqu 32-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - - vmovdqu 96(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpxor %xmm3,%xmm10,%xmm10 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vxorps %xmm4,%xmm11,%xmm11 - vmovdqu 16-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm5,%xmm12,%xmm12 - vxorps %xmm15,%xmm8,%xmm8 - - vmovdqu 80(%rdx),%xmm14 - vpxor %xmm10,%xmm12,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpxor %xmm11,%xmm12,%xmm12 - vpslldq $8,%xmm12,%xmm9 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vpsrldq $8,%xmm12,%xmm12 - vpxor %xmm9,%xmm10,%xmm10 - vmovdqu 48-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm14 - vxorps 
%xmm12,%xmm11,%xmm11 - vpxor %xmm1,%xmm4,%xmm4 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 80-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 64(%rdx),%xmm15 - vpalignr $8,%xmm10,%xmm10,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vxorps %xmm15,%xmm8,%xmm8 - vpxor %xmm5,%xmm2,%xmm2 - - vmovdqu 48(%rdx),%xmm14 - vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 96-64(%rsi),%xmm6 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 128-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu 32(%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpshufb %xmm13,%xmm15,%xmm15 - vpxor %xmm3,%xmm0,%xmm0 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 112-64(%rsi),%xmm6 - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm4,%xmm1,%xmm1 - vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 - vpxor %xmm15,%xmm8,%xmm8 - vpxor %xmm5,%xmm2,%xmm2 - vxorps %xmm12,%xmm10,%xmm10 - - vmovdqu 16(%rdx),%xmm14 - vpalignr $8,%xmm10,%xmm10,%xmm12 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 - vpshufb %xmm13,%xmm14,%xmm14 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 - vmovdqu 144-64(%rsi),%xmm6 - vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 - vxorps %xmm11,%xmm12,%xmm12 - vpunpckhqdq %xmm14,%xmm14,%xmm9 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 - vmovdqu 176-64(%rsi),%xmm7 - vpxor %xmm14,%xmm9,%xmm9 - vpxor %xmm2,%xmm5,%xmm5 - - vmovdqu (%rdx),%xmm15 - vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 - vpshufb %xmm13,%xmm15,%xmm15 - vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 - vmovdqu 160-64(%rsi),%xmm6 - vpxor 
%xmm12,%xmm15,%xmm15 - vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 - vpxor %xmm10,%xmm15,%xmm15 - - leaq 128(%rdx),%rdx - subq $0x80,%rcx - jnc L$oop8x_avx - - addq $0x80,%rcx - jmp L$tail_no_xor_avx - -.p2align 5 -L$short_avx: - vmovdqu -16(%rdx,%rcx,1),%xmm14 - leaq (%rdx,%rcx,1),%rdx - vmovdqu 0-64(%rsi),%xmm6 - vmovdqu 32-64(%rsi),%xmm7 - vpshufb %xmm13,%xmm14,%xmm15 - - vmovdqa %xmm0,%xmm3 - vmovdqa %xmm1,%xmm4 - vmovdqa %xmm2,%xmm5 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -32(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 16-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -48(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 48-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vmovdqu 80-64(%rsi),%xmm7 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -64(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 64-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -80(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 96-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq 
$0x00,%xmm7,%xmm8,%xmm2 - vmovdqu 128-64(%rsi),%xmm7 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -96(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 112-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vpsrldq $8,%xmm7,%xmm7 - subq $0x10,%rcx - jz L$tail_avx - - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vmovdqu -112(%rdx),%xmm14 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vmovdqu 144-64(%rsi),%xmm6 - vpshufb %xmm13,%xmm14,%xmm15 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - vmovq 184-64(%rsi),%xmm7 - subq $0x10,%rcx - jmp L$tail_avx - -.p2align 5 -L$tail_avx: - vpxor %xmm10,%xmm15,%xmm15 -L$tail_no_xor_avx: - vpunpckhqdq %xmm15,%xmm15,%xmm8 - vpxor %xmm0,%xmm3,%xmm3 - vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 - vpxor %xmm15,%xmm8,%xmm8 - vpxor %xmm1,%xmm4,%xmm4 - vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 - vpxor %xmm2,%xmm5,%xmm5 - vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 - - vmovdqu (%r10),%xmm12 - - vpxor %xmm0,%xmm3,%xmm10 - vpxor %xmm1,%xmm4,%xmm11 - vpxor %xmm2,%xmm5,%xmm5 - - vpxor %xmm10,%xmm5,%xmm5 - vpxor %xmm11,%xmm5,%xmm5 - vpslldq $8,%xmm5,%xmm9 - vpsrldq $8,%xmm5,%xmm5 - vpxor %xmm9,%xmm10,%xmm10 - vpxor %xmm5,%xmm11,%xmm11 - - vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 - vpalignr $8,%xmm10,%xmm10,%xmm10 - vpxor %xmm9,%xmm10,%xmm10 - - vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 - vpalignr $8,%xmm10,%xmm10,%xmm10 - vpxor %xmm11,%xmm10,%xmm10 - vpxor %xmm9,%xmm10,%xmm10 - - cmpq $0,%rcx - jne L$short_avx - - vpshufb %xmm13,%xmm10,%xmm10 - vmovdqu %xmm10,(%rdi) - vzeroupper - .byte 0xf3,0xc3 - - -.p2align 6 -L$bswap_mask: -.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -L$0x1c2_polynomial: -.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -L$7_mask: 
-.long 7,0,7,0 -L$7_mask_poly: -.long 7,0,450,0 -.p2align 6 - -L$rem_4bit: -.long 0,0,0,471859200,0,943718400,0,610271232 -.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208 -.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008 -.long 0,2441084928,0,2376073216,0,2847932416,0,3051356160 - -L$rem_8bit: -.value 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E -.value 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E -.value 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E -.value 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E -.value 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E -.value 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E -.value 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E -.value 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E -.value 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE -.value 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE -.value 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE -.value 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE -.value 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E -.value 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E -.value 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE -.value 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE -.value 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E -.value 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E -.value 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E -.value 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E -.value 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E -.value 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E -.value 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E -.value 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E -.value 0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE -.value 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE -.value 
0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE -.value 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE -.value 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E -.value 0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E -.value 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE -.value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE - -.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 6 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/md5-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/md5-x86_64.S deleted file mode 100644 index cfb4180da3..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/md5-x86_64.S +++ /dev/null @@ -1,696 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text -.p2align 4 - -.globl _md5_block_asm_data_order -.private_extern _md5_block_asm_data_order - -_md5_block_asm_data_order: - - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r14 - - pushq %r15 - -L$prologue: - - - - - movq %rdi,%rbp - shlq $6,%rdx - leaq (%rsi,%rdx,1),%rdi - movl 0(%rbp),%eax - movl 4(%rbp),%ebx - movl 8(%rbp),%ecx - movl 12(%rbp),%edx - - - - - - - - cmpq %rdi,%rsi - je L$end - - -L$loop: - movl %eax,%r8d - movl %ebx,%r9d - movl %ecx,%r14d - movl %edx,%r15d - movl 0(%rsi),%r10d - movl %edx,%r11d - xorl %ecx,%r11d - leal -680876936(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 4(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -389564586(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 8(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal 606105819(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 12(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -1044525330(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 16(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal -176418897(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 20(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal 1200080426(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 24(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -1473231341(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 28(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - 
movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -45705983(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 32(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal 1770035416(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 36(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -1958414417(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 40(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -42063(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 44(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal -1990404162(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 48(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - xorl %ecx,%r11d - leal 1804603682(%rax,%r10,1),%eax - andl %ebx,%r11d - xorl %edx,%r11d - movl 52(%rsi),%r10d - addl %r11d,%eax - roll $7,%eax - movl %ecx,%r11d - addl %ebx,%eax - xorl %ebx,%r11d - leal -40341101(%rdx,%r10,1),%edx - andl %eax,%r11d - xorl %ecx,%r11d - movl 56(%rsi),%r10d - addl %r11d,%edx - roll $12,%edx - movl %ebx,%r11d - addl %eax,%edx - xorl %eax,%r11d - leal -1502002290(%rcx,%r10,1),%ecx - andl %edx,%r11d - xorl %ebx,%r11d - movl 60(%rsi),%r10d - addl %r11d,%ecx - roll $17,%ecx - movl %eax,%r11d - addl %edx,%ecx - xorl %edx,%r11d - leal 1236535329(%rbx,%r10,1),%ebx - andl %ecx,%r11d - xorl %eax,%r11d - movl 0(%rsi),%r10d - addl %r11d,%ebx - roll $22,%ebx - movl %edx,%r11d - addl %ecx,%ebx - movl 4(%rsi),%r10d - movl %edx,%r11d - movl %edx,%r12d - notl %r11d - leal -165796510(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 24(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -1069501632(%rdx,%r10,1),%edx - andl %eax,%r12d - andl 
%ebx,%r11d - movl 44(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal 643717713(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 0(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -373897302(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 20(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal -701558691(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 40(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal 38016083(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 60(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal -660478335(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 16(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -405537848(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 36(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal 568446438(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 56(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -1019803690(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 12(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal -187363961(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 32(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl 
%eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal 1163531501(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 52(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - notl %r11d - leal -1444681467(%rax,%r10,1),%eax - andl %ebx,%r12d - andl %ecx,%r11d - movl 8(%rsi),%r10d - orl %r11d,%r12d - movl %ecx,%r11d - addl %r12d,%eax - movl %ecx,%r12d - roll $5,%eax - addl %ebx,%eax - notl %r11d - leal -51403784(%rdx,%r10,1),%edx - andl %eax,%r12d - andl %ebx,%r11d - movl 28(%rsi),%r10d - orl %r11d,%r12d - movl %ebx,%r11d - addl %r12d,%edx - movl %ebx,%r12d - roll $9,%edx - addl %eax,%edx - notl %r11d - leal 1735328473(%rcx,%r10,1),%ecx - andl %edx,%r12d - andl %eax,%r11d - movl 48(%rsi),%r10d - orl %r11d,%r12d - movl %eax,%r11d - addl %r12d,%ecx - movl %eax,%r12d - roll $14,%ecx - addl %edx,%ecx - notl %r11d - leal -1926607734(%rbx,%r10,1),%ebx - andl %ecx,%r12d - andl %edx,%r11d - movl 0(%rsi),%r10d - orl %r11d,%r12d - movl %edx,%r11d - addl %r12d,%ebx - movl %edx,%r12d - roll $20,%ebx - addl %ecx,%ebx - movl 20(%rsi),%r10d - movl %ecx,%r11d - leal -378558(%rax,%r10,1),%eax - movl 32(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -2022574463(%rdx,%r10,1),%edx - movl 44(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal 1839030562(%rcx,%r10,1),%ecx - movl 56(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -35309556(%rbx,%r10,1),%ebx - movl 4(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal -1530992060(%rax,%r10,1),%eax - movl 16(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal 1272893353(%rdx,%r10,1),%edx - movl 
28(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal -155497632(%rcx,%r10,1),%ecx - movl 40(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -1094730640(%rbx,%r10,1),%ebx - movl 52(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal 681279174(%rax,%r10,1),%eax - movl 0(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -358537222(%rdx,%r10,1),%edx - movl 12(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal -722521979(%rcx,%r10,1),%ecx - movl 24(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal 76029189(%rbx,%r10,1),%ebx - movl 36(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - leal -640364487(%rax,%r10,1),%eax - movl 48(%rsi),%r10d - xorl %edx,%r11d - xorl %ebx,%r11d - addl %r11d,%eax - roll $4,%eax - movl %ebx,%r11d - addl %ebx,%eax - leal -421815835(%rdx,%r10,1),%edx - movl 60(%rsi),%r10d - xorl %ecx,%r11d - xorl %eax,%r11d - addl %r11d,%edx - roll $11,%edx - movl %eax,%r11d - addl %eax,%edx - leal 530742520(%rcx,%r10,1),%ecx - movl 8(%rsi),%r10d - xorl %ebx,%r11d - xorl %edx,%r11d - addl %r11d,%ecx - roll $16,%ecx - movl %edx,%r11d - addl %edx,%ecx - leal -995338651(%rbx,%r10,1),%ebx - movl 0(%rsi),%r10d - xorl %eax,%r11d - xorl %ecx,%r11d - addl %r11d,%ebx - roll $23,%ebx - movl %ecx,%r11d - addl %ecx,%ebx - movl 0(%rsi),%r10d - movl $0xffffffff,%r11d - xorl %edx,%r11d - leal -198630844(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 28(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal 
1126891415(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 56(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1416354905(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 20(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -57434055(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 48(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal 1700485571(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 12(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -1894986606(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 40(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1051523(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 4(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -2054922799(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 32(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal 1873313359(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 60(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -30611744(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 24(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal -1560198380(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 52(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal 1309151649(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 16(%rsi),%r10d - movl $0xffffffff,%r11d - 
roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - leal -145523070(%rax,%r10,1),%eax - orl %ebx,%r11d - xorl %ecx,%r11d - addl %r11d,%eax - movl 44(%rsi),%r10d - movl $0xffffffff,%r11d - roll $6,%eax - xorl %ecx,%r11d - addl %ebx,%eax - leal -1120210379(%rdx,%r10,1),%edx - orl %eax,%r11d - xorl %ebx,%r11d - addl %r11d,%edx - movl 8(%rsi),%r10d - movl $0xffffffff,%r11d - roll $10,%edx - xorl %ebx,%r11d - addl %eax,%edx - leal 718787259(%rcx,%r10,1),%ecx - orl %edx,%r11d - xorl %eax,%r11d - addl %r11d,%ecx - movl 36(%rsi),%r10d - movl $0xffffffff,%r11d - roll $15,%ecx - xorl %eax,%r11d - addl %edx,%ecx - leal -343485551(%rbx,%r10,1),%ebx - orl %ecx,%r11d - xorl %edx,%r11d - addl %r11d,%ebx - movl 0(%rsi),%r10d - movl $0xffffffff,%r11d - roll $21,%ebx - xorl %edx,%r11d - addl %ecx,%ebx - - addl %r8d,%eax - addl %r9d,%ebx - addl %r14d,%ecx - addl %r15d,%edx - - - addq $64,%rsi - cmpq %rdi,%rsi - jb L$loop - - -L$end: - movl %eax,0(%rbp) - movl %ebx,4(%rbp) - movl %ecx,8(%rbp) - movl %edx,12(%rbp) - - movq (%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r12 - - movq 24(%rsp),%rbx - - movq 32(%rsp),%rbp - - addq $40,%rsp - -L$epilogue: - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S deleted file mode 100644 index 1f4a93132f..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S +++ /dev/null @@ -1,4467 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - -.p2align 6 -L$poly: -.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 - -L$One: -.long 1,1,1,1,1,1,1,1 -L$Two: -.long 2,2,2,2,2,2,2,2 -L$Three: -.long 3,3,3,3,3,3,3,3 -L$ONE_mont: -.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe - - -L$ord: -.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 -L$ordK: -.quad 0xccd1c8aaee00bc4f - - - -.globl _ecp_nistz256_neg -.private_extern _ecp_nistz256_neg - -.p2align 5 -_ecp_nistz256_neg: - - pushq %r12 - - pushq %r13 - -L$neg_body: - - xorq %r8,%r8 - xorq %r9,%r9 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r13,%r13 - - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r8,%rax - sbbq 24(%rsi),%r11 - leaq L$poly(%rip),%rsi - movq %r9,%rdx - sbbq $0,%r13 - - addq 0(%rsi),%r8 - movq %r10,%rcx - adcq 8(%rsi),%r9 - adcq 16(%rsi),%r10 - movq %r11,%r12 - adcq 24(%rsi),%r11 - testq %r13,%r13 - - cmovzq %rax,%r8 - cmovzq %rdx,%r9 - movq %r8,0(%rdi) - cmovzq %rcx,%r10 - movq %r9,8(%rdi) - cmovzq %r12,%r11 - movq %r10,16(%rdi) - movq %r11,24(%rdi) - - movq 0(%rsp),%r13 - - movq 8(%rsp),%r12 - - leaq 16(%rsp),%rsp - -L$neg_epilogue: - .byte 0xf3,0xc3 - - - - - - - - -.globl _ecp_nistz256_ord_mul_mont -.private_extern _ecp_nistz256_ord_mul_mont - -.p2align 5 -_ecp_nistz256_ord_mul_mont: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$ecp_nistz256_ord_mul_montx - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$ord_mul_body: - - movq 0(%rdx),%rax - movq %rdx,%rbx - leaq L$ord(%rip),%r14 - movq L$ordK(%rip),%r15 - - - movq %rax,%rcx - mulq 0(%rsi) - movq %rax,%r8 - movq 
%rcx,%rax - movq %rdx,%r9 - - mulq 8(%rsi) - addq %rax,%r9 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq 16(%rsi) - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - - movq %r8,%r13 - imulq %r15,%r8 - - movq %rdx,%r11 - mulq 24(%rsi) - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%r12 - - - mulq 0(%r14) - movq %r8,%rbp - addq %rax,%r13 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%rcx - - subq %r8,%r10 - sbbq $0,%r8 - - mulq 8(%r14) - addq %rcx,%r9 - adcq $0,%rdx - addq %rax,%r9 - movq %rbp,%rax - adcq %rdx,%r10 - movq %rbp,%rdx - adcq $0,%r8 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r11 - movq 8(%rbx),%rax - sbbq %rdx,%rbp - - addq %r8,%r11 - adcq %rbp,%r12 - adcq $0,%r13 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r9 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - - movq %r9,%rcx - imulq %r15,%r9 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - xorq %r8,%r8 - addq %rax,%r12 - movq %r9,%rax - adcq %rdx,%r13 - adcq $0,%r8 - - - mulq 0(%r14) - movq %r9,%rbp - addq %rax,%rcx - movq %r9,%rax - adcq %rdx,%rcx - - subq %r9,%r11 - sbbq $0,%r9 - - mulq 8(%r14) - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %rbp,%rax - adcq %rdx,%r11 - movq %rbp,%rdx - adcq $0,%r9 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r12 - movq 16(%rbx),%rax - sbbq %rdx,%rbp - - addq %r9,%r12 - adcq %rbp,%r13 - adcq $0,%r8 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r10 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rcx,%rax - adcq $0,%rdx - - movq %r10,%rcx - imulq %r15,%r10 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r13 - 
adcq $0,%rdx - xorq %r9,%r9 - addq %rax,%r13 - movq %r10,%rax - adcq %rdx,%r8 - adcq $0,%r9 - - - mulq 0(%r14) - movq %r10,%rbp - addq %rax,%rcx - movq %r10,%rax - adcq %rdx,%rcx - - subq %r10,%r12 - sbbq $0,%r10 - - mulq 8(%r14) - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq %rdx,%r12 - movq %rbp,%rdx - adcq $0,%r10 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r13 - movq 24(%rbx),%rax - sbbq %rdx,%rbp - - addq %r10,%r13 - adcq %rbp,%r8 - adcq $0,%r9 - - - movq %rax,%rcx - mulq 0(%rsi) - addq %rax,%r11 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 8(%rsi) - addq %rbp,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rcx,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq 16(%rsi) - addq %rbp,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %rcx,%rax - adcq $0,%rdx - - movq %r11,%rcx - imulq %r15,%r11 - - movq %rdx,%rbp - mulq 24(%rsi) - addq %rbp,%r8 - adcq $0,%rdx - xorq %r10,%r10 - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - adcq $0,%r10 - - - mulq 0(%r14) - movq %r11,%rbp - addq %rax,%rcx - movq %r11,%rax - adcq %rdx,%rcx - - subq %r11,%r13 - sbbq $0,%r11 - - mulq 8(%r14) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq %rdx,%r13 - movq %rbp,%rdx - adcq $0,%r11 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r8 - sbbq %rdx,%rbp - - addq %r11,%r8 - adcq %rbp,%r9 - adcq $0,%r10 - - - movq %r12,%rsi - subq 0(%r14),%r12 - movq %r13,%r11 - sbbq 8(%r14),%r13 - movq %r8,%rcx - sbbq 16(%r14),%r8 - movq %r9,%rbp - sbbq 24(%r14),%r9 - sbbq $0,%r10 - - cmovcq %rsi,%r12 - cmovcq %r11,%r13 - cmovcq %rcx,%r8 - cmovcq %rbp,%r9 - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$ord_mul_epilogue: - .byte 0xf3,0xc3 - - - - - - - - - -.globl _ecp_nistz256_ord_sqr_mont -.private_extern _ecp_nistz256_ord_sqr_mont - -.p2align 5 
-_ecp_nistz256_ord_sqr_mont: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$ecp_nistz256_ord_sqr_montx - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$ord_sqr_body: - - movq 0(%rsi),%r8 - movq 8(%rsi),%rax - movq 16(%rsi),%r14 - movq 24(%rsi),%r15 - leaq L$ord(%rip),%rsi - movq %rdx,%rbx - jmp L$oop_ord_sqr - -.p2align 5 -L$oop_ord_sqr: - - movq %rax,%rbp - mulq %r8 - movq %rax,%r9 -.byte 102,72,15,110,205 - movq %r14,%rax - movq %rdx,%r10 - - mulq %r8 - addq %rax,%r10 - movq %r15,%rax -.byte 102,73,15,110,214 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %r8 - addq %rax,%r11 - movq %r15,%rax -.byte 102,73,15,110,223 - adcq $0,%rdx - movq %rdx,%r12 - - - mulq %r14 - movq %rax,%r13 - movq %r14,%rax - movq %rdx,%r14 - - - mulq %rbp - addq %rax,%r11 - movq %r15,%rax - adcq $0,%rdx - movq %rdx,%r15 - - mulq %rbp - addq %rax,%r12 - adcq $0,%rdx - - addq %r15,%r12 - adcq %rdx,%r13 - adcq $0,%r14 - - - xorq %r15,%r15 - movq %r8,%rax - addq %r9,%r9 - adcq %r10,%r10 - adcq %r11,%r11 - adcq %r12,%r12 - adcq %r13,%r13 - adcq %r14,%r14 - adcq $0,%r15 - - - mulq %rax - movq %rax,%r8 -.byte 102,72,15,126,200 - movq %rdx,%rbp - - mulq %rax - addq %rbp,%r9 - adcq %rax,%r10 -.byte 102,72,15,126,208 - adcq $0,%rdx - movq %rdx,%rbp - - mulq %rax - addq %rbp,%r11 - adcq %rax,%r12 -.byte 102,72,15,126,216 - adcq $0,%rdx - movq %rdx,%rbp - - movq %r8,%rcx - imulq 32(%rsi),%r8 - - mulq %rax - addq %rbp,%r13 - adcq %rax,%r14 - movq 0(%rsi),%rax - adcq %rdx,%r15 - - - mulq %r8 - movq %r8,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r8,%r10 - sbbq $0,%rbp - - mulq %r8 - addq %rcx,%r9 - adcq $0,%rdx - addq %rax,%r9 - movq %r8,%rax - adcq %rdx,%r10 - movq %r8,%rdx - adcq $0,%rbp - - movq %r9,%rcx - imulq 32(%rsi),%r9 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r11 - movq 0(%rsi),%rax - sbbq %rdx,%r8 - - addq %rbp,%r11 - adcq $0,%r8 - - - mulq %r9 - movq %r9,%rbp - 
addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r9,%r11 - sbbq $0,%rbp - - mulq %r9 - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %r9,%rax - adcq %rdx,%r11 - movq %r9,%rdx - adcq $0,%rbp - - movq %r10,%rcx - imulq 32(%rsi),%r10 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r8 - movq 0(%rsi),%rax - sbbq %rdx,%r9 - - addq %rbp,%r8 - adcq $0,%r9 - - - mulq %r10 - movq %r10,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r10,%r8 - sbbq $0,%rbp - - mulq %r10 - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %r10,%rax - adcq %rdx,%r8 - movq %r10,%rdx - adcq $0,%rbp - - movq %r11,%rcx - imulq 32(%rsi),%r11 - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r9 - movq 0(%rsi),%rax - sbbq %rdx,%r10 - - addq %rbp,%r9 - adcq $0,%r10 - - - mulq %r11 - movq %r11,%rbp - addq %rax,%rcx - movq 8(%rsi),%rax - adcq %rdx,%rcx - - subq %r11,%r9 - sbbq $0,%rbp - - mulq %r11 - addq %rcx,%r8 - adcq $0,%rdx - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - movq %r11,%rdx - adcq $0,%rbp - - shlq $32,%rax - shrq $32,%rdx - subq %rax,%r10 - sbbq %rdx,%r11 - - addq %rbp,%r10 - adcq $0,%r11 - - - xorq %rdx,%rdx - addq %r12,%r8 - adcq %r13,%r9 - movq %r8,%r12 - adcq %r14,%r10 - adcq %r15,%r11 - movq %r9,%rax - adcq $0,%rdx - - - subq 0(%rsi),%r8 - movq %r10,%r14 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r11,%r15 - sbbq 24(%rsi),%r11 - sbbq $0,%rdx - - cmovcq %r12,%r8 - cmovncq %r9,%rax - cmovncq %r10,%r14 - cmovncq %r11,%r15 - - decq %rbx - jnz L$oop_ord_sqr - - movq %r8,0(%rdi) - movq %rax,8(%rdi) - pxor %xmm1,%xmm1 - movq %r14,16(%rdi) - pxor %xmm2,%xmm2 - movq %r15,24(%rdi) - pxor %xmm3,%xmm3 - - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$ord_sqr_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -ecp_nistz256_ord_mul_montx: - -L$ecp_nistz256_ord_mul_montx: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq 
%r14 - - pushq %r15 - -L$ord_mulx_body: - - movq %rdx,%rbx - movq 0(%rdx),%rdx - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - leaq -128(%rsi),%rsi - leaq L$ord-128(%rip),%r14 - movq L$ordK(%rip),%r15 - - - mulxq %r9,%r8,%r9 - mulxq %r10,%rcx,%r10 - mulxq %r11,%rbp,%r11 - addq %rcx,%r9 - mulxq %r12,%rcx,%r12 - movq %r8,%rdx - mulxq %r15,%rdx,%rax - adcq %rbp,%r10 - adcq %rcx,%r11 - adcq $0,%r12 - - - xorq %r13,%r13 - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 24+128(%r14),%rcx,%rbp - movq 8(%rbx),%rdx - adcxq %rcx,%r11 - adoxq %rbp,%r12 - adcxq %r8,%r12 - adoxq %r8,%r13 - adcq $0,%r13 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r9,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - adcxq %r8,%r13 - adoxq %r8,%r8 - adcq $0,%r8 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%r14),%rcx,%rbp - movq 16(%rbx),%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcxq %r9,%r13 - adoxq %r9,%r8 - adcq $0,%r8 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r10,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - adcxq %r9,%r8 - adoxq %r9,%r9 - adcq $0,%r9 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 
- - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%r14),%rcx,%rbp - movq 24(%rbx),%rdx - adcxq %rcx,%r13 - adoxq %rbp,%r8 - adcxq %r10,%r8 - adoxq %r10,%r9 - adcq $0,%r9 - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r11,%rdx - mulxq %r15,%rdx,%rax - adcxq %rcx,%r8 - adoxq %rbp,%r9 - - adcxq %r10,%r9 - adoxq %r10,%r10 - adcq $0,%r10 - - - mulxq 0+128(%r14),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%r14),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 16+128(%r14),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%r14),%rcx,%rbp - leaq 128(%r14),%r14 - movq %r12,%rbx - adcxq %rcx,%r8 - adoxq %rbp,%r9 - movq %r13,%rdx - adcxq %r11,%r9 - adoxq %r11,%r10 - adcq $0,%r10 - - - - movq %r8,%rcx - subq 0(%r14),%r12 - sbbq 8(%r14),%r13 - sbbq 16(%r14),%r8 - movq %r9,%rbp - sbbq 24(%r14),%r9 - sbbq $0,%r10 - - cmovcq %rbx,%r12 - cmovcq %rdx,%r13 - cmovcq %rcx,%r8 - cmovcq %rbp,%r9 - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$ord_mulx_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -ecp_nistz256_ord_sqr_montx: - -L$ecp_nistz256_ord_sqr_montx: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$ord_sqrx_body: - - movq %rdx,%rbx - movq 0(%rsi),%rdx - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - leaq L$ord(%rip),%rsi - jmp L$oop_ord_sqrx - -.p2align 5 -L$oop_ord_sqrx: - mulxq %r14,%r9,%r10 - mulxq %r15,%rcx,%r11 - movq %rdx,%rax -.byte 102,73,15,110,206 - mulxq %r8,%rbp,%r12 - movq %r14,%rdx - addq %rcx,%r10 -.byte 102,73,15,110,215 - adcq %rbp,%r11 - adcq 
$0,%r12 - xorq %r13,%r13 - - mulxq %r15,%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq %r8,%rcx,%rbp - movq %r15,%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcq $0,%r13 - - mulxq %r8,%rcx,%r14 - movq %rax,%rdx -.byte 102,73,15,110,216 - xorq %r15,%r15 - adcxq %r9,%r9 - adoxq %rcx,%r13 - adcxq %r10,%r10 - adoxq %r15,%r14 - - - mulxq %rdx,%r8,%rbp -.byte 102,72,15,126,202 - adcxq %r11,%r11 - adoxq %rbp,%r9 - adcxq %r12,%r12 - mulxq %rdx,%rcx,%rax -.byte 102,72,15,126,210 - adcxq %r13,%r13 - adoxq %rcx,%r10 - adcxq %r14,%r14 - mulxq %rdx,%rcx,%rbp -.byte 0x67 -.byte 102,72,15,126,218 - adoxq %rax,%r11 - adcxq %r15,%r15 - adoxq %rcx,%r12 - adoxq %rbp,%r13 - mulxq %rdx,%rcx,%rax - adoxq %rcx,%r14 - adoxq %rax,%r15 - - - movq %r8,%rdx - mulxq 32(%rsi),%rdx,%rcx - - xorq %rax,%rax - mulxq 0(%rsi),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - mulxq 8(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - mulxq 16(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - mulxq 24(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r8 - adcxq %rax,%r8 - - - movq %r9,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adoxq %rcx,%r9 - adcxq %rbp,%r10 - mulxq 8(%rsi),%rcx,%rbp - adoxq %rcx,%r10 - adcxq %rbp,%r11 - mulxq 16(%rsi),%rcx,%rbp - adoxq %rcx,%r11 - adcxq %rbp,%r8 - mulxq 24(%rsi),%rcx,%rbp - adoxq %rcx,%r8 - adcxq %rbp,%r9 - adoxq %rax,%r9 - - - movq %r10,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - mulxq 8(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r8 - mulxq 16(%rsi),%rcx,%rbp - adcxq %rcx,%r8 - adoxq %rbp,%r9 - mulxq 24(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - adcxq %rax,%r10 - - - movq %r11,%rdx - mulxq 32(%rsi),%rdx,%rcx - - mulxq 0(%rsi),%rcx,%rbp - adoxq %rcx,%r11 - adcxq %rbp,%r8 - mulxq 8(%rsi),%rcx,%rbp - adoxq %rcx,%r8 - adcxq %rbp,%r9 - mulxq 16(%rsi),%rcx,%rbp - adoxq %rcx,%r9 - adcxq %rbp,%r10 - mulxq 24(%rsi),%rcx,%rbp - adoxq %rcx,%r10 - adcxq %rbp,%r11 - adoxq %rax,%r11 - 
- - addq %r8,%r12 - adcq %r13,%r9 - movq %r12,%rdx - adcq %r14,%r10 - adcq %r15,%r11 - movq %r9,%r14 - adcq $0,%rax - - - subq 0(%rsi),%r12 - movq %r10,%r15 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - movq %r11,%r8 - sbbq 24(%rsi),%r11 - sbbq $0,%rax - - cmovncq %r12,%rdx - cmovncq %r9,%r14 - cmovncq %r10,%r15 - cmovncq %r11,%r8 - - decq %rbx - jnz L$oop_ord_sqrx - - movq %rdx,0(%rdi) - movq %r14,8(%rdi) - pxor %xmm1,%xmm1 - movq %r15,16(%rdi) - pxor %xmm2,%xmm2 - movq %r8,24(%rdi) - pxor %xmm3,%xmm3 - - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$ord_sqrx_epilogue: - .byte 0xf3,0xc3 - - - - - - - - -.globl _ecp_nistz256_mul_mont -.private_extern _ecp_nistz256_mul_mont - -.p2align 5 -_ecp_nistz256_mul_mont: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx -L$mul_mont: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$mul_body: - cmpl $0x80100,%ecx - je L$mul_montx - movq %rdx,%rbx - movq 0(%rdx),%rax - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - - call __ecp_nistz256_mul_montq - jmp L$mul_mont_done - -.p2align 5 -L$mul_montx: - movq %rdx,%rbx - movq 0(%rdx),%rdx - movq 0(%rsi),%r9 - movq 8(%rsi),%r10 - movq 16(%rsi),%r11 - movq 24(%rsi),%r12 - leaq -128(%rsi),%rsi - - call __ecp_nistz256_mul_montx -L$mul_mont_done: - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$mul_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_mul_montq: - - - - movq %rax,%rbp - mulq %r9 - movq L$poly+8(%rip),%r14 - movq %rax,%r8 - movq %rbp,%rax - movq %rdx,%r9 - - mulq %r10 - movq L$poly+24(%rip),%r15 - addq %rax,%r9 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %r11 - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%r11 - 
- mulq %r12 - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - xorq %r13,%r13 - movq %rdx,%r12 - - - - - - - - - - - movq %r8,%rbp - shlq $32,%r8 - mulq %r15 - shrq $32,%rbp - addq %r8,%r9 - adcq %rbp,%r10 - adcq %rax,%r11 - movq 8(%rbx),%rax - adcq %rdx,%r12 - adcq $0,%r13 - xorq %r8,%r8 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r9 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - addq %rcx,%r10 - adcq $0,%rdx - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %r9,%rax - adcq %rdx,%r13 - adcq $0,%r8 - - - - movq %r9,%rbp - shlq $32,%r9 - mulq %r15 - shrq $32,%rbp - addq %r9,%r10 - adcq %rbp,%r11 - adcq %rax,%r12 - movq 16(%rbx),%rax - adcq %rdx,%r13 - adcq $0,%r8 - xorq %r9,%r9 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r10 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - addq %rcx,%r11 - adcq $0,%rdx - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %r10,%rax - adcq %rdx,%r8 - adcq $0,%r9 - - - - movq %r10,%rbp - shlq $32,%r10 - mulq %r15 - shrq $32,%rbp - addq %r10,%r11 - adcq %rbp,%r12 - adcq %rax,%r13 - movq 24(%rbx),%rax - adcq %rdx,%r8 - adcq $0,%r9 - xorq %r10,%r10 - - - - movq %rax,%rbp - mulq 0(%rsi) - addq %rax,%r11 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 8(%rsi) - addq %rcx,%r12 - adcq $0,%rdx - addq %rax,%r12 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 16(%rsi) - addq %rcx,%r13 - adcq $0,%rdx - addq %rax,%r13 - movq %rbp,%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq 24(%rsi) - addq %rcx,%r8 - adcq $0,%rdx - addq %rax,%r8 - movq %r11,%rax - adcq %rdx,%r9 - adcq 
$0,%r10 - - - - movq %r11,%rbp - shlq $32,%r11 - mulq %r15 - shrq $32,%rbp - addq %r11,%r12 - adcq %rbp,%r13 - movq %r12,%rcx - adcq %rax,%r8 - adcq %rdx,%r9 - movq %r13,%rbp - adcq $0,%r10 - - - - subq $-1,%r12 - movq %r8,%rbx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%rdx - sbbq %r15,%r9 - sbbq $0,%r10 - - cmovcq %rcx,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rbx,%r8 - movq %r13,8(%rdi) - cmovcq %rdx,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - - - - - - - -.globl _ecp_nistz256_sqr_mont -.private_extern _ecp_nistz256_sqr_mont - -.p2align 5 -_ecp_nistz256_sqr_mont: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$sqr_body: - cmpl $0x80100,%ecx - je L$sqr_montx - movq 0(%rsi),%rax - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - - call __ecp_nistz256_sqr_montq - jmp L$sqr_mont_done - -.p2align 5 -L$sqr_montx: - movq 0(%rsi),%rdx - movq 8(%rsi),%r14 - movq 16(%rsi),%r15 - movq 24(%rsi),%r8 - leaq -128(%rsi),%rsi - - call __ecp_nistz256_sqr_montx -L$sqr_mont_done: - movq 0(%rsp),%r15 - - movq 8(%rsp),%r14 - - movq 16(%rsp),%r13 - - movq 24(%rsp),%r12 - - movq 32(%rsp),%rbx - - movq 40(%rsp),%rbp - - leaq 48(%rsp),%rsp - -L$sqr_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_sqr_montq: - - movq %rax,%r13 - mulq %r14 - movq %rax,%r9 - movq %r15,%rax - movq %rdx,%r10 - - mulq %r13 - addq %rax,%r10 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %r13 - addq %rax,%r11 - movq %r15,%rax - adcq $0,%rdx - movq %rdx,%r12 - - - mulq %r14 - addq %rax,%r11 - movq %r8,%rax - adcq $0,%rdx - movq %rdx,%rbp - - mulq %r14 - addq %rax,%r12 - movq %r8,%rax - adcq $0,%rdx - addq %rbp,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - - mulq %r15 - xorq %r15,%r15 - addq %rax,%r13 - movq 0(%rsi),%rax - movq %rdx,%r14 - adcq $0,%r14 - - addq %r9,%r9 - adcq %r10,%r10 - adcq %r11,%r11 - adcq %r12,%r12 - 
adcq %r13,%r13 - adcq %r14,%r14 - adcq $0,%r15 - - mulq %rax - movq %rax,%r8 - movq 8(%rsi),%rax - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r9 - adcq %rax,%r10 - movq 16(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r11 - adcq %rax,%r12 - movq 24(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rcx - - mulq %rax - addq %rcx,%r13 - adcq %rax,%r14 - movq %r8,%rax - adcq %rdx,%r15 - - movq L$poly+8(%rip),%rsi - movq L$poly+24(%rip),%rbp - - - - - movq %r8,%rcx - shlq $32,%r8 - mulq %rbp - shrq $32,%rcx - addq %r8,%r9 - adcq %rcx,%r10 - adcq %rax,%r11 - movq %r9,%rax - adcq $0,%rdx - - - - movq %r9,%rcx - shlq $32,%r9 - movq %rdx,%r8 - mulq %rbp - shrq $32,%rcx - addq %r9,%r10 - adcq %rcx,%r11 - adcq %rax,%r8 - movq %r10,%rax - adcq $0,%rdx - - - - movq %r10,%rcx - shlq $32,%r10 - movq %rdx,%r9 - mulq %rbp - shrq $32,%rcx - addq %r10,%r11 - adcq %rcx,%r8 - adcq %rax,%r9 - movq %r11,%rax - adcq $0,%rdx - - - - movq %r11,%rcx - shlq $32,%r11 - movq %rdx,%r10 - mulq %rbp - shrq $32,%rcx - addq %r11,%r8 - adcq %rcx,%r9 - adcq %rax,%r10 - adcq $0,%rdx - xorq %r11,%r11 - - - - addq %r8,%r12 - adcq %r9,%r13 - movq %r12,%r8 - adcq %r10,%r14 - adcq %rdx,%r15 - movq %r13,%r9 - adcq $0,%r11 - - subq $-1,%r12 - movq %r14,%r10 - sbbq %rsi,%r13 - sbbq $0,%r14 - movq %r15,%rcx - sbbq %rbp,%r15 - sbbq $0,%r11 - - cmovcq %r8,%r12 - cmovcq %r9,%r13 - movq %r12,0(%rdi) - cmovcq %r10,%r14 - movq %r13,8(%rdi) - cmovcq %rcx,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - - .byte 0xf3,0xc3 - - - -.p2align 5 -__ecp_nistz256_mul_montx: - - - - mulxq %r9,%r8,%r9 - mulxq %r10,%rcx,%r10 - movq $32,%r14 - xorq %r13,%r13 - mulxq %r11,%rbp,%r11 - movq L$poly+24(%rip),%r15 - adcq %rcx,%r9 - mulxq %r12,%rcx,%r12 - movq %r8,%rdx - adcq %rbp,%r10 - shlxq %r14,%r8,%rbp - adcq %rcx,%r11 - shrxq %r14,%r8,%rcx - adcq $0,%r12 - - - - addq %rbp,%r9 - adcq %rcx,%r10 - - mulxq %r15,%rcx,%rbp - movq 8(%rbx),%rdx - adcq %rcx,%r11 - adcq %rbp,%r12 - adcq $0,%r13 - xorq %r8,%r8 - - - - mulxq 
0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r9 - adoxq %rbp,%r10 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r9,%rdx - adcxq %rcx,%r12 - shlxq %r14,%r9,%rcx - adoxq %rbp,%r13 - shrxq %r14,%r9,%rbp - - adcxq %r8,%r13 - adoxq %r8,%r8 - adcq $0,%r8 - - - - addq %rcx,%r10 - adcq %rbp,%r11 - - mulxq %r15,%rcx,%rbp - movq 16(%rbx),%rdx - adcq %rcx,%r12 - adcq %rbp,%r13 - adcq $0,%r8 - xorq %r9,%r9 - - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r10 - adoxq %rbp,%r11 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r10,%rdx - adcxq %rcx,%r13 - shlxq %r14,%r10,%rcx - adoxq %rbp,%r8 - shrxq %r14,%r10,%rbp - - adcxq %r9,%r8 - adoxq %r9,%r9 - adcq $0,%r9 - - - - addq %rcx,%r11 - adcq %rbp,%r12 - - mulxq %r15,%rcx,%rbp - movq 24(%rbx),%rdx - adcq %rcx,%r13 - adcq %rbp,%r8 - adcq $0,%r9 - xorq %r10,%r10 - - - - mulxq 0+128(%rsi),%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq 8+128(%rsi),%rcx,%rbp - adcxq %rcx,%r12 - adoxq %rbp,%r13 - - mulxq 16+128(%rsi),%rcx,%rbp - adcxq %rcx,%r13 - adoxq %rbp,%r8 - - mulxq 24+128(%rsi),%rcx,%rbp - movq %r11,%rdx - adcxq %rcx,%r8 - shlxq %r14,%r11,%rcx - adoxq %rbp,%r9 - shrxq %r14,%r11,%rbp - - adcxq %r10,%r9 - adoxq %r10,%r10 - adcq $0,%r10 - - - - addq %rcx,%r12 - adcq %rbp,%r13 - - mulxq %r15,%rcx,%rbp - movq %r12,%rbx - movq L$poly+8(%rip),%r14 - adcq %rcx,%r8 - movq %r13,%rdx - adcq %rbp,%r9 - adcq $0,%r10 - - - - xorl %eax,%eax - movq %r8,%rcx - sbbq $-1,%r12 - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%rbp - sbbq %r15,%r9 - sbbq $0,%r10 - - cmovcq %rbx,%r12 - cmovcq %rdx,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %rbp,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_sqr_montx: - - mulxq 
%r14,%r9,%r10 - mulxq %r15,%rcx,%r11 - xorl %eax,%eax - adcq %rcx,%r10 - mulxq %r8,%rbp,%r12 - movq %r14,%rdx - adcq %rbp,%r11 - adcq $0,%r12 - xorq %r13,%r13 - - - mulxq %r15,%rcx,%rbp - adcxq %rcx,%r11 - adoxq %rbp,%r12 - - mulxq %r8,%rcx,%rbp - movq %r15,%rdx - adcxq %rcx,%r12 - adoxq %rbp,%r13 - adcq $0,%r13 - - - mulxq %r8,%rcx,%r14 - movq 0+128(%rsi),%rdx - xorq %r15,%r15 - adcxq %r9,%r9 - adoxq %rcx,%r13 - adcxq %r10,%r10 - adoxq %r15,%r14 - - mulxq %rdx,%r8,%rbp - movq 8+128(%rsi),%rdx - adcxq %r11,%r11 - adoxq %rbp,%r9 - adcxq %r12,%r12 - mulxq %rdx,%rcx,%rax - movq 16+128(%rsi),%rdx - adcxq %r13,%r13 - adoxq %rcx,%r10 - adcxq %r14,%r14 -.byte 0x67 - mulxq %rdx,%rcx,%rbp - movq 24+128(%rsi),%rdx - adoxq %rax,%r11 - adcxq %r15,%r15 - adoxq %rcx,%r12 - movq $32,%rsi - adoxq %rbp,%r13 -.byte 0x67,0x67 - mulxq %rdx,%rcx,%rax - movq L$poly+24(%rip),%rdx - adoxq %rcx,%r14 - shlxq %rsi,%r8,%rcx - adoxq %rax,%r15 - shrxq %rsi,%r8,%rax - movq %rdx,%rbp - - - addq %rcx,%r9 - adcq %rax,%r10 - - mulxq %r8,%rcx,%r8 - adcq %rcx,%r11 - shlxq %rsi,%r9,%rcx - adcq $0,%r8 - shrxq %rsi,%r9,%rax - - - addq %rcx,%r10 - adcq %rax,%r11 - - mulxq %r9,%rcx,%r9 - adcq %rcx,%r8 - shlxq %rsi,%r10,%rcx - adcq $0,%r9 - shrxq %rsi,%r10,%rax - - - addq %rcx,%r11 - adcq %rax,%r8 - - mulxq %r10,%rcx,%r10 - adcq %rcx,%r9 - shlxq %rsi,%r11,%rcx - adcq $0,%r10 - shrxq %rsi,%r11,%rax - - - addq %rcx,%r8 - adcq %rax,%r9 - - mulxq %r11,%rcx,%r11 - adcq %rcx,%r10 - adcq $0,%r11 - - xorq %rdx,%rdx - addq %r8,%r12 - movq L$poly+8(%rip),%rsi - adcq %r9,%r13 - movq %r12,%r8 - adcq %r10,%r14 - adcq %r11,%r15 - movq %r13,%r9 - adcq $0,%rdx - - subq $-1,%r12 - movq %r14,%r10 - sbbq %rsi,%r13 - sbbq $0,%r14 - movq %r15,%r11 - sbbq %rbp,%r15 - sbbq $0,%rdx - - cmovcq %r8,%r12 - cmovcq %r9,%r13 - movq %r12,0(%rdi) - cmovcq %r10,%r14 - movq %r13,8(%rdi) - cmovcq %r11,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.globl _ecp_nistz256_select_w5 -.private_extern 
_ecp_nistz256_select_w5 - -.p2align 5 -_ecp_nistz256_select_w5: - - leaq _OPENSSL_ia32cap_P(%rip),%rax - movq 8(%rax),%rax - testl $32,%eax - jnz L$avx2_select_w5 - movdqa L$One(%rip),%xmm0 - movd %edx,%xmm1 - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - - movdqa %xmm0,%xmm8 - pshufd $0,%xmm1,%xmm1 - - movq $16,%rax -L$select_loop_sse_w5: - - movdqa %xmm8,%xmm15 - paddd %xmm0,%xmm8 - pcmpeqd %xmm1,%xmm15 - - movdqa 0(%rsi),%xmm9 - movdqa 16(%rsi),%xmm10 - movdqa 32(%rsi),%xmm11 - movdqa 48(%rsi),%xmm12 - movdqa 64(%rsi),%xmm13 - movdqa 80(%rsi),%xmm14 - leaq 96(%rsi),%rsi - - pand %xmm15,%xmm9 - pand %xmm15,%xmm10 - por %xmm9,%xmm2 - pand %xmm15,%xmm11 - por %xmm10,%xmm3 - pand %xmm15,%xmm12 - por %xmm11,%xmm4 - pand %xmm15,%xmm13 - por %xmm12,%xmm5 - pand %xmm15,%xmm14 - por %xmm13,%xmm6 - por %xmm14,%xmm7 - - decq %rax - jnz L$select_loop_sse_w5 - - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - movdqu %xmm4,32(%rdi) - movdqu %xmm5,48(%rdi) - movdqu %xmm6,64(%rdi) - movdqu %xmm7,80(%rdi) - .byte 0xf3,0xc3 - -L$SEH_end_ecp_nistz256_select_w5: - - - - -.globl _ecp_nistz256_select_w7 -.private_extern _ecp_nistz256_select_w7 - -.p2align 5 -_ecp_nistz256_select_w7: - - leaq _OPENSSL_ia32cap_P(%rip),%rax - movq 8(%rax),%rax - testl $32,%eax - jnz L$avx2_select_w7 - movdqa L$One(%rip),%xmm8 - movd %edx,%xmm1 - - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - - movdqa %xmm8,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq $64,%rax - -L$select_loop_sse_w7: - movdqa %xmm8,%xmm15 - paddd %xmm0,%xmm8 - movdqa 0(%rsi),%xmm9 - movdqa 16(%rsi),%xmm10 - pcmpeqd %xmm1,%xmm15 - movdqa 32(%rsi),%xmm11 - movdqa 48(%rsi),%xmm12 - leaq 64(%rsi),%rsi - - pand %xmm15,%xmm9 - pand %xmm15,%xmm10 - por %xmm9,%xmm2 - pand %xmm15,%xmm11 - por %xmm10,%xmm3 - pand %xmm15,%xmm12 - por %xmm11,%xmm4 - prefetcht0 255(%rsi) - por %xmm12,%xmm5 - - decq %rax - jnz L$select_loop_sse_w7 - - movdqu %xmm2,0(%rdi) - 
movdqu %xmm3,16(%rdi) - movdqu %xmm4,32(%rdi) - movdqu %xmm5,48(%rdi) - .byte 0xf3,0xc3 - -L$SEH_end_ecp_nistz256_select_w7: - - - - -.p2align 5 -ecp_nistz256_avx2_select_w5: - -L$avx2_select_w5: - vzeroupper - vmovdqa L$Two(%rip),%ymm0 - - vpxor %ymm2,%ymm2,%ymm2 - vpxor %ymm3,%ymm3,%ymm3 - vpxor %ymm4,%ymm4,%ymm4 - - vmovdqa L$One(%rip),%ymm5 - vmovdqa L$Two(%rip),%ymm10 - - vmovd %edx,%xmm1 - vpermd %ymm1,%ymm2,%ymm1 - - movq $8,%rax -L$select_loop_avx2_w5: - - vmovdqa 0(%rsi),%ymm6 - vmovdqa 32(%rsi),%ymm7 - vmovdqa 64(%rsi),%ymm8 - - vmovdqa 96(%rsi),%ymm11 - vmovdqa 128(%rsi),%ymm12 - vmovdqa 160(%rsi),%ymm13 - - vpcmpeqd %ymm1,%ymm5,%ymm9 - vpcmpeqd %ymm1,%ymm10,%ymm14 - - vpaddd %ymm0,%ymm5,%ymm5 - vpaddd %ymm0,%ymm10,%ymm10 - leaq 192(%rsi),%rsi - - vpand %ymm9,%ymm6,%ymm6 - vpand %ymm9,%ymm7,%ymm7 - vpand %ymm9,%ymm8,%ymm8 - vpand %ymm14,%ymm11,%ymm11 - vpand %ymm14,%ymm12,%ymm12 - vpand %ymm14,%ymm13,%ymm13 - - vpxor %ymm6,%ymm2,%ymm2 - vpxor %ymm7,%ymm3,%ymm3 - vpxor %ymm8,%ymm4,%ymm4 - vpxor %ymm11,%ymm2,%ymm2 - vpxor %ymm12,%ymm3,%ymm3 - vpxor %ymm13,%ymm4,%ymm4 - - decq %rax - jnz L$select_loop_avx2_w5 - - vmovdqu %ymm2,0(%rdi) - vmovdqu %ymm3,32(%rdi) - vmovdqu %ymm4,64(%rdi) - vzeroupper - .byte 0xf3,0xc3 - -L$SEH_end_ecp_nistz256_avx2_select_w5: - - - - -.globl _ecp_nistz256_avx2_select_w7 -.private_extern _ecp_nistz256_avx2_select_w7 - -.p2align 5 -_ecp_nistz256_avx2_select_w7: - -L$avx2_select_w7: - vzeroupper - vmovdqa L$Three(%rip),%ymm0 - - vpxor %ymm2,%ymm2,%ymm2 - vpxor %ymm3,%ymm3,%ymm3 - - vmovdqa L$One(%rip),%ymm4 - vmovdqa L$Two(%rip),%ymm8 - vmovdqa L$Three(%rip),%ymm12 - - vmovd %edx,%xmm1 - vpermd %ymm1,%ymm2,%ymm1 - - - movq $21,%rax -L$select_loop_avx2_w7: - - vmovdqa 0(%rsi),%ymm5 - vmovdqa 32(%rsi),%ymm6 - - vmovdqa 64(%rsi),%ymm9 - vmovdqa 96(%rsi),%ymm10 - - vmovdqa 128(%rsi),%ymm13 - vmovdqa 160(%rsi),%ymm14 - - vpcmpeqd %ymm1,%ymm4,%ymm7 - vpcmpeqd %ymm1,%ymm8,%ymm11 - vpcmpeqd %ymm1,%ymm12,%ymm15 - - vpaddd %ymm0,%ymm4,%ymm4 
- vpaddd %ymm0,%ymm8,%ymm8 - vpaddd %ymm0,%ymm12,%ymm12 - leaq 192(%rsi),%rsi - - vpand %ymm7,%ymm5,%ymm5 - vpand %ymm7,%ymm6,%ymm6 - vpand %ymm11,%ymm9,%ymm9 - vpand %ymm11,%ymm10,%ymm10 - vpand %ymm15,%ymm13,%ymm13 - vpand %ymm15,%ymm14,%ymm14 - - vpxor %ymm5,%ymm2,%ymm2 - vpxor %ymm6,%ymm3,%ymm3 - vpxor %ymm9,%ymm2,%ymm2 - vpxor %ymm10,%ymm3,%ymm3 - vpxor %ymm13,%ymm2,%ymm2 - vpxor %ymm14,%ymm3,%ymm3 - - decq %rax - jnz L$select_loop_avx2_w7 - - - vmovdqa 0(%rsi),%ymm5 - vmovdqa 32(%rsi),%ymm6 - - vpcmpeqd %ymm1,%ymm4,%ymm7 - - vpand %ymm7,%ymm5,%ymm5 - vpand %ymm7,%ymm6,%ymm6 - - vpxor %ymm5,%ymm2,%ymm2 - vpxor %ymm6,%ymm3,%ymm3 - - vmovdqu %ymm2,0(%rdi) - vmovdqu %ymm3,32(%rdi) - vzeroupper - .byte 0xf3,0xc3 - -L$SEH_end_ecp_nistz256_avx2_select_w7: - - -.p2align 5 -__ecp_nistz256_add_toq: - - xorq %r11,%r11 - addq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - movq %r12,%rax - adcq 16(%rbx),%r8 - adcq 24(%rbx),%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_sub_fromq: - - subq 0(%rbx),%r12 - sbbq 8(%rbx),%r13 - movq %r12,%rax - sbbq 16(%rbx),%r8 - sbbq 24(%rbx),%r9 - movq %r13,%rbp - sbbq %r11,%r11 - - addq $-1,%r12 - movq %r8,%rcx - adcq %r14,%r13 - adcq $0,%r8 - movq %r9,%r10 - adcq %r15,%r9 - testq %r11,%r11 - - cmovzq %rax,%r12 - cmovzq %rbp,%r13 - movq %r12,0(%rdi) - cmovzq %rcx,%r8 - movq %r13,8(%rdi) - cmovzq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_subq: - - subq %r12,%rax - sbbq %r13,%rbp - movq %rax,%r12 - sbbq %r8,%rcx - sbbq %r9,%r10 - movq %rbp,%r13 - sbbq %r11,%r11 - - addq $-1,%rax - movq %rcx,%r8 - adcq %r14,%rbp - adcq $0,%rcx - movq %r10,%r9 - adcq %r15,%r10 - testq %r11,%r11 - - 
cmovnzq %rax,%r12 - cmovnzq %rbp,%r13 - cmovnzq %rcx,%r8 - cmovnzq %r10,%r9 - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_mul_by_2q: - - xorq %r11,%r11 - addq %r12,%r12 - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - -.globl _ecp_nistz256_point_double -.private_extern _ecp_nistz256_point_double - -.p2align 5 -_ecp_nistz256_point_double: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$point_doublex - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $160+8,%rsp - -L$point_doubleq_body: - -L$point_double_shortcutq: - movdqu 0(%rsi),%xmm0 - movq %rsi,%rbx - movdqu 16(%rsi),%xmm1 - movq 32+0(%rsi),%r12 - movq 32+8(%rsi),%r13 - movq 32+16(%rsi),%r8 - movq 32+24(%rsi),%r9 - movq L$poly+8(%rip),%r14 - movq L$poly+24(%rip),%r15 - movdqa %xmm0,96(%rsp) - movdqa %xmm1,96+16(%rsp) - leaq 32(%rdi),%r10 - leaq 64(%rdi),%r11 -.byte 102,72,15,110,199 -.byte 102,73,15,110,202 -.byte 102,73,15,110,211 - - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - leaq 64-0(%rsi),%rsi - leaq 64(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 0(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 32(%rbx),%rax - movq 64+0(%rbx),%r9 - movq 64+8(%rbx),%r10 - movq 64+16(%rbx),%r11 - movq 64+24(%rbx),%r12 - leaq 64-0(%rbx),%rsi - leaq 32(%rbx),%rbx -.byte 102,72,15,126,215 - call __ecp_nistz256_mul_montq - call __ecp_nistz256_mul_by_2q - - 
movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_toq - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 -.byte 102,72,15,126,207 - call __ecp_nistz256_sqr_montq - xorq %r9,%r9 - movq %r12,%rax - addq $-1,%r12 - movq %r13,%r10 - adcq %rsi,%r13 - movq %r14,%rcx - adcq $0,%r14 - movq %r15,%r8 - adcq %rbp,%r15 - adcq $0,%r9 - xorq %rsi,%rsi - testq $1,%rax - - cmovzq %rax,%r12 - cmovzq %r10,%r13 - cmovzq %rcx,%r14 - cmovzq %r8,%r15 - cmovzq %rsi,%r9 - - movq %r13,%rax - shrq $1,%r12 - shlq $63,%rax - movq %r14,%r10 - shrq $1,%r13 - orq %rax,%r12 - shlq $63,%r10 - movq %r15,%rcx - shrq $1,%r14 - orq %r10,%r13 - shlq $63,%rcx - movq %r12,0(%rdi) - shrq $1,%r15 - movq %r13,8(%rdi) - shlq $63,%r9 - orq %rcx,%r14 - orq %r9,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - movq 64(%rsp),%rax - leaq 64(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - leaq 32(%rsp),%rbx - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_toq - - movq 96(%rsp),%rax - leaq 96(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2q - - movq 0+32(%rsp),%rax - movq 8+32(%rsp),%r14 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r15 - movq 24+32(%rsp),%r8 -.byte 102,72,15,126,199 - call __ecp_nistz256_sqr_montq - - leaq 128(%rsp),%rbx - movq %r14,%r8 - movq %r15,%r9 - movq %rsi,%r14 - movq %rbp,%r15 - call 
__ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 0(%rsp),%rdi - call __ecp_nistz256_subq - - movq 32(%rsp),%rax - leaq 32(%rsp),%rbx - movq %r12,%r14 - xorl %ecx,%ecx - movq %r12,0+0(%rsp) - movq %r13,%r10 - movq %r13,0+8(%rsp) - cmovzq %r8,%r11 - movq %r8,0+16(%rsp) - leaq 0-0(%rsp),%rsi - cmovzq %r9,%r12 - movq %r9,0+24(%rsp) - movq %r14,%r9 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - -.byte 102,72,15,126,203 -.byte 102,72,15,126,207 - call __ecp_nistz256_sub_fromq - - leaq 160+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$point_doubleq_epilogue: - .byte 0xf3,0xc3 - - -.globl _ecp_nistz256_point_add -.private_extern _ecp_nistz256_point_add - -.p2align 5 -_ecp_nistz256_point_add: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$point_addx - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $576+8,%rsp - -L$point_addq_body: - - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq %rsi,%rbx - movq %rdx,%rsi - movdqa %xmm0,384(%rsp) - movdqa %xmm1,384+16(%rsp) - movdqa %xmm2,416(%rsp) - movdqa %xmm3,416+16(%rsp) - movdqa %xmm4,448(%rsp) - movdqa %xmm5,448+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rsi),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rsi),%xmm3 - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,480(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,480+16(%rsp) - movdqu 64(%rsi),%xmm0 - movdqu 80(%rsi),%xmm1 - movdqa %xmm2,512(%rsp) - movdqa %xmm3,512+16(%rsp) - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm0,%xmm1 -.byte 
102,72,15,110,199 - - leaq 64-0(%rsi),%rsi - movq %rax,544+0(%rsp) - movq %r14,544+8(%rsp) - movq %r15,544+16(%rsp) - movq %r8,544+24(%rsp) - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm1,%xmm4 - por %xmm1,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - movq 64+0(%rbx),%rax - movq 64+8(%rbx),%r14 - movq 64+16(%rbx),%r15 - movq 64+24(%rbx),%r8 -.byte 102,72,15,110,203 - - leaq 64-0(%rbx),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 544(%rsp),%rax - leaq 544(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq 0+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 416(%rsp),%rax - leaq 416(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq 0+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 512(%rsp),%rax - leaq 512(%rsp),%rbx - movq 0+256(%rsp),%r9 - movq 8+256(%rsp),%r10 - leaq 0+256(%rsp),%rsi - movq 16+256(%rsp),%r11 - movq 24+256(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 224(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - orq %r13,%r12 - movdqa %xmm4,%xmm2 - orq %r8,%r12 - orq %r9,%r12 - por %xmm5,%xmm2 -.byte 102,73,15,110,220 - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq 0+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 480(%rsp),%rax - leaq 480(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 
16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 160(%rsp),%rbx - leaq 0(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - orq %r13,%r12 - orq %r8,%r12 - orq %r9,%r12 - -.byte 102,73,15,126,208 -.byte 102,73,15,126,217 - orq %r8,%r12 -.byte 0x3e - jnz L$add_proceedq - - - - testq %r9,%r9 - jz L$add_doubleq - - - - - - -.byte 102,72,15,126,199 - pxor %xmm0,%xmm0 - movdqu %xmm0,0(%rdi) - movdqu %xmm0,16(%rdi) - movdqu %xmm0,32(%rdi) - movdqu %xmm0,48(%rdi) - movdqu %xmm0,64(%rdi) - movdqu %xmm0,80(%rdi) - jmp L$add_doneq - -.p2align 5 -L$add_doubleq: -.byte 102,72,15,126,206 -.byte 102,72,15,126,199 - addq $416,%rsp - - jmp L$point_double_shortcutq - - -.p2align 5 -L$add_proceedq: - movq 0+64(%rsp),%rax - movq 8+64(%rsp),%r14 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 0+0(%rsp),%rax - movq 8+0(%rsp),%r14 - leaq 0+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 544(%rsp),%rax - leaq 544(%rsp),%rbx - movq 0+352(%rsp),%r9 - movq 8+352(%rsp),%r10 - leaq 0+352(%rsp),%rsi - movq 16+352(%rsp),%r11 - movq 24+352(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 0(%rsp),%rax - leaq 0(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 160(%rsp),%rax - leaq 160(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montq - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 96(%rsp),%rsi - 
adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subq - - leaq 128(%rsp),%rbx - leaq 288(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 192+0(%rsp),%rax - movq 192+8(%rsp),%rbp - movq 192+16(%rsp),%rcx - movq 192+24(%rsp),%r10 - leaq 320(%rsp),%rdi - - call __ecp_nistz256_subq - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 128(%rsp),%rax - leaq 128(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq 0+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 320(%rsp),%rax - leaq 320(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 320(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 256(%rsp),%rbx - leaq 320(%rsp),%rdi - call __ecp_nistz256_sub_fromq - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 352(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 352+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 544(%rsp),%xmm2 - pand 544+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 480(%rsp),%xmm2 - pand 480+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn 
%xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 320(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 320+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 512(%rsp),%xmm2 - pand 512+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - -L$add_doneq: - leaq 576+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$point_addq_epilogue: - .byte 0xf3,0xc3 - - -.globl _ecp_nistz256_point_add_affine -.private_extern _ecp_nistz256_point_add_affine - -.p2align 5 -_ecp_nistz256_point_add_affine: - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$point_add_affinex - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $480+8,%rsp - -L$add_affineq_body: - - movdqu 0(%rsi),%xmm0 - movq %rdx,%rbx - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq 64+0(%rsi),%rax - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,320(%rsp) - movdqa %xmm1,320+16(%rsp) - movdqa %xmm2,352(%rsp) - movdqa %xmm3,352+16(%rsp) - movdqa %xmm4,384(%rsp) - movdqa %xmm5,384+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rbx),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rbx),%xmm1 - movdqu 32(%rbx),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rbx),%xmm3 - movdqa %xmm0,416(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,416+16(%rsp) - por 
%xmm0,%xmm1 -.byte 102,72,15,110,199 - movdqa %xmm2,448(%rsp) - movdqa %xmm3,448+16(%rsp) - por %xmm2,%xmm3 - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm1,%xmm3 - - leaq 64-0(%rsi),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm3,%xmm4 - movq 0(%rbx),%rax - - movq %r12,%r9 - por %xmm3,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - movq %r13,%r10 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - movq %r14,%r11 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - - leaq 32-0(%rsp),%rsi - movq %r15,%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 320(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 384(%rsp),%rax - leaq 384(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 288(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 448(%rsp),%rax - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq 0+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 352(%rsp),%rbx - leaq 96(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+64(%rsp),%rax - movq 8+64(%rsp),%r14 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 128(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 0+96(%rsp),%rax - movq 8+96(%rsp),%r14 - leaq 0+96(%rsp),%rsi - movq 16+96(%rsp),%r15 - movq 24+96(%rsp),%r8 - leaq 192(%rsp),%rdi - call __ecp_nistz256_sqr_montq - - movq 128(%rsp),%rax - leaq 128(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 320(%rsp),%rax - 
leaq 320(%rsp),%rbx - movq 0+128(%rsp),%r9 - movq 8+128(%rsp),%r10 - leaq 0+128(%rsp),%rsi - movq 16+128(%rsp),%r11 - movq 24+128(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montq - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 192(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subq - - leaq 160(%rsp),%rbx - leaq 224(%rsp),%rdi - call __ecp_nistz256_sub_fromq - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 64(%rsp),%rdi - - call __ecp_nistz256_subq - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 352(%rsp),%rax - leaq 352(%rsp),%rbx - movq 0+160(%rsp),%r9 - movq 8+160(%rsp),%r10 - leaq 0+160(%rsp),%rsi - movq 16+160(%rsp),%r11 - movq 24+160(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montq - - movq 96(%rsp),%rax - leaq 96(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq 0+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 64(%rsp),%rdi - call __ecp_nistz256_mul_montq - - leaq 32(%rsp),%rbx - leaq 256(%rsp),%rdi - call __ecp_nistz256_sub_fromq - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand L$ONE_mont(%rip),%xmm2 - pand L$ONE_mont+16(%rip),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa 
%xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 224(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 224+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 320(%rsp),%xmm2 - pand 320+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 256(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 256+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 352(%rsp),%xmm2 - pand 352+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - - leaq 480+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$add_affineq_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -__ecp_nistz256_add_tox: - - xorq %r11,%r11 - adcq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - movq %r12,%rax - adcq 16(%rbx),%r8 - adcq 24(%rbx),%r9 - movq %r13,%rbp - adcq $0,%r11 - - xorq %r10,%r10 - sbbq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_sub_fromx: - - xorq %r11,%r11 - sbbq 0(%rbx),%r12 - sbbq 8(%rbx),%r13 - movq %r12,%rax - sbbq 16(%rbx),%r8 - sbbq 24(%rbx),%r9 - movq %r13,%rbp - sbbq $0,%r11 - - xorq %r10,%r10 - adcq $-1,%r12 - movq %r8,%rcx - adcq %r14,%r13 - adcq $0,%r8 - movq %r9,%r10 - adcq %r15,%r9 - - btq 
$0,%r11 - cmovncq %rax,%r12 - cmovncq %rbp,%r13 - movq %r12,0(%rdi) - cmovncq %rcx,%r8 - movq %r13,8(%rdi) - cmovncq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_subx: - - xorq %r11,%r11 - sbbq %r12,%rax - sbbq %r13,%rbp - movq %rax,%r12 - sbbq %r8,%rcx - sbbq %r9,%r10 - movq %rbp,%r13 - sbbq $0,%r11 - - xorq %r9,%r9 - adcq $-1,%rax - movq %rcx,%r8 - adcq %r14,%rbp - adcq $0,%rcx - movq %r10,%r9 - adcq %r15,%r10 - - btq $0,%r11 - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - cmovcq %rcx,%r8 - cmovcq %r10,%r9 - - .byte 0xf3,0xc3 - - - - -.p2align 5 -__ecp_nistz256_mul_by_2x: - - xorq %r11,%r11 - adcq %r12,%r12 - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - xorq %r10,%r10 - sbbq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - cmovcq %rbp,%r13 - movq %r12,0(%rdi) - cmovcq %rcx,%r8 - movq %r13,8(%rdi) - cmovcq %r10,%r9 - movq %r8,16(%rdi) - movq %r9,24(%rdi) - - .byte 0xf3,0xc3 - - - -.p2align 5 -ecp_nistz256_point_doublex: - -L$point_doublex: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $160+8,%rsp - -L$point_doublex_body: - -L$point_double_shortcutx: - movdqu 0(%rsi),%xmm0 - movq %rsi,%rbx - movdqu 16(%rsi),%xmm1 - movq 32+0(%rsi),%r12 - movq 32+8(%rsi),%r13 - movq 32+16(%rsi),%r8 - movq 32+24(%rsi),%r9 - movq L$poly+8(%rip),%r14 - movq L$poly+24(%rip),%r15 - movdqa %xmm0,96(%rsp) - movdqa %xmm1,96+16(%rsp) - leaq 32(%rdi),%r10 - leaq 64(%rdi),%r11 -.byte 102,72,15,110,199 -.byte 102,73,15,110,202 -.byte 102,73,15,110,211 - - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - leaq 64-128(%rsi),%rsi - leaq 64(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 
- movq 24+0(%rsp),%r8 - leaq 0(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 32(%rbx),%rdx - movq 64+0(%rbx),%r9 - movq 64+8(%rbx),%r10 - movq 64+16(%rbx),%r11 - movq 64+24(%rbx),%r12 - leaq 64-128(%rbx),%rsi - leaq 32(%rbx),%rbx -.byte 102,72,15,126,215 - call __ecp_nistz256_mul_montx - call __ecp_nistz256_mul_by_2x - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_tox - - movq 96+0(%rsp),%r12 - movq 96+8(%rsp),%r13 - leaq 64(%rsp),%rbx - movq 96+16(%rsp),%r8 - movq 96+24(%rsp),%r9 - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 -.byte 102,72,15,126,207 - call __ecp_nistz256_sqr_montx - xorq %r9,%r9 - movq %r12,%rax - addq $-1,%r12 - movq %r13,%r10 - adcq %rsi,%r13 - movq %r14,%rcx - adcq $0,%r14 - movq %r15,%r8 - adcq %rbp,%r15 - adcq $0,%r9 - xorq %rsi,%rsi - testq $1,%rax - - cmovzq %rax,%r12 - cmovzq %r10,%r13 - cmovzq %rcx,%r14 - cmovzq %r8,%r15 - cmovzq %rsi,%r9 - - movq %r13,%rax - shrq $1,%r12 - shlq $63,%rax - movq %r14,%r10 - shrq $1,%r13 - orq %rax,%r12 - shlq $63,%r10 - movq %r15,%rcx - shrq $1,%r14 - orq %r10,%r13 - shlq $63,%rcx - movq %r12,0(%rdi) - shrq $1,%r15 - movq %r13,8(%rdi) - shlq $63,%r9 - orq %rcx,%r14 - orq %r9,%r15 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - movq 64(%rsp),%rdx - leaq 64(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - leaq 32(%rsp),%rbx - leaq 32(%rsp),%rdi - call __ecp_nistz256_add_tox - - movq 96(%rsp),%rdx - leaq 96(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - 
leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_by_2x - - movq 0+32(%rsp),%rdx - movq 8+32(%rsp),%r14 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r15 - movq 24+32(%rsp),%r8 -.byte 102,72,15,126,199 - call __ecp_nistz256_sqr_montx - - leaq 128(%rsp),%rbx - movq %r14,%r8 - movq %r15,%r9 - movq %rsi,%r14 - movq %rbp,%r15 - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 0(%rsp),%rdi - call __ecp_nistz256_subx - - movq 32(%rsp),%rdx - leaq 32(%rsp),%rbx - movq %r12,%r14 - xorl %ecx,%ecx - movq %r12,0+0(%rsp) - movq %r13,%r10 - movq %r13,0+8(%rsp) - cmovzq %r8,%r11 - movq %r8,0+16(%rsp) - leaq 0-128(%rsp),%rsi - cmovzq %r9,%r12 - movq %r9,0+24(%rsp) - movq %r14,%r9 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - -.byte 102,72,15,126,203 -.byte 102,72,15,126,207 - call __ecp_nistz256_sub_fromx - - leaq 160+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$point_doublex_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -ecp_nistz256_point_addx: - -L$point_addx: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $576+8,%rsp - -L$point_addx_body: - - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq %rsi,%rbx - movq %rdx,%rsi - movdqa %xmm0,384(%rsp) - movdqa %xmm1,384+16(%rsp) - movdqa %xmm2,416(%rsp) - movdqa %xmm3,416+16(%rsp) - movdqa %xmm4,448(%rsp) - movdqa %xmm5,448+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rsi),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rsi),%xmm3 - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,480(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,480+16(%rsp) - movdqu 
64(%rsi),%xmm0 - movdqu 80(%rsi),%xmm1 - movdqa %xmm2,512(%rsp) - movdqa %xmm3,512+16(%rsp) - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm0,%xmm1 -.byte 102,72,15,110,199 - - leaq 64-128(%rsi),%rsi - movq %rdx,544+0(%rsp) - movq %r14,544+8(%rsp) - movq %r15,544+16(%rsp) - movq %r8,544+24(%rsp) - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm1,%xmm4 - por %xmm1,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - movq 64+0(%rbx),%rdx - movq 64+8(%rbx),%r14 - movq 64+16(%rbx),%r15 - movq 64+24(%rbx),%r8 -.byte 102,72,15,110,203 - - leaq 64-128(%rbx),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 544(%rsp),%rdx - leaq 544(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 416(%rsp),%rdx - leaq 416(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq -128+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 224(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 512(%rsp),%rdx - leaq 512(%rsp),%rbx - movq 0+256(%rsp),%r9 - movq 8+256(%rsp),%r10 - leaq -128+256(%rsp),%rsi - movq 16+256(%rsp),%r11 - movq 24+256(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 224(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - orq %r13,%r12 - movdqa %xmm4,%xmm2 - orq %r8,%r12 - orq %r9,%r12 - por %xmm5,%xmm2 -.byte 102,73,15,110,220 - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+96(%rsp),%r9 - movq 8+96(%rsp),%r10 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r11 - movq 24+96(%rsp),%r12 - leaq 
160(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 480(%rsp),%rdx - leaq 480(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 160(%rsp),%rbx - leaq 0(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - orq %r13,%r12 - orq %r8,%r12 - orq %r9,%r12 - -.byte 102,73,15,126,208 -.byte 102,73,15,126,217 - orq %r8,%r12 -.byte 0x3e - jnz L$add_proceedx - - - - testq %r9,%r9 - jz L$add_doublex - - - - - - -.byte 102,72,15,126,199 - pxor %xmm0,%xmm0 - movdqu %xmm0,0(%rdi) - movdqu %xmm0,16(%rdi) - movdqu %xmm0,32(%rdi) - movdqu %xmm0,48(%rdi) - movdqu %xmm0,64(%rdi) - movdqu %xmm0,80(%rdi) - jmp L$add_donex - -.p2align 5 -L$add_doublex: -.byte 102,72,15,126,206 -.byte 102,72,15,126,199 - addq $416,%rsp - - jmp L$point_double_shortcutx - - -.p2align 5 -L$add_proceedx: - movq 0+64(%rsp),%rdx - movq 8+64(%rsp),%r14 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 96(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+0(%rsp),%r9 - movq 8+0(%rsp),%r10 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r11 - movq 24+0(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 0+0(%rsp),%rdx - movq 8+0(%rsp),%r14 - leaq -128+0(%rsp),%rsi - movq 16+0(%rsp),%r15 - movq 24+0(%rsp),%r8 - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 544(%rsp),%rdx - leaq 544(%rsp),%rbx - movq 0+352(%rsp),%r9 - movq 8+352(%rsp),%r10 - leaq -128+352(%rsp),%rsi - movq 16+352(%rsp),%r11 - movq 24+352(%rsp),%r12 - leaq 352(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 0(%rsp),%rdx - leaq 0(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 128(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 160(%rsp),%rdx - leaq 160(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - 
leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 192(%rsp),%rdi - call __ecp_nistz256_mul_montx - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 96(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subx - - leaq 128(%rsp),%rbx - leaq 288(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 192+0(%rsp),%rax - movq 192+8(%rsp),%rbp - movq 192+16(%rsp),%rcx - movq 192+24(%rsp),%r10 - leaq 320(%rsp),%rdi - - call __ecp_nistz256_subx - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 128(%rsp),%rdx - leaq 128(%rsp),%rbx - movq 0+224(%rsp),%r9 - movq 8+224(%rsp),%r10 - leaq -128+224(%rsp),%rsi - movq 16+224(%rsp),%r11 - movq 24+224(%rsp),%r12 - leaq 256(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 320(%rsp),%rdx - leaq 320(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 320(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 256(%rsp),%rbx - leaq 320(%rsp),%rdi - call __ecp_nistz256_sub_fromx - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 352(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 352+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 544(%rsp),%xmm2 - pand 544+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu %xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa 
%xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 480(%rsp),%xmm2 - pand 480+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 320(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 320+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 512(%rsp),%xmm2 - pand 512+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - -L$add_donex: - leaq 576+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$point_addx_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -ecp_nistz256_point_add_affinex: - -L$point_add_affinex: - pushq %rbp - - pushq %rbx - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - subq $480+8,%rsp - -L$add_affinex_body: - - movdqu 0(%rsi),%xmm0 - movq %rdx,%rbx - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 - movdqu 48(%rsi),%xmm3 - movdqu 64(%rsi),%xmm4 - movdqu 80(%rsi),%xmm5 - movq 64+0(%rsi),%rdx - movq 64+8(%rsi),%r14 - movq 64+16(%rsi),%r15 - movq 64+24(%rsi),%r8 - movdqa %xmm0,320(%rsp) - movdqa %xmm1,320+16(%rsp) - movdqa %xmm2,352(%rsp) - movdqa %xmm3,352+16(%rsp) - movdqa %xmm4,384(%rsp) - movdqa %xmm5,384+16(%rsp) - por %xmm4,%xmm5 - - movdqu 0(%rbx),%xmm0 - pshufd $0xb1,%xmm5,%xmm3 - movdqu 16(%rbx),%xmm1 - movdqu 32(%rbx),%xmm2 - por %xmm3,%xmm5 - movdqu 48(%rbx),%xmm3 - movdqa %xmm0,416(%rsp) - pshufd $0x1e,%xmm5,%xmm4 - movdqa %xmm1,416+16(%rsp) - por 
%xmm0,%xmm1 -.byte 102,72,15,110,199 - movdqa %xmm2,448(%rsp) - movdqa %xmm3,448+16(%rsp) - por %xmm2,%xmm3 - por %xmm4,%xmm5 - pxor %xmm4,%xmm4 - por %xmm1,%xmm3 - - leaq 64-128(%rsi),%rsi - leaq 32(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - pcmpeqd %xmm4,%xmm5 - pshufd $0xb1,%xmm3,%xmm4 - movq 0(%rbx),%rdx - - movq %r12,%r9 - por %xmm3,%xmm4 - pshufd $0,%xmm5,%xmm5 - pshufd $0x1e,%xmm4,%xmm3 - movq %r13,%r10 - por %xmm3,%xmm4 - pxor %xmm3,%xmm3 - movq %r14,%r11 - pcmpeqd %xmm3,%xmm4 - pshufd $0,%xmm4,%xmm4 - - leaq 32-128(%rsp),%rsi - movq %r15,%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 320(%rsp),%rbx - leaq 64(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 384(%rsp),%rdx - leaq 384(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 288(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 448(%rsp),%rdx - leaq 448(%rsp),%rbx - movq 0+32(%rsp),%r9 - movq 8+32(%rsp),%r10 - leaq -128+32(%rsp),%rsi - movq 16+32(%rsp),%r11 - movq 24+32(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 352(%rsp),%rbx - leaq 96(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+64(%rsp),%rdx - movq 8+64(%rsp),%r14 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r15 - movq 24+64(%rsp),%r8 - leaq 128(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 0+96(%rsp),%rdx - movq 8+96(%rsp),%r14 - leaq -128+96(%rsp),%rsi - movq 16+96(%rsp),%r15 - movq 24+96(%rsp),%r8 - leaq 192(%rsp),%rdi - call __ecp_nistz256_sqr_montx - - movq 128(%rsp),%rdx - leaq 128(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 160(%rsp),%rdi - call __ecp_nistz256_mul_montx - - 
movq 320(%rsp),%rdx - leaq 320(%rsp),%rbx - movq 0+128(%rsp),%r9 - movq 8+128(%rsp),%r10 - leaq -128+128(%rsp),%rsi - movq 16+128(%rsp),%r11 - movq 24+128(%rsp),%r12 - leaq 0(%rsp),%rdi - call __ecp_nistz256_mul_montx - - - - - xorq %r11,%r11 - addq %r12,%r12 - leaq 192(%rsp),%rsi - adcq %r13,%r13 - movq %r12,%rax - adcq %r8,%r8 - adcq %r9,%r9 - movq %r13,%rbp - adcq $0,%r11 - - subq $-1,%r12 - movq %r8,%rcx - sbbq %r14,%r13 - sbbq $0,%r8 - movq %r9,%r10 - sbbq %r15,%r9 - sbbq $0,%r11 - - cmovcq %rax,%r12 - movq 0(%rsi),%rax - cmovcq %rbp,%r13 - movq 8(%rsi),%rbp - cmovcq %rcx,%r8 - movq 16(%rsi),%rcx - cmovcq %r10,%r9 - movq 24(%rsi),%r10 - - call __ecp_nistz256_subx - - leaq 160(%rsp),%rbx - leaq 224(%rsp),%rdi - call __ecp_nistz256_sub_fromx - - movq 0+0(%rsp),%rax - movq 0+8(%rsp),%rbp - movq 0+16(%rsp),%rcx - movq 0+24(%rsp),%r10 - leaq 64(%rsp),%rdi - - call __ecp_nistz256_subx - - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r8,16(%rdi) - movq %r9,24(%rdi) - movq 352(%rsp),%rdx - leaq 352(%rsp),%rbx - movq 0+160(%rsp),%r9 - movq 8+160(%rsp),%r10 - leaq -128+160(%rsp),%rsi - movq 16+160(%rsp),%r11 - movq 24+160(%rsp),%r12 - leaq 32(%rsp),%rdi - call __ecp_nistz256_mul_montx - - movq 96(%rsp),%rdx - leaq 96(%rsp),%rbx - movq 0+64(%rsp),%r9 - movq 8+64(%rsp),%r10 - leaq -128+64(%rsp),%rsi - movq 16+64(%rsp),%r11 - movq 24+64(%rsp),%r12 - leaq 64(%rsp),%rdi - call __ecp_nistz256_mul_montx - - leaq 32(%rsp),%rbx - leaq 256(%rsp),%rdi - call __ecp_nistz256_sub_fromx - -.byte 102,72,15,126,199 - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 288(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 288+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand L$ONE_mont(%rip),%xmm2 - pand L$ONE_mont+16(%rip),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 384(%rsp),%xmm2 - pand 384+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,64(%rdi) - movdqu 
%xmm3,80(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 224(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 224+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 416(%rsp),%xmm2 - pand 416+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 320(%rsp),%xmm2 - pand 320+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,0(%rdi) - movdqu %xmm3,16(%rdi) - - movdqa %xmm5,%xmm0 - movdqa %xmm5,%xmm1 - pandn 256(%rsp),%xmm0 - movdqa %xmm5,%xmm2 - pandn 256+16(%rsp),%xmm1 - movdqa %xmm5,%xmm3 - pand 448(%rsp),%xmm2 - pand 448+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - - movdqa %xmm4,%xmm0 - movdqa %xmm4,%xmm1 - pandn %xmm2,%xmm0 - movdqa %xmm4,%xmm2 - pandn %xmm3,%xmm1 - movdqa %xmm4,%xmm3 - pand 352(%rsp),%xmm2 - pand 352+16(%rsp),%xmm3 - por %xmm0,%xmm2 - por %xmm1,%xmm3 - movdqu %xmm2,32(%rdi) - movdqu %xmm3,48(%rdi) - - leaq 480+56(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbx - - movq -8(%rsi),%rbp - - leaq (%rsi),%rsp - -L$add_affinex_epilogue: - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S deleted file mode 100644 index 66fcfa3305..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S +++ /dev/null @@ -1,328 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.private_extern _beeu_mod_inverse_vartime -.globl _beeu_mod_inverse_vartime -.private_extern _beeu_mod_inverse_vartime -.p2align 5 -_beeu_mod_inverse_vartime: - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - pushq %rbx - - pushq %rsi - - - subq $80,%rsp - - movq %rdi,0(%rsp) - - - movq $1,%r8 - xorq %r9,%r9 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %rdi,%rdi - - xorq %r12,%r12 - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - xorq %rbp,%rbp - - - vmovdqu 0(%rsi),%xmm0 - vmovdqu 16(%rsi),%xmm1 - vmovdqu %xmm0,48(%rsp) - vmovdqu %xmm1,64(%rsp) - - vmovdqu 0(%rdx),%xmm0 - vmovdqu 16(%rdx),%xmm1 - vmovdqu %xmm0,16(%rsp) - vmovdqu %xmm1,32(%rsp) - -L$beeu_loop: - xorq %rbx,%rbx - orq 48(%rsp),%rbx - orq 56(%rsp),%rbx - orq 64(%rsp),%rbx - orq 72(%rsp),%rbx - jz L$beeu_loop_end - - - - - - - - - - - movq $1,%rcx - - -L$beeu_shift_loop_XB: - movq %rcx,%rbx - andq 48(%rsp),%rbx - jnz L$beeu_shift_loop_end_XB - - - movq $1,%rbx - andq %r8,%rbx - jz L$shift1_0 - addq 0(%rdx),%r8 - adcq 8(%rdx),%r9 - adcq 16(%rdx),%r10 - adcq 24(%rdx),%r11 - adcq $0,%rdi - -L$shift1_0: - shrdq $1,%r9,%r8 - shrdq $1,%r10,%r9 - shrdq $1,%r11,%r10 - shrdq $1,%rdi,%r11 - shrq $1,%rdi - - shlq $1,%rcx - - - - - - cmpq $0x8000000,%rcx - jne L$beeu_shift_loop_XB - -L$beeu_shift_loop_end_XB: - bsfq %rcx,%rcx - testq %rcx,%rcx - jz L$beeu_no_shift_XB - - - - movq 8+48(%rsp),%rax - movq 16+48(%rsp),%rbx - movq 24+48(%rsp),%rsi - - shrdq %cl,%rax,0+48(%rsp) - shrdq %cl,%rbx,8+48(%rsp) - shrdq %cl,%rsi,16+48(%rsp) - - shrq %cl,%rsi - movq %rsi,24+48(%rsp) - - -L$beeu_no_shift_XB: - - movq $1,%rcx - - -L$beeu_shift_loop_YA: - movq %rcx,%rbx - andq 16(%rsp),%rbx - jnz L$beeu_shift_loop_end_YA - - - movq $1,%rbx - andq %r12,%rbx 
- jz L$shift1_1 - addq 0(%rdx),%r12 - adcq 8(%rdx),%r13 - adcq 16(%rdx),%r14 - adcq 24(%rdx),%r15 - adcq $0,%rbp - -L$shift1_1: - shrdq $1,%r13,%r12 - shrdq $1,%r14,%r13 - shrdq $1,%r15,%r14 - shrdq $1,%rbp,%r15 - shrq $1,%rbp - - shlq $1,%rcx - - - - - - cmpq $0x8000000,%rcx - jne L$beeu_shift_loop_YA - -L$beeu_shift_loop_end_YA: - bsfq %rcx,%rcx - testq %rcx,%rcx - jz L$beeu_no_shift_YA - - - - movq 8+16(%rsp),%rax - movq 16+16(%rsp),%rbx - movq 24+16(%rsp),%rsi - - shrdq %cl,%rax,0+16(%rsp) - shrdq %cl,%rbx,8+16(%rsp) - shrdq %cl,%rsi,16+16(%rsp) - - shrq %cl,%rsi - movq %rsi,24+16(%rsp) - - -L$beeu_no_shift_YA: - - movq 48(%rsp),%rax - movq 56(%rsp),%rbx - movq 64(%rsp),%rsi - movq 72(%rsp),%rcx - subq 16(%rsp),%rax - sbbq 24(%rsp),%rbx - sbbq 32(%rsp),%rsi - sbbq 40(%rsp),%rcx - jnc L$beeu_B_bigger_than_A - - - movq 16(%rsp),%rax - movq 24(%rsp),%rbx - movq 32(%rsp),%rsi - movq 40(%rsp),%rcx - subq 48(%rsp),%rax - sbbq 56(%rsp),%rbx - sbbq 64(%rsp),%rsi - sbbq 72(%rsp),%rcx - movq %rax,16(%rsp) - movq %rbx,24(%rsp) - movq %rsi,32(%rsp) - movq %rcx,40(%rsp) - - - addq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - adcq %r11,%r15 - adcq %rdi,%rbp - jmp L$beeu_loop - -L$beeu_B_bigger_than_A: - - movq %rax,48(%rsp) - movq %rbx,56(%rsp) - movq %rsi,64(%rsp) - movq %rcx,72(%rsp) - - - addq %r12,%r8 - adcq %r13,%r9 - adcq %r14,%r10 - adcq %r15,%r11 - adcq %rbp,%rdi - - jmp L$beeu_loop - -L$beeu_loop_end: - - - - - movq 16(%rsp),%rbx - subq $1,%rbx - orq 24(%rsp),%rbx - orq 32(%rsp),%rbx - orq 40(%rsp),%rbx - - jnz L$beeu_err - - - - - movq 0(%rdx),%r8 - movq 8(%rdx),%r9 - movq 16(%rdx),%r10 - movq 24(%rdx),%r11 - xorq %rdi,%rdi - -L$beeu_reduction_loop: - movq %r12,16(%rsp) - movq %r13,24(%rsp) - movq %r14,32(%rsp) - movq %r15,40(%rsp) - movq %rbp,48(%rsp) - - - subq %r8,%r12 - sbbq %r9,%r13 - sbbq %r10,%r14 - sbbq %r11,%r15 - sbbq $0,%rbp - - - cmovcq 16(%rsp),%r12 - cmovcq 24(%rsp),%r13 - cmovcq 32(%rsp),%r14 - cmovcq 40(%rsp),%r15 - jnc L$beeu_reduction_loop - - - 
subq %r12,%r8 - sbbq %r13,%r9 - sbbq %r14,%r10 - sbbq %r15,%r11 - -L$beeu_save: - - movq 0(%rsp),%rdi - - movq %r8,0(%rdi) - movq %r9,8(%rdi) - movq %r10,16(%rdi) - movq %r11,24(%rdi) - - - movq $1,%rax - jmp L$beeu_finish - -L$beeu_err: - - xorq %rax,%rax - -L$beeu_finish: - addq $80,%rsp - - popq %rsi - - popq %rbx - - popq %r15 - - popq %r14 - - popq %r13 - - popq %r12 - - popq %rbp - - .byte 0xf3,0xc3 - - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S deleted file mode 100644 index f6f2be7ae1..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S +++ /dev/null @@ -1,62 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - -.globl _CRYPTO_rdrand -.private_extern _CRYPTO_rdrand - -.p2align 4 -_CRYPTO_rdrand: - - xorq %rax,%rax -.byte 72,15,199,242 - - adcq %rax,%rax - movq %rdx,0(%rdi) - .byte 0xf3,0xc3 - - - - - - - -.globl _CRYPTO_rdrand_multiple8_buf -.private_extern _CRYPTO_rdrand_multiple8_buf - -.p2align 4 -_CRYPTO_rdrand_multiple8_buf: - - testq %rsi,%rsi - jz L$out - movq $8,%rdx -L$loop: -.byte 72,15,199,241 - jnc L$err - movq %rcx,0(%rdi) - addq %rdx,%rdi - subq %rdx,%rsi - jnz L$loop -L$out: - movq $1,%rax - .byte 0xf3,0xc3 -L$err: - xorq %rax,%rax - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S deleted file mode 100644 index e9cae78c5d..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S +++ 
/dev/null @@ -1,1748 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - -.globl _rsaz_1024_sqr_avx2 -.private_extern _rsaz_1024_sqr_avx2 - -.p2align 6 -_rsaz_1024_sqr_avx2: - - leaq (%rsp),%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - vzeroupper - movq %rax,%rbp - - movq %rdx,%r13 - subq $832,%rsp - movq %r13,%r15 - subq $-128,%rdi - subq $-128,%rsi - subq $-128,%r13 - - andq $4095,%r15 - addq $320,%r15 - shrq $12,%r15 - vpxor %ymm9,%ymm9,%ymm9 - jz L$sqr_1024_no_n_copy - - - - - - subq $320,%rsp - vmovdqu 0-128(%r13),%ymm0 - andq $-2048,%rsp - vmovdqu 32-128(%r13),%ymm1 - vmovdqu 64-128(%r13),%ymm2 - vmovdqu 96-128(%r13),%ymm3 - vmovdqu 128-128(%r13),%ymm4 - vmovdqu 160-128(%r13),%ymm5 - vmovdqu 192-128(%r13),%ymm6 - vmovdqu 224-128(%r13),%ymm7 - vmovdqu 256-128(%r13),%ymm8 - leaq 832+128(%rsp),%r13 - vmovdqu %ymm0,0-128(%r13) - vmovdqu %ymm1,32-128(%r13) - vmovdqu %ymm2,64-128(%r13) - vmovdqu %ymm3,96-128(%r13) - vmovdqu %ymm4,128-128(%r13) - vmovdqu %ymm5,160-128(%r13) - vmovdqu %ymm6,192-128(%r13) - vmovdqu %ymm7,224-128(%r13) - vmovdqu %ymm8,256-128(%r13) - vmovdqu %ymm9,288-128(%r13) - -L$sqr_1024_no_n_copy: - andq $-1024,%rsp - - vmovdqu 32-128(%rsi),%ymm1 - vmovdqu 64-128(%rsi),%ymm2 - vmovdqu 96-128(%rsi),%ymm3 - vmovdqu 128-128(%rsi),%ymm4 - vmovdqu 160-128(%rsi),%ymm5 - vmovdqu 192-128(%rsi),%ymm6 - vmovdqu 224-128(%rsi),%ymm7 - vmovdqu 256-128(%rsi),%ymm8 - - leaq 192(%rsp),%rbx - vmovdqu L$and_mask(%rip),%ymm15 - jmp L$OOP_GRANDE_SQR_1024 - -.p2align 5 -L$OOP_GRANDE_SQR_1024: - leaq 576+128(%rsp),%r9 - leaq 448(%rsp),%r12 - - - - - vpaddq %ymm1,%ymm1,%ymm1 - vpbroadcastq 
0-128(%rsi),%ymm10 - vpaddq %ymm2,%ymm2,%ymm2 - vmovdqa %ymm1,0-128(%r9) - vpaddq %ymm3,%ymm3,%ymm3 - vmovdqa %ymm2,32-128(%r9) - vpaddq %ymm4,%ymm4,%ymm4 - vmovdqa %ymm3,64-128(%r9) - vpaddq %ymm5,%ymm5,%ymm5 - vmovdqa %ymm4,96-128(%r9) - vpaddq %ymm6,%ymm6,%ymm6 - vmovdqa %ymm5,128-128(%r9) - vpaddq %ymm7,%ymm7,%ymm7 - vmovdqa %ymm6,160-128(%r9) - vpaddq %ymm8,%ymm8,%ymm8 - vmovdqa %ymm7,192-128(%r9) - vpxor %ymm9,%ymm9,%ymm9 - vmovdqa %ymm8,224-128(%r9) - - vpmuludq 0-128(%rsi),%ymm10,%ymm0 - vpbroadcastq 32-128(%rsi),%ymm11 - vmovdqu %ymm9,288-192(%rbx) - vpmuludq %ymm10,%ymm1,%ymm1 - vmovdqu %ymm9,320-448(%r12) - vpmuludq %ymm10,%ymm2,%ymm2 - vmovdqu %ymm9,352-448(%r12) - vpmuludq %ymm10,%ymm3,%ymm3 - vmovdqu %ymm9,384-448(%r12) - vpmuludq %ymm10,%ymm4,%ymm4 - vmovdqu %ymm9,416-448(%r12) - vpmuludq %ymm10,%ymm5,%ymm5 - vmovdqu %ymm9,448-448(%r12) - vpmuludq %ymm10,%ymm6,%ymm6 - vmovdqu %ymm9,480-448(%r12) - vpmuludq %ymm10,%ymm7,%ymm7 - vmovdqu %ymm9,512-448(%r12) - vpmuludq %ymm10,%ymm8,%ymm8 - vpbroadcastq 64-128(%rsi),%ymm10 - vmovdqu %ymm9,544-448(%r12) - - movq %rsi,%r15 - movl $4,%r14d - jmp L$sqr_entry_1024 -.p2align 5 -L$OOP_SQR_1024: - vpbroadcastq 32-128(%r15),%ymm11 - vpmuludq 0-128(%rsi),%ymm10,%ymm0 - vpaddq 0-192(%rbx),%ymm0,%ymm0 - vpmuludq 0-128(%r9),%ymm10,%ymm1 - vpaddq 32-192(%rbx),%ymm1,%ymm1 - vpmuludq 32-128(%r9),%ymm10,%ymm2 - vpaddq 64-192(%rbx),%ymm2,%ymm2 - vpmuludq 64-128(%r9),%ymm10,%ymm3 - vpaddq 96-192(%rbx),%ymm3,%ymm3 - vpmuludq 96-128(%r9),%ymm10,%ymm4 - vpaddq 128-192(%rbx),%ymm4,%ymm4 - vpmuludq 128-128(%r9),%ymm10,%ymm5 - vpaddq 160-192(%rbx),%ymm5,%ymm5 - vpmuludq 160-128(%r9),%ymm10,%ymm6 - vpaddq 192-192(%rbx),%ymm6,%ymm6 - vpmuludq 192-128(%r9),%ymm10,%ymm7 - vpaddq 224-192(%rbx),%ymm7,%ymm7 - vpmuludq 224-128(%r9),%ymm10,%ymm8 - vpbroadcastq 64-128(%r15),%ymm10 - vpaddq 256-192(%rbx),%ymm8,%ymm8 -L$sqr_entry_1024: - vmovdqu %ymm0,0-192(%rbx) - vmovdqu %ymm1,32-192(%rbx) - - vpmuludq 32-128(%rsi),%ymm11,%ymm12 - vpaddq 
%ymm12,%ymm2,%ymm2 - vpmuludq 32-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq 64-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 96-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 128-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq 160-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 192-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 224-128(%r9),%ymm11,%ymm0 - vpbroadcastq 96-128(%r15),%ymm11 - vpaddq 288-192(%rbx),%ymm0,%ymm0 - - vmovdqu %ymm2,64-192(%rbx) - vmovdqu %ymm3,96-192(%rbx) - - vpmuludq 64-128(%rsi),%ymm10,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 64-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 96-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq 128-128(%r9),%ymm10,%ymm13 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 160-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 192-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm0,%ymm0 - vpmuludq 224-128(%r9),%ymm10,%ymm1 - vpbroadcastq 128-128(%r15),%ymm10 - vpaddq 320-448(%r12),%ymm1,%ymm1 - - vmovdqu %ymm4,128-192(%rbx) - vmovdqu %ymm5,160-192(%rbx) - - vpmuludq 96-128(%rsi),%ymm11,%ymm12 - vpaddq %ymm12,%ymm6,%ymm6 - vpmuludq 96-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm7,%ymm7 - vpmuludq 128-128(%r9),%ymm11,%ymm13 - vpaddq %ymm13,%ymm8,%ymm8 - vpmuludq 160-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm0,%ymm0 - vpmuludq 192-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm1,%ymm1 - vpmuludq 224-128(%r9),%ymm11,%ymm2 - vpbroadcastq 160-128(%r15),%ymm11 - vpaddq 352-448(%r12),%ymm2,%ymm2 - - vmovdqu %ymm6,192-192(%rbx) - vmovdqu %ymm7,224-192(%rbx) - - vpmuludq 128-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq 128-128(%r9),%ymm10,%ymm14 - vpaddq %ymm14,%ymm0,%ymm0 - vpmuludq 160-128(%r9),%ymm10,%ymm13 - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 192-128(%r9),%ymm10,%ymm12 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 224-128(%r9),%ymm10,%ymm3 - vpbroadcastq 192-128(%r15),%ymm10 - vpaddq 
384-448(%r12),%ymm3,%ymm3 - - vmovdqu %ymm8,256-192(%rbx) - vmovdqu %ymm0,288-192(%rbx) - leaq 8(%rbx),%rbx - - vpmuludq 160-128(%rsi),%ymm11,%ymm13 - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 160-128(%r9),%ymm11,%ymm12 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 192-128(%r9),%ymm11,%ymm14 - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq 224-128(%r9),%ymm11,%ymm4 - vpbroadcastq 224-128(%r15),%ymm11 - vpaddq 416-448(%r12),%ymm4,%ymm4 - - vmovdqu %ymm1,320-448(%r12) - vmovdqu %ymm2,352-448(%r12) - - vpmuludq 192-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm3,%ymm3 - vpmuludq 192-128(%r9),%ymm10,%ymm14 - vpbroadcastq 256-128(%r15),%ymm0 - vpaddq %ymm14,%ymm4,%ymm4 - vpmuludq 224-128(%r9),%ymm10,%ymm5 - vpbroadcastq 0+8-128(%r15),%ymm10 - vpaddq 448-448(%r12),%ymm5,%ymm5 - - vmovdqu %ymm3,384-448(%r12) - vmovdqu %ymm4,416-448(%r12) - leaq 8(%r15),%r15 - - vpmuludq 224-128(%rsi),%ymm11,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 224-128(%r9),%ymm11,%ymm6 - vpaddq 480-448(%r12),%ymm6,%ymm6 - - vpmuludq 256-128(%rsi),%ymm0,%ymm7 - vmovdqu %ymm5,448-448(%r12) - vpaddq 512-448(%r12),%ymm7,%ymm7 - vmovdqu %ymm6,480-448(%r12) - vmovdqu %ymm7,512-448(%r12) - leaq 8(%r12),%r12 - - decl %r14d - jnz L$OOP_SQR_1024 - - vmovdqu 256(%rsp),%ymm8 - vmovdqu 288(%rsp),%ymm1 - vmovdqu 320(%rsp),%ymm2 - leaq 192(%rsp),%rbx - - vpsrlq $29,%ymm8,%ymm14 - vpand %ymm15,%ymm8,%ymm8 - vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - - vpermq $0x93,%ymm14,%ymm14 - vpxor %ymm9,%ymm9,%ymm9 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm8,%ymm8 - vpblendd $3,%ymm11,%ymm9,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vpaddq %ymm11,%ymm2,%ymm2 - vmovdqu %ymm1,288-192(%rbx) - vmovdqu %ymm2,320-192(%rbx) - - movq (%rsp),%rax - movq 8(%rsp),%r10 - movq 16(%rsp),%r11 - movq 24(%rsp),%r12 - vmovdqu 32(%rsp),%ymm1 - vmovdqu 64-192(%rbx),%ymm2 - vmovdqu 96-192(%rbx),%ymm3 - vmovdqu 128-192(%rbx),%ymm4 - vmovdqu 160-192(%rbx),%ymm5 - vmovdqu 
192-192(%rbx),%ymm6 - vmovdqu 224-192(%rbx),%ymm7 - - movq %rax,%r9 - imull %ecx,%eax - andl $0x1fffffff,%eax - vmovd %eax,%xmm12 - - movq %rax,%rdx - imulq -128(%r13),%rax - vpbroadcastq %xmm12,%ymm12 - addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%r13),%rax - shrq $29,%r9 - addq %rax,%r10 - movq %rdx,%rax - imulq 16-128(%r13),%rax - addq %r9,%r10 - addq %rax,%r11 - imulq 24-128(%r13),%rdx - addq %rdx,%r12 - - movq %r10,%rax - imull %ecx,%eax - andl $0x1fffffff,%eax - - movl $9,%r14d - jmp L$OOP_REDUCE_1024 - -.p2align 5 -L$OOP_REDUCE_1024: - vmovd %eax,%xmm13 - vpbroadcastq %xmm13,%ymm13 - - vpmuludq 32-128(%r13),%ymm12,%ymm10 - movq %rax,%rdx - imulq -128(%r13),%rax - vpaddq %ymm10,%ymm1,%ymm1 - addq %rax,%r10 - vpmuludq 64-128(%r13),%ymm12,%ymm14 - movq %rdx,%rax - imulq 8-128(%r13),%rax - vpaddq %ymm14,%ymm2,%ymm2 - vpmuludq 96-128(%r13),%ymm12,%ymm11 -.byte 0x67 - addq %rax,%r11 -.byte 0x67 - movq %rdx,%rax - imulq 16-128(%r13),%rax - shrq $29,%r10 - vpaddq %ymm11,%ymm3,%ymm3 - vpmuludq 128-128(%r13),%ymm12,%ymm10 - addq %rax,%r12 - addq %r10,%r11 - vpaddq %ymm10,%ymm4,%ymm4 - vpmuludq 160-128(%r13),%ymm12,%ymm14 - movq %r11,%rax - imull %ecx,%eax - vpaddq %ymm14,%ymm5,%ymm5 - vpmuludq 192-128(%r13),%ymm12,%ymm11 - andl $0x1fffffff,%eax - vpaddq %ymm11,%ymm6,%ymm6 - vpmuludq 224-128(%r13),%ymm12,%ymm10 - vpaddq %ymm10,%ymm7,%ymm7 - vpmuludq 256-128(%r13),%ymm12,%ymm14 - vmovd %eax,%xmm12 - - vpaddq %ymm14,%ymm8,%ymm8 - - vpbroadcastq %xmm12,%ymm12 - - vpmuludq 32-8-128(%r13),%ymm13,%ymm11 - vmovdqu 96-8-128(%r13),%ymm14 - movq %rax,%rdx - imulq -128(%r13),%rax - vpaddq %ymm11,%ymm1,%ymm1 - vpmuludq 64-8-128(%r13),%ymm13,%ymm10 - vmovdqu 128-8-128(%r13),%ymm11 - addq %rax,%r11 - movq %rdx,%rax - imulq 8-128(%r13),%rax - vpaddq %ymm10,%ymm2,%ymm2 - addq %r12,%rax - shrq $29,%r11 - vpmuludq %ymm13,%ymm14,%ymm14 - vmovdqu 160-8-128(%r13),%ymm10 - addq %r11,%rax - vpaddq %ymm14,%ymm3,%ymm3 - vpmuludq %ymm13,%ymm11,%ymm11 - vmovdqu 192-8-128(%r13),%ymm14 -.byte 
0x67 - movq %rax,%r12 - imull %ecx,%eax - vpaddq %ymm11,%ymm4,%ymm4 - vpmuludq %ymm13,%ymm10,%ymm10 -.byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 - andl $0x1fffffff,%eax - vpaddq %ymm10,%ymm5,%ymm5 - vpmuludq %ymm13,%ymm14,%ymm14 - vmovdqu 256-8-128(%r13),%ymm10 - vpaddq %ymm14,%ymm6,%ymm6 - vpmuludq %ymm13,%ymm11,%ymm11 - vmovdqu 288-8-128(%r13),%ymm9 - vmovd %eax,%xmm0 - imulq -128(%r13),%rax - vpaddq %ymm11,%ymm7,%ymm7 - vpmuludq %ymm13,%ymm10,%ymm10 - vmovdqu 32-16-128(%r13),%ymm14 - vpbroadcastq %xmm0,%ymm0 - vpaddq %ymm10,%ymm8,%ymm8 - vpmuludq %ymm13,%ymm9,%ymm9 - vmovdqu 64-16-128(%r13),%ymm11 - addq %rax,%r12 - - vmovdqu 32-24-128(%r13),%ymm13 - vpmuludq %ymm12,%ymm14,%ymm14 - vmovdqu 96-16-128(%r13),%ymm10 - vpaddq %ymm14,%ymm1,%ymm1 - vpmuludq %ymm0,%ymm13,%ymm13 - vpmuludq %ymm12,%ymm11,%ymm11 -.byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff - vpaddq %ymm1,%ymm13,%ymm13 - vpaddq %ymm11,%ymm2,%ymm2 - vpmuludq %ymm12,%ymm10,%ymm10 - vmovdqu 160-16-128(%r13),%ymm11 -.byte 0x67 - vmovq %xmm13,%rax - vmovdqu %ymm13,(%rsp) - vpaddq %ymm10,%ymm3,%ymm3 - vpmuludq %ymm12,%ymm14,%ymm14 - vmovdqu 192-16-128(%r13),%ymm10 - vpaddq %ymm14,%ymm4,%ymm4 - vpmuludq %ymm12,%ymm11,%ymm11 - vmovdqu 224-16-128(%r13),%ymm14 - vpaddq %ymm11,%ymm5,%ymm5 - vpmuludq %ymm12,%ymm10,%ymm10 - vmovdqu 256-16-128(%r13),%ymm11 - vpaddq %ymm10,%ymm6,%ymm6 - vpmuludq %ymm12,%ymm14,%ymm14 - shrq $29,%r12 - vmovdqu 288-16-128(%r13),%ymm10 - addq %r12,%rax - vpaddq %ymm14,%ymm7,%ymm7 - vpmuludq %ymm12,%ymm11,%ymm11 - - movq %rax,%r9 - imull %ecx,%eax - vpaddq %ymm11,%ymm8,%ymm8 - vpmuludq %ymm12,%ymm10,%ymm10 - andl $0x1fffffff,%eax - vmovd %eax,%xmm12 - vmovdqu 96-24-128(%r13),%ymm11 -.byte 0x67 - vpaddq %ymm10,%ymm9,%ymm9 - vpbroadcastq %xmm12,%ymm12 - - vpmuludq 64-24-128(%r13),%ymm0,%ymm14 - vmovdqu 128-24-128(%r13),%ymm10 - movq %rax,%rdx - imulq -128(%r13),%rax - movq 8(%rsp),%r10 - vpaddq %ymm14,%ymm2,%ymm1 - vpmuludq %ymm0,%ymm11,%ymm11 - vmovdqu 160-24-128(%r13),%ymm14 - 
addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%r13),%rax -.byte 0x67 - shrq $29,%r9 - movq 16(%rsp),%r11 - vpaddq %ymm11,%ymm3,%ymm2 - vpmuludq %ymm0,%ymm10,%ymm10 - vmovdqu 192-24-128(%r13),%ymm11 - addq %rax,%r10 - movq %rdx,%rax - imulq 16-128(%r13),%rax - vpaddq %ymm10,%ymm4,%ymm3 - vpmuludq %ymm0,%ymm14,%ymm14 - vmovdqu 224-24-128(%r13),%ymm10 - imulq 24-128(%r13),%rdx - addq %rax,%r11 - leaq (%r9,%r10,1),%rax - vpaddq %ymm14,%ymm5,%ymm4 - vpmuludq %ymm0,%ymm11,%ymm11 - vmovdqu 256-24-128(%r13),%ymm14 - movq %rax,%r10 - imull %ecx,%eax - vpmuludq %ymm0,%ymm10,%ymm10 - vpaddq %ymm11,%ymm6,%ymm5 - vmovdqu 288-24-128(%r13),%ymm11 - andl $0x1fffffff,%eax - vpaddq %ymm10,%ymm7,%ymm6 - vpmuludq %ymm0,%ymm14,%ymm14 - addq 24(%rsp),%rdx - vpaddq %ymm14,%ymm8,%ymm7 - vpmuludq %ymm0,%ymm11,%ymm11 - vpaddq %ymm11,%ymm9,%ymm8 - vmovq %r12,%xmm9 - movq %rdx,%r12 - - decl %r14d - jnz L$OOP_REDUCE_1024 - leaq 448(%rsp),%r12 - vpaddq %ymm9,%ymm13,%ymm0 - vpxor %ymm9,%ymm9,%ymm9 - - vpaddq 288-192(%rbx),%ymm0,%ymm0 - vpaddq 320-448(%r12),%ymm1,%ymm1 - vpaddq 352-448(%r12),%ymm2,%ymm2 - vpaddq 384-448(%r12),%ymm3,%ymm3 - vpaddq 416-448(%r12),%ymm4,%ymm4 - vpaddq 448-448(%r12),%ymm5,%ymm5 - vpaddq 480-448(%r12),%ymm6,%ymm6 - vpaddq 512-448(%r12),%ymm7,%ymm7 - vpaddq 544-448(%r12),%ymm8,%ymm8 - - vpsrlq $29,%ymm0,%ymm14 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm12,%ymm12 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm13,%ymm13 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm0,%ymm0 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm2,%ymm2 - vpblendd $3,%ymm13,%ymm9,%ymm13 - vpaddq %ymm12,%ymm3,%ymm3 - vpaddq %ymm13,%ymm4,%ymm4 - - vpsrlq $29,%ymm0,%ymm14 - vpand %ymm15,%ymm0,%ymm0 - 
vpsrlq $29,%ymm1,%ymm11 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm12,%ymm12 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm13,%ymm13 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm0,%ymm0 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm1,%ymm1 - vmovdqu %ymm0,0-128(%rdi) - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm2,%ymm2 - vmovdqu %ymm1,32-128(%rdi) - vpblendd $3,%ymm13,%ymm9,%ymm13 - vpaddq %ymm12,%ymm3,%ymm3 - vmovdqu %ymm2,64-128(%rdi) - vpaddq %ymm13,%ymm4,%ymm4 - vmovdqu %ymm3,96-128(%rdi) - vpsrlq $29,%ymm4,%ymm14 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm11 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm13,%ymm13 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm4,%ymm4 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm5,%ymm5 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm6,%ymm6 - vpblendd $3,%ymm13,%ymm0,%ymm13 - vpaddq %ymm12,%ymm7,%ymm7 - vpaddq %ymm13,%ymm8,%ymm8 - - vpsrlq $29,%ymm4,%ymm14 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm11 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm12 - vpermq $0x93,%ymm14,%ymm14 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm13 - vpermq $0x93,%ymm11,%ymm11 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm13,%ymm13 - - vpblendd $3,%ymm9,%ymm14,%ymm10 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm14,%ymm11,%ymm14 - vpaddq %ymm10,%ymm4,%ymm4 - vpblendd $3,%ymm11,%ymm12,%ymm11 - vpaddq %ymm14,%ymm5,%ymm5 - 
vmovdqu %ymm4,128-128(%rdi) - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm11,%ymm6,%ymm6 - vmovdqu %ymm5,160-128(%rdi) - vpblendd $3,%ymm13,%ymm0,%ymm13 - vpaddq %ymm12,%ymm7,%ymm7 - vmovdqu %ymm6,192-128(%rdi) - vpaddq %ymm13,%ymm8,%ymm8 - vmovdqu %ymm7,224-128(%rdi) - vmovdqu %ymm8,256-128(%rdi) - - movq %rdi,%rsi - decl %r8d - jne L$OOP_GRANDE_SQR_1024 - - vzeroall - movq %rbp,%rax - - movq -48(%rax),%r15 - - movq -40(%rax),%r14 - - movq -32(%rax),%r13 - - movq -24(%rax),%r12 - - movq -16(%rax),%rbp - - movq -8(%rax),%rbx - - leaq (%rax),%rsp - -L$sqr_1024_epilogue: - .byte 0xf3,0xc3 - - -.globl _rsaz_1024_mul_avx2 -.private_extern _rsaz_1024_mul_avx2 - -.p2align 6 -_rsaz_1024_mul_avx2: - - leaq (%rsp),%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - movq %rax,%rbp - - vzeroall - movq %rdx,%r13 - subq $64,%rsp - - - - - - -.byte 0x67,0x67 - movq %rsi,%r15 - andq $4095,%r15 - addq $320,%r15 - shrq $12,%r15 - movq %rsi,%r15 - cmovnzq %r13,%rsi - cmovnzq %r15,%r13 - - movq %rcx,%r15 - subq $-128,%rsi - subq $-128,%rcx - subq $-128,%rdi - - andq $4095,%r15 - addq $320,%r15 -.byte 0x67,0x67 - shrq $12,%r15 - jz L$mul_1024_no_n_copy - - - - - - subq $320,%rsp - vmovdqu 0-128(%rcx),%ymm0 - andq $-512,%rsp - vmovdqu 32-128(%rcx),%ymm1 - vmovdqu 64-128(%rcx),%ymm2 - vmovdqu 96-128(%rcx),%ymm3 - vmovdqu 128-128(%rcx),%ymm4 - vmovdqu 160-128(%rcx),%ymm5 - vmovdqu 192-128(%rcx),%ymm6 - vmovdqu 224-128(%rcx),%ymm7 - vmovdqu 256-128(%rcx),%ymm8 - leaq 64+128(%rsp),%rcx - vmovdqu %ymm0,0-128(%rcx) - vpxor %ymm0,%ymm0,%ymm0 - vmovdqu %ymm1,32-128(%rcx) - vpxor %ymm1,%ymm1,%ymm1 - vmovdqu %ymm2,64-128(%rcx) - vpxor %ymm2,%ymm2,%ymm2 - vmovdqu %ymm3,96-128(%rcx) - vpxor %ymm3,%ymm3,%ymm3 - vmovdqu %ymm4,128-128(%rcx) - vpxor %ymm4,%ymm4,%ymm4 - vmovdqu %ymm5,160-128(%rcx) - vpxor %ymm5,%ymm5,%ymm5 - vmovdqu %ymm6,192-128(%rcx) - vpxor %ymm6,%ymm6,%ymm6 - vmovdqu %ymm7,224-128(%rcx) - vpxor %ymm7,%ymm7,%ymm7 - vmovdqu 
%ymm8,256-128(%rcx) - vmovdqa %ymm0,%ymm8 - vmovdqu %ymm9,288-128(%rcx) -L$mul_1024_no_n_copy: - andq $-64,%rsp - - movq (%r13),%rbx - vpbroadcastq (%r13),%ymm10 - vmovdqu %ymm0,(%rsp) - xorq %r9,%r9 -.byte 0x67 - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - - vmovdqu L$and_mask(%rip),%ymm15 - movl $9,%r14d - vmovdqu %ymm9,288-128(%rdi) - jmp L$oop_mul_1024 - -.p2align 5 -L$oop_mul_1024: - vpsrlq $29,%ymm3,%ymm9 - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %r9,%rax - movq %rbx,%r10 - imulq 8-128(%rsi),%r10 - addq 8(%rsp),%r10 - - movq %rax,%r9 - imull %r8d,%eax - andl $0x1fffffff,%eax - - movq %rbx,%r11 - imulq 16-128(%rsi),%r11 - addq 16(%rsp),%r11 - - movq %rbx,%r12 - imulq 24-128(%rsi),%r12 - addq 24(%rsp),%r12 - vpmuludq 32-128(%rsi),%ymm10,%ymm0 - vmovd %eax,%xmm11 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq 64-128(%rsi),%ymm10,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq 96-128(%rsi),%ymm10,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq 128-128(%rsi),%ymm10,%ymm0 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq 160-128(%rsi),%ymm10,%ymm12 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq 192-128(%rsi),%ymm10,%ymm13 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq 224-128(%rsi),%ymm10,%ymm0 - vpermq $0x93,%ymm9,%ymm9 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq 256-128(%rsi),%ymm10,%ymm12 - vpbroadcastq 8(%r13),%ymm10 - vpaddq %ymm12,%ymm8,%ymm8 - - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r9 - movq %rdx,%rax - imulq 8-128(%rcx),%rax - addq %rax,%r10 - movq %rdx,%rax - imulq 16-128(%rcx),%rax - addq %rax,%r11 - shrq $29,%r9 - imulq 24-128(%rcx),%rdx - addq %rdx,%r12 - addq %r9,%r10 - - vpmuludq 32-128(%rcx),%ymm11,%ymm13 - vmovq %xmm10,%rbx - vpaddq %ymm13,%ymm1,%ymm1 - vpmuludq 64-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm0,%ymm2,%ymm2 - vpmuludq 96-128(%rcx),%ymm11,%ymm12 - vpaddq %ymm12,%ymm3,%ymm3 - vpmuludq 128-128(%rcx),%ymm11,%ymm13 - vpaddq %ymm13,%ymm4,%ymm4 - vpmuludq 160-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm0,%ymm5,%ymm5 - 
vpmuludq 192-128(%rcx),%ymm11,%ymm12 - vpaddq %ymm12,%ymm6,%ymm6 - vpmuludq 224-128(%rcx),%ymm11,%ymm13 - vpblendd $3,%ymm14,%ymm9,%ymm12 - vpaddq %ymm13,%ymm7,%ymm7 - vpmuludq 256-128(%rcx),%ymm11,%ymm0 - vpaddq %ymm12,%ymm3,%ymm3 - vpaddq %ymm0,%ymm8,%ymm8 - - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %rax,%r10 - vmovdqu -8+32-128(%rsi),%ymm12 - movq %rbx,%rax - imulq 8-128(%rsi),%rax - addq %rax,%r11 - vmovdqu -8+64-128(%rsi),%ymm13 - - movq %r10,%rax - vpblendd $0xfc,%ymm14,%ymm9,%ymm9 - imull %r8d,%eax - vpaddq %ymm9,%ymm4,%ymm4 - andl $0x1fffffff,%eax - - imulq 16-128(%rsi),%rbx - addq %rbx,%r12 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovd %eax,%xmm11 - vmovdqu -8+96-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm13,%ymm13 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -8+128-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -8+160-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -8+192-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -8+224-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -8+256-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -8+288-128(%rsi),%ymm9 - vpaddq %ymm12,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm13,%ymm13 - vpaddq %ymm13,%ymm8,%ymm8 - vpmuludq %ymm10,%ymm9,%ymm9 - vpbroadcastq 16(%r13),%ymm10 - - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r10 - vmovdqu -8+32-128(%rcx),%ymm0 - movq %rdx,%rax - imulq 8-128(%rcx),%rax - addq %rax,%r11 - vmovdqu -8+64-128(%rcx),%ymm12 - shrq $29,%r10 - imulq 16-128(%rcx),%rdx - addq %rdx,%r12 - addq %r10,%r11 - - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -8+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -8+128-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -8+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - 
vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -8+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -8+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -8+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -8+288-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm11,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm11,%ymm13,%ymm13 - vpaddq %ymm13,%ymm9,%ymm9 - - vmovdqu -16+32-128(%rsi),%ymm0 - movq %rbx,%rax - imulq -128(%rsi),%rax - addq %r11,%rax - - vmovdqu -16+64-128(%rsi),%ymm12 - movq %rax,%r11 - imull %r8d,%eax - andl $0x1fffffff,%eax - - imulq 8-128(%rsi),%rbx - addq %rbx,%r12 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovd %eax,%xmm11 - vmovdqu -16+96-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm12,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -16+128-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -16+160-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -16+192-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -16+224-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -16+256-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -16+288-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm10,%ymm13,%ymm13 - vpbroadcastq 24(%r13),%ymm10 - vpaddq %ymm13,%ymm9,%ymm9 - - vmovdqu -16+32-128(%rcx),%ymm0 - movq %rax,%rdx - imulq -128(%rcx),%rax - addq %rax,%r11 - vmovdqu -16+64-128(%rcx),%ymm12 - imulq 8-128(%rcx),%rdx - addq %rdx,%r12 - shrq $29,%r11 - - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -16+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -16+128-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm11,%ymm13,%ymm13 - 
vmovdqu -16+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -16+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -16+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -16+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -16+288-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -24+32-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -24+64-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm9,%ymm9 - - addq %r11,%r12 - imulq -128(%rsi),%rbx - addq %rbx,%r12 - - movq %r12,%rax - imull %r8d,%eax - andl $0x1fffffff,%eax - - vpmuludq %ymm10,%ymm0,%ymm0 - vmovd %eax,%xmm11 - vmovdqu -24+96-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm1,%ymm1 - vpmuludq %ymm10,%ymm12,%ymm12 - vpbroadcastq %xmm11,%ymm11 - vmovdqu -24+128-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm2,%ymm2 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -24+160-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm3,%ymm3 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -24+192-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm4,%ymm4 - vpmuludq %ymm10,%ymm12,%ymm12 - vmovdqu -24+224-128(%rsi),%ymm0 - vpaddq %ymm12,%ymm5,%ymm5 - vpmuludq %ymm10,%ymm13,%ymm13 - vmovdqu -24+256-128(%rsi),%ymm12 - vpaddq %ymm13,%ymm6,%ymm6 - vpmuludq %ymm10,%ymm0,%ymm0 - vmovdqu -24+288-128(%rsi),%ymm13 - vpaddq %ymm0,%ymm7,%ymm7 - vpmuludq %ymm10,%ymm12,%ymm12 - vpaddq %ymm12,%ymm8,%ymm8 - vpmuludq %ymm10,%ymm13,%ymm13 - vpbroadcastq 32(%r13),%ymm10 - vpaddq %ymm13,%ymm9,%ymm9 - addq $32,%r13 - - vmovdqu -24+32-128(%rcx),%ymm0 - imulq -128(%rcx),%rax - addq %rax,%r12 - shrq $29,%r12 - - vmovdqu -24+64-128(%rcx),%ymm12 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovq %xmm10,%rbx - vmovdqu -24+96-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm1,%ymm0 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu %ymm0,(%rsp) - vpaddq %ymm12,%ymm2,%ymm1 - vmovdqu -24+128-128(%rcx),%ymm0 - vpmuludq 
%ymm11,%ymm13,%ymm13 - vmovdqu -24+160-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm3,%ymm2 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -24+192-128(%rcx),%ymm13 - vpaddq %ymm0,%ymm4,%ymm3 - vpmuludq %ymm11,%ymm12,%ymm12 - vmovdqu -24+224-128(%rcx),%ymm0 - vpaddq %ymm12,%ymm5,%ymm4 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovdqu -24+256-128(%rcx),%ymm12 - vpaddq %ymm13,%ymm6,%ymm5 - vpmuludq %ymm11,%ymm0,%ymm0 - vmovdqu -24+288-128(%rcx),%ymm13 - movq %r12,%r9 - vpaddq %ymm0,%ymm7,%ymm6 - vpmuludq %ymm11,%ymm12,%ymm12 - addq (%rsp),%r9 - vpaddq %ymm12,%ymm8,%ymm7 - vpmuludq %ymm11,%ymm13,%ymm13 - vmovq %r12,%xmm12 - vpaddq %ymm13,%ymm9,%ymm8 - - decl %r14d - jnz L$oop_mul_1024 - vpaddq (%rsp),%ymm12,%ymm0 - - vpsrlq $29,%ymm0,%ymm12 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm13 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm10,%ymm10 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpermq $0x93,%ymm11,%ymm11 - vpaddq %ymm9,%ymm0,%ymm0 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm1,%ymm1 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm2,%ymm2 - vpblendd $3,%ymm11,%ymm14,%ymm11 - vpaddq %ymm10,%ymm3,%ymm3 - vpaddq %ymm11,%ymm4,%ymm4 - - vpsrlq $29,%ymm0,%ymm12 - vpand %ymm15,%ymm0,%ymm0 - vpsrlq $29,%ymm1,%ymm13 - vpand %ymm15,%ymm1,%ymm1 - vpsrlq $29,%ymm2,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm2,%ymm2 - vpsrlq $29,%ymm3,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm3,%ymm3 - vpermq $0x93,%ymm10,%ymm10 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm11,%ymm11 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm0,%ymm0 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm1,%ymm1 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm2,%ymm2 - vpblendd $3,%ymm11,%ymm14,%ymm11 - vpaddq %ymm10,%ymm3,%ymm3 - vpaddq %ymm11,%ymm4,%ymm4 - - vmovdqu 
%ymm0,0-128(%rdi) - vmovdqu %ymm1,32-128(%rdi) - vmovdqu %ymm2,64-128(%rdi) - vmovdqu %ymm3,96-128(%rdi) - vpsrlq $29,%ymm4,%ymm12 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm13 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm10,%ymm10 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm4,%ymm4 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm5,%ymm5 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm6,%ymm6 - vpblendd $3,%ymm11,%ymm0,%ymm11 - vpaddq %ymm10,%ymm7,%ymm7 - vpaddq %ymm11,%ymm8,%ymm8 - - vpsrlq $29,%ymm4,%ymm12 - vpand %ymm15,%ymm4,%ymm4 - vpsrlq $29,%ymm5,%ymm13 - vpand %ymm15,%ymm5,%ymm5 - vpsrlq $29,%ymm6,%ymm10 - vpermq $0x93,%ymm12,%ymm12 - vpand %ymm15,%ymm6,%ymm6 - vpsrlq $29,%ymm7,%ymm11 - vpermq $0x93,%ymm13,%ymm13 - vpand %ymm15,%ymm7,%ymm7 - vpsrlq $29,%ymm8,%ymm0 - vpermq $0x93,%ymm10,%ymm10 - vpand %ymm15,%ymm8,%ymm8 - vpermq $0x93,%ymm11,%ymm11 - - vpblendd $3,%ymm14,%ymm12,%ymm9 - vpermq $0x93,%ymm0,%ymm0 - vpblendd $3,%ymm12,%ymm13,%ymm12 - vpaddq %ymm9,%ymm4,%ymm4 - vpblendd $3,%ymm13,%ymm10,%ymm13 - vpaddq %ymm12,%ymm5,%ymm5 - vpblendd $3,%ymm10,%ymm11,%ymm10 - vpaddq %ymm13,%ymm6,%ymm6 - vpblendd $3,%ymm11,%ymm0,%ymm11 - vpaddq %ymm10,%ymm7,%ymm7 - vpaddq %ymm11,%ymm8,%ymm8 - - vmovdqu %ymm4,128-128(%rdi) - vmovdqu %ymm5,160-128(%rdi) - vmovdqu %ymm6,192-128(%rdi) - vmovdqu %ymm7,224-128(%rdi) - vmovdqu %ymm8,256-128(%rdi) - vzeroupper - - movq %rbp,%rax - - movq -48(%rax),%r15 - - movq -40(%rax),%r14 - - movq -32(%rax),%r13 - - movq -24(%rax),%r12 - - movq -16(%rax),%rbp - - movq -8(%rax),%rbx - - leaq (%rax),%rsp - -L$mul_1024_epilogue: - .byte 0xf3,0xc3 - - -.globl _rsaz_1024_red2norm_avx2 -.private_extern 
_rsaz_1024_red2norm_avx2 - -.p2align 5 -_rsaz_1024_red2norm_avx2: - - subq $-128,%rsi - xorq %rax,%rax - movq -128(%rsi),%r8 - movq -120(%rsi),%r9 - movq -112(%rsi),%r10 - shlq $0,%r8 - shlq $29,%r9 - movq %r10,%r11 - shlq $58,%r10 - shrq $6,%r11 - addq %r8,%rax - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,0(%rdi) - movq %r11,%rax - movq -104(%rsi),%r8 - movq -96(%rsi),%r9 - shlq $23,%r8 - movq %r9,%r10 - shlq $52,%r9 - shrq $12,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,8(%rdi) - movq %r10,%rax - movq -88(%rsi),%r11 - movq -80(%rsi),%r8 - shlq $17,%r11 - movq %r8,%r9 - shlq $46,%r8 - shrq $18,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,16(%rdi) - movq %r9,%rax - movq -72(%rsi),%r10 - movq -64(%rsi),%r11 - shlq $11,%r10 - movq %r11,%r8 - shlq $40,%r11 - shrq $24,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,24(%rdi) - movq %r8,%rax - movq -56(%rsi),%r9 - movq -48(%rsi),%r10 - movq -40(%rsi),%r11 - shlq $5,%r9 - shlq $34,%r10 - movq %r11,%r8 - shlq $63,%r11 - shrq $1,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,32(%rdi) - movq %r8,%rax - movq -32(%rsi),%r9 - movq -24(%rsi),%r10 - shlq $28,%r9 - movq %r10,%r11 - shlq $57,%r10 - shrq $7,%r11 - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,40(%rdi) - movq %r11,%rax - movq -16(%rsi),%r8 - movq -8(%rsi),%r9 - shlq $22,%r8 - movq %r9,%r10 - shlq $51,%r9 - shrq $13,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,48(%rdi) - movq %r10,%rax - movq 0(%rsi),%r11 - movq 8(%rsi),%r8 - shlq $16,%r11 - movq %r8,%r9 - shlq $45,%r8 - shrq $19,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,56(%rdi) - movq %r9,%rax - movq 16(%rsi),%r10 - movq 24(%rsi),%r11 - shlq $10,%r10 - movq %r11,%r8 - shlq $39,%r11 - shrq $25,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,64(%rdi) - movq %r8,%rax - movq 32(%rsi),%r9 - movq 40(%rsi),%r10 - movq 48(%rsi),%r11 - shlq $4,%r9 - shlq 
$33,%r10 - movq %r11,%r8 - shlq $62,%r11 - shrq $2,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,72(%rdi) - movq %r8,%rax - movq 56(%rsi),%r9 - movq 64(%rsi),%r10 - shlq $27,%r9 - movq %r10,%r11 - shlq $56,%r10 - shrq $8,%r11 - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,80(%rdi) - movq %r11,%rax - movq 72(%rsi),%r8 - movq 80(%rsi),%r9 - shlq $21,%r8 - movq %r9,%r10 - shlq $50,%r9 - shrq $14,%r10 - addq %r8,%rax - addq %r9,%rax - adcq $0,%r10 - movq %rax,88(%rdi) - movq %r10,%rax - movq 88(%rsi),%r11 - movq 96(%rsi),%r8 - shlq $15,%r11 - movq %r8,%r9 - shlq $44,%r8 - shrq $20,%r9 - addq %r11,%rax - addq %r8,%rax - adcq $0,%r9 - movq %rax,96(%rdi) - movq %r9,%rax - movq 104(%rsi),%r10 - movq 112(%rsi),%r11 - shlq $9,%r10 - movq %r11,%r8 - shlq $38,%r11 - shrq $26,%r8 - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,104(%rdi) - movq %r8,%rax - movq 120(%rsi),%r9 - movq 128(%rsi),%r10 - movq 136(%rsi),%r11 - shlq $3,%r9 - shlq $32,%r10 - movq %r11,%r8 - shlq $61,%r11 - shrq $3,%r8 - addq %r9,%rax - addq %r10,%rax - addq %r11,%rax - adcq $0,%r8 - movq %rax,112(%rdi) - movq %r8,%rax - movq 144(%rsi),%r9 - movq 152(%rsi),%r10 - shlq $26,%r9 - movq %r10,%r11 - shlq $55,%r10 - shrq $9,%r11 - addq %r9,%rax - addq %r10,%rax - adcq $0,%r11 - movq %rax,120(%rdi) - movq %r11,%rax - .byte 0xf3,0xc3 - - - -.globl _rsaz_1024_norm2red_avx2 -.private_extern _rsaz_1024_norm2red_avx2 - -.p2align 5 -_rsaz_1024_norm2red_avx2: - - subq $-128,%rdi - movq (%rsi),%r8 - movl $0x1fffffff,%eax - movq 8(%rsi),%r9 - movq %r8,%r11 - shrq $0,%r11 - andq %rax,%r11 - movq %r11,-128(%rdi) - movq %r8,%r10 - shrq $29,%r10 - andq %rax,%r10 - movq %r10,-120(%rdi) - shrdq $58,%r9,%r8 - andq %rax,%r8 - movq %r8,-112(%rdi) - movq 16(%rsi),%r10 - movq %r9,%r8 - shrq $23,%r8 - andq %rax,%r8 - movq %r8,-104(%rdi) - shrdq $52,%r10,%r9 - andq %rax,%r9 - movq %r9,-96(%rdi) - movq 24(%rsi),%r11 - movq %r10,%r9 - shrq $17,%r9 - andq %rax,%r9 - movq 
%r9,-88(%rdi) - shrdq $46,%r11,%r10 - andq %rax,%r10 - movq %r10,-80(%rdi) - movq 32(%rsi),%r8 - movq %r11,%r10 - shrq $11,%r10 - andq %rax,%r10 - movq %r10,-72(%rdi) - shrdq $40,%r8,%r11 - andq %rax,%r11 - movq %r11,-64(%rdi) - movq 40(%rsi),%r9 - movq %r8,%r11 - shrq $5,%r11 - andq %rax,%r11 - movq %r11,-56(%rdi) - movq %r8,%r10 - shrq $34,%r10 - andq %rax,%r10 - movq %r10,-48(%rdi) - shrdq $63,%r9,%r8 - andq %rax,%r8 - movq %r8,-40(%rdi) - movq 48(%rsi),%r10 - movq %r9,%r8 - shrq $28,%r8 - andq %rax,%r8 - movq %r8,-32(%rdi) - shrdq $57,%r10,%r9 - andq %rax,%r9 - movq %r9,-24(%rdi) - movq 56(%rsi),%r11 - movq %r10,%r9 - shrq $22,%r9 - andq %rax,%r9 - movq %r9,-16(%rdi) - shrdq $51,%r11,%r10 - andq %rax,%r10 - movq %r10,-8(%rdi) - movq 64(%rsi),%r8 - movq %r11,%r10 - shrq $16,%r10 - andq %rax,%r10 - movq %r10,0(%rdi) - shrdq $45,%r8,%r11 - andq %rax,%r11 - movq %r11,8(%rdi) - movq 72(%rsi),%r9 - movq %r8,%r11 - shrq $10,%r11 - andq %rax,%r11 - movq %r11,16(%rdi) - shrdq $39,%r9,%r8 - andq %rax,%r8 - movq %r8,24(%rdi) - movq 80(%rsi),%r10 - movq %r9,%r8 - shrq $4,%r8 - andq %rax,%r8 - movq %r8,32(%rdi) - movq %r9,%r11 - shrq $33,%r11 - andq %rax,%r11 - movq %r11,40(%rdi) - shrdq $62,%r10,%r9 - andq %rax,%r9 - movq %r9,48(%rdi) - movq 88(%rsi),%r11 - movq %r10,%r9 - shrq $27,%r9 - andq %rax,%r9 - movq %r9,56(%rdi) - shrdq $56,%r11,%r10 - andq %rax,%r10 - movq %r10,64(%rdi) - movq 96(%rsi),%r8 - movq %r11,%r10 - shrq $21,%r10 - andq %rax,%r10 - movq %r10,72(%rdi) - shrdq $50,%r8,%r11 - andq %rax,%r11 - movq %r11,80(%rdi) - movq 104(%rsi),%r9 - movq %r8,%r11 - shrq $15,%r11 - andq %rax,%r11 - movq %r11,88(%rdi) - shrdq $44,%r9,%r8 - andq %rax,%r8 - movq %r8,96(%rdi) - movq 112(%rsi),%r10 - movq %r9,%r8 - shrq $9,%r8 - andq %rax,%r8 - movq %r8,104(%rdi) - shrdq $38,%r10,%r9 - andq %rax,%r9 - movq %r9,112(%rdi) - movq 120(%rsi),%r11 - movq %r10,%r9 - shrq $3,%r9 - andq %rax,%r9 - movq %r9,120(%rdi) - movq %r10,%r8 - shrq $32,%r8 - andq %rax,%r8 - movq %r8,128(%rdi) - 
shrdq $61,%r11,%r10 - andq %rax,%r10 - movq %r10,136(%rdi) - xorq %r8,%r8 - movq %r11,%r10 - shrq $26,%r10 - andq %rax,%r10 - movq %r10,144(%rdi) - shrdq $55,%r8,%r11 - andq %rax,%r11 - movq %r11,152(%rdi) - movq %r8,160(%rdi) - movq %r8,168(%rdi) - movq %r8,176(%rdi) - movq %r8,184(%rdi) - .byte 0xf3,0xc3 - - -.globl _rsaz_1024_scatter5_avx2 -.private_extern _rsaz_1024_scatter5_avx2 - -.p2align 5 -_rsaz_1024_scatter5_avx2: - - vzeroupper - vmovdqu L$scatter_permd(%rip),%ymm5 - shll $4,%edx - leaq (%rdi,%rdx,1),%rdi - movl $9,%eax - jmp L$oop_scatter_1024 - -.p2align 5 -L$oop_scatter_1024: - vmovdqu (%rsi),%ymm0 - leaq 32(%rsi),%rsi - vpermd %ymm0,%ymm5,%ymm0 - vmovdqu %xmm0,(%rdi) - leaq 512(%rdi),%rdi - decl %eax - jnz L$oop_scatter_1024 - - vzeroupper - .byte 0xf3,0xc3 - - - -.globl _rsaz_1024_gather5_avx2 -.private_extern _rsaz_1024_gather5_avx2 - -.p2align 5 -_rsaz_1024_gather5_avx2: - - vzeroupper - movq %rsp,%r11 - - leaq -256(%rsp),%rsp - andq $-32,%rsp - leaq L$inc(%rip),%r10 - leaq -128(%rsp),%rax - - vmovd %edx,%xmm4 - vmovdqa (%r10),%ymm0 - vmovdqa 32(%r10),%ymm1 - vmovdqa 64(%r10),%ymm5 - vpbroadcastd %xmm4,%ymm4 - - vpaddd %ymm5,%ymm0,%ymm2 - vpcmpeqd %ymm4,%ymm0,%ymm0 - vpaddd %ymm5,%ymm1,%ymm3 - vpcmpeqd %ymm4,%ymm1,%ymm1 - vmovdqa %ymm0,0+128(%rax) - vpaddd %ymm5,%ymm2,%ymm0 - vpcmpeqd %ymm4,%ymm2,%ymm2 - vmovdqa %ymm1,32+128(%rax) - vpaddd %ymm5,%ymm3,%ymm1 - vpcmpeqd %ymm4,%ymm3,%ymm3 - vmovdqa %ymm2,64+128(%rax) - vpaddd %ymm5,%ymm0,%ymm2 - vpcmpeqd %ymm4,%ymm0,%ymm0 - vmovdqa %ymm3,96+128(%rax) - vpaddd %ymm5,%ymm1,%ymm3 - vpcmpeqd %ymm4,%ymm1,%ymm1 - vmovdqa %ymm0,128+128(%rax) - vpaddd %ymm5,%ymm2,%ymm8 - vpcmpeqd %ymm4,%ymm2,%ymm2 - vmovdqa %ymm1,160+128(%rax) - vpaddd %ymm5,%ymm3,%ymm9 - vpcmpeqd %ymm4,%ymm3,%ymm3 - vmovdqa %ymm2,192+128(%rax) - vpaddd %ymm5,%ymm8,%ymm10 - vpcmpeqd %ymm4,%ymm8,%ymm8 - vmovdqa %ymm3,224+128(%rax) - vpaddd %ymm5,%ymm9,%ymm11 - vpcmpeqd %ymm4,%ymm9,%ymm9 - vpaddd %ymm5,%ymm10,%ymm12 - vpcmpeqd 
%ymm4,%ymm10,%ymm10 - vpaddd %ymm5,%ymm11,%ymm13 - vpcmpeqd %ymm4,%ymm11,%ymm11 - vpaddd %ymm5,%ymm12,%ymm14 - vpcmpeqd %ymm4,%ymm12,%ymm12 - vpaddd %ymm5,%ymm13,%ymm15 - vpcmpeqd %ymm4,%ymm13,%ymm13 - vpcmpeqd %ymm4,%ymm14,%ymm14 - vpcmpeqd %ymm4,%ymm15,%ymm15 - - vmovdqa -32(%r10),%ymm7 - leaq 128(%rsi),%rsi - movl $9,%edx - -L$oop_gather_1024: - vmovdqa 0-128(%rsi),%ymm0 - vmovdqa 32-128(%rsi),%ymm1 - vmovdqa 64-128(%rsi),%ymm2 - vmovdqa 96-128(%rsi),%ymm3 - vpand 0+128(%rax),%ymm0,%ymm0 - vpand 32+128(%rax),%ymm1,%ymm1 - vpand 64+128(%rax),%ymm2,%ymm2 - vpor %ymm0,%ymm1,%ymm4 - vpand 96+128(%rax),%ymm3,%ymm3 - vmovdqa 128-128(%rsi),%ymm0 - vmovdqa 160-128(%rsi),%ymm1 - vpor %ymm2,%ymm3,%ymm5 - vmovdqa 192-128(%rsi),%ymm2 - vmovdqa 224-128(%rsi),%ymm3 - vpand 128+128(%rax),%ymm0,%ymm0 - vpand 160+128(%rax),%ymm1,%ymm1 - vpand 192+128(%rax),%ymm2,%ymm2 - vpor %ymm0,%ymm4,%ymm4 - vpand 224+128(%rax),%ymm3,%ymm3 - vpand 256-128(%rsi),%ymm8,%ymm0 - vpor %ymm1,%ymm5,%ymm5 - vpand 288-128(%rsi),%ymm9,%ymm1 - vpor %ymm2,%ymm4,%ymm4 - vpand 320-128(%rsi),%ymm10,%ymm2 - vpor %ymm3,%ymm5,%ymm5 - vpand 352-128(%rsi),%ymm11,%ymm3 - vpor %ymm0,%ymm4,%ymm4 - vpand 384-128(%rsi),%ymm12,%ymm0 - vpor %ymm1,%ymm5,%ymm5 - vpand 416-128(%rsi),%ymm13,%ymm1 - vpor %ymm2,%ymm4,%ymm4 - vpand 448-128(%rsi),%ymm14,%ymm2 - vpor %ymm3,%ymm5,%ymm5 - vpand 480-128(%rsi),%ymm15,%ymm3 - leaq 512(%rsi),%rsi - vpor %ymm0,%ymm4,%ymm4 - vpor %ymm1,%ymm5,%ymm5 - vpor %ymm2,%ymm4,%ymm4 - vpor %ymm3,%ymm5,%ymm5 - - vpor %ymm5,%ymm4,%ymm4 - vextracti128 $1,%ymm4,%xmm5 - vpor %xmm4,%xmm5,%xmm5 - vpermd %ymm5,%ymm7,%ymm5 - vmovdqu %ymm5,(%rdi) - leaq 32(%rdi),%rdi - decl %edx - jnz L$oop_gather_1024 - - vpxor %ymm0,%ymm0,%ymm0 - vmovdqu %ymm0,(%rdi) - vzeroupper - leaq (%r11),%rsp - - .byte 0xf3,0xc3 - -L$SEH_end_rsaz_1024_gather5: - -.p2align 6 -L$and_mask: -.quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff -L$scatter_permd: -.long 0,2,4,6,7,7,7,7 -L$gather_permd: -.long 0,7,1,7,2,7,3,7 -L$inc: -.long 
0,0,0,0, 1,1,1,1 -.long 2,2,2,2, 3,3,3,3 -.long 4,4,4,4, 4,4,4,4 -.p2align 6 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S deleted file mode 100644 index ace121e359..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S +++ /dev/null @@ -1,3599 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.globl _sha1_block_data_order -.private_extern _sha1_block_data_order - -.p2align 4 -_sha1_block_data_order: - - leaq _OPENSSL_ia32cap_P(%rip),%r10 - movl 0(%r10),%r9d - movl 4(%r10),%r8d - movl 8(%r10),%r10d - testl $512,%r8d - jz L$ialu - andl $268435456,%r8d - andl $1073741824,%r9d - orl %r9d,%r8d - cmpl $1342177280,%r8d - je _avx_shortcut - jmp _ssse3_shortcut - -.p2align 4 -L$ialu: - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - movq %rdi,%r8 - subq $72,%rsp - movq %rsi,%r9 - andq $-64,%rsp - movq %rdx,%r10 - movq %rax,64(%rsp) - -L$prologue: - - movl 0(%r8),%esi - movl 4(%r8),%edi - movl 8(%r8),%r11d - movl 12(%r8),%r12d - movl 16(%r8),%r13d - jmp L$loop - -.p2align 4 -L$loop: - movl 0(%r9),%edx - bswapl %edx - movl 4(%r9),%ebp - movl %r12d,%eax - movl %edx,0(%rsp) - movl %esi,%ecx - bswapl %ebp - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%rdx,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl %eax,%r13d - movl 8(%r9),%r14d - movl %r11d,%eax - movl %ebp,4(%rsp) - movl %r13d,%ecx - bswapl %r14d - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%rbp,%r12,1),%r12d - addl %ecx,%r12d - 
xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 12(%r9),%edx - movl %edi,%eax - movl %r14d,8(%rsp) - movl %r12d,%ecx - bswapl %edx - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%r14,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll $30,%r13d - addl %eax,%r11d - movl 16(%r9),%ebp - movl %esi,%eax - movl %edx,12(%rsp) - movl %r11d,%ecx - bswapl %ebp - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%rdx,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 20(%r9),%r14d - movl %r13d,%eax - movl %ebp,16(%rsp) - movl %edi,%ecx - bswapl %r14d - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%rbp,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll $30,%r11d - addl %eax,%esi - movl 24(%r9),%edx - movl %r12d,%eax - movl %r14d,20(%rsp) - movl %esi,%ecx - bswapl %edx - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%r14,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl %eax,%r13d - movl 28(%r9),%ebp - movl %r11d,%eax - movl %edx,24(%rsp) - movl %r13d,%ecx - bswapl %ebp - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%rdx,%r12,1),%r12d - addl %ecx,%r12d - xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 32(%r9),%r14d - movl %edi,%eax - movl %ebp,28(%rsp) - movl %r12d,%ecx - bswapl %r14d - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%rbp,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll $30,%r13d - addl %eax,%r11d - movl 36(%r9),%edx - movl %esi,%eax - movl %r14d,32(%rsp) - movl %r11d,%ecx - bswapl %edx - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%r14,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 40(%r9),%ebp - movl %r13d,%eax - movl %edx,36(%rsp) - movl %edi,%ecx - bswapl %ebp - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%rdx,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll 
$30,%r11d - addl %eax,%esi - movl 44(%r9),%r14d - movl %r12d,%eax - movl %ebp,40(%rsp) - movl %esi,%ecx - bswapl %r14d - xorl %r11d,%eax - roll $5,%ecx - andl %edi,%eax - leal 1518500249(%rbp,%r13,1),%r13d - addl %ecx,%r13d - xorl %r12d,%eax - roll $30,%edi - addl %eax,%r13d - movl 48(%r9),%edx - movl %r11d,%eax - movl %r14d,44(%rsp) - movl %r13d,%ecx - bswapl %edx - xorl %edi,%eax - roll $5,%ecx - andl %esi,%eax - leal 1518500249(%r14,%r12,1),%r12d - addl %ecx,%r12d - xorl %r11d,%eax - roll $30,%esi - addl %eax,%r12d - movl 52(%r9),%ebp - movl %edi,%eax - movl %edx,48(%rsp) - movl %r12d,%ecx - bswapl %ebp - xorl %esi,%eax - roll $5,%ecx - andl %r13d,%eax - leal 1518500249(%rdx,%r11,1),%r11d - addl %ecx,%r11d - xorl %edi,%eax - roll $30,%r13d - addl %eax,%r11d - movl 56(%r9),%r14d - movl %esi,%eax - movl %ebp,52(%rsp) - movl %r11d,%ecx - bswapl %r14d - xorl %r13d,%eax - roll $5,%ecx - andl %r12d,%eax - leal 1518500249(%rbp,%rdi,1),%edi - addl %ecx,%edi - xorl %esi,%eax - roll $30,%r12d - addl %eax,%edi - movl 60(%r9),%edx - movl %r13d,%eax - movl %r14d,56(%rsp) - movl %edi,%ecx - bswapl %edx - xorl %r12d,%eax - roll $5,%ecx - andl %r11d,%eax - leal 1518500249(%r14,%rsi,1),%esi - addl %ecx,%esi - xorl %r13d,%eax - roll $30,%r11d - addl %eax,%esi - xorl 0(%rsp),%ebp - movl %r12d,%eax - movl %edx,60(%rsp) - movl %esi,%ecx - xorl 8(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 32(%rsp),%ebp - andl %edi,%eax - leal 1518500249(%rdx,%r13,1),%r13d - roll $30,%edi - xorl %r12d,%eax - addl %ecx,%r13d - roll $1,%ebp - addl %eax,%r13d - xorl 4(%rsp),%r14d - movl %r11d,%eax - movl %ebp,0(%rsp) - movl %r13d,%ecx - xorl 12(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 36(%rsp),%r14d - andl %esi,%eax - leal 1518500249(%rbp,%r12,1),%r12d - roll $30,%esi - xorl %r11d,%eax - addl %ecx,%r12d - roll $1,%r14d - addl %eax,%r12d - xorl 8(%rsp),%edx - movl %edi,%eax - movl %r14d,4(%rsp) - movl %r12d,%ecx - xorl 16(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 40(%rsp),%edx - 
andl %r13d,%eax - leal 1518500249(%r14,%r11,1),%r11d - roll $30,%r13d - xorl %edi,%eax - addl %ecx,%r11d - roll $1,%edx - addl %eax,%r11d - xorl 12(%rsp),%ebp - movl %esi,%eax - movl %edx,8(%rsp) - movl %r11d,%ecx - xorl 20(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 44(%rsp),%ebp - andl %r12d,%eax - leal 1518500249(%rdx,%rdi,1),%edi - roll $30,%r12d - xorl %esi,%eax - addl %ecx,%edi - roll $1,%ebp - addl %eax,%edi - xorl 16(%rsp),%r14d - movl %r13d,%eax - movl %ebp,12(%rsp) - movl %edi,%ecx - xorl 24(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 48(%rsp),%r14d - andl %r11d,%eax - leal 1518500249(%rbp,%rsi,1),%esi - roll $30,%r11d - xorl %r13d,%eax - addl %ecx,%esi - roll $1,%r14d - addl %eax,%esi - xorl 20(%rsp),%edx - movl %edi,%eax - movl %r14d,16(%rsp) - movl %esi,%ecx - xorl 28(%rsp),%edx - xorl %r12d,%eax - roll $5,%ecx - xorl 52(%rsp),%edx - leal 1859775393(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 24(%rsp),%ebp - movl %esi,%eax - movl %edx,20(%rsp) - movl %r13d,%ecx - xorl 32(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 56(%rsp),%ebp - leal 1859775393(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 28(%rsp),%r14d - movl %r13d,%eax - movl %ebp,24(%rsp) - movl %r12d,%ecx - xorl 36(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 60(%rsp),%r14d - leal 1859775393(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 32(%rsp),%edx - movl %r12d,%eax - movl %r14d,28(%rsp) - movl %r11d,%ecx - xorl 40(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 0(%rsp),%edx - leal 1859775393(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 36(%rsp),%ebp - movl %r11d,%eax - movl %edx,32(%rsp) - movl %edi,%ecx - xorl 44(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 4(%rsp),%ebp - leal 
1859775393(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 40(%rsp),%r14d - movl %edi,%eax - movl %ebp,36(%rsp) - movl %esi,%ecx - xorl 48(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 8(%rsp),%r14d - leal 1859775393(%rbp,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%r14d - xorl 44(%rsp),%edx - movl %esi,%eax - movl %r14d,40(%rsp) - movl %r13d,%ecx - xorl 52(%rsp),%edx - xorl %r11d,%eax - roll $5,%ecx - xorl 12(%rsp),%edx - leal 1859775393(%r14,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%edx - xorl 48(%rsp),%ebp - movl %r13d,%eax - movl %edx,44(%rsp) - movl %r12d,%ecx - xorl 56(%rsp),%ebp - xorl %edi,%eax - roll $5,%ecx - xorl 16(%rsp),%ebp - leal 1859775393(%rdx,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%ebp - xorl 52(%rsp),%r14d - movl %r12d,%eax - movl %ebp,48(%rsp) - movl %r11d,%ecx - xorl 60(%rsp),%r14d - xorl %esi,%eax - roll $5,%ecx - xorl 20(%rsp),%r14d - leal 1859775393(%rbp,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%r14d - xorl 56(%rsp),%edx - movl %r11d,%eax - movl %r14d,52(%rsp) - movl %edi,%ecx - xorl 0(%rsp),%edx - xorl %r13d,%eax - roll $5,%ecx - xorl 24(%rsp),%edx - leal 1859775393(%r14,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%edx - xorl 60(%rsp),%ebp - movl %edi,%eax - movl %edx,56(%rsp) - movl %esi,%ecx - xorl 4(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 28(%rsp),%ebp - leal 1859775393(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 0(%rsp),%r14d - movl %esi,%eax - movl %ebp,60(%rsp) - movl %r13d,%ecx - xorl 8(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 32(%rsp),%r14d - leal 1859775393(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - 
addl %eax,%r12d - roll $1,%r14d - xorl 4(%rsp),%edx - movl %r13d,%eax - movl %r14d,0(%rsp) - movl %r12d,%ecx - xorl 12(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 36(%rsp),%edx - leal 1859775393(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%edx - xorl 8(%rsp),%ebp - movl %r12d,%eax - movl %edx,4(%rsp) - movl %r11d,%ecx - xorl 16(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 40(%rsp),%ebp - leal 1859775393(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - xorl 12(%rsp),%r14d - movl %r11d,%eax - movl %ebp,8(%rsp) - movl %edi,%ecx - xorl 20(%rsp),%r14d - xorl %r13d,%eax - roll $5,%ecx - xorl 44(%rsp),%r14d - leal 1859775393(%rbp,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%r14d - xorl 16(%rsp),%edx - movl %edi,%eax - movl %r14d,12(%rsp) - movl %esi,%ecx - xorl 24(%rsp),%edx - xorl %r12d,%eax - roll $5,%ecx - xorl 48(%rsp),%edx - leal 1859775393(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 20(%rsp),%ebp - movl %esi,%eax - movl %edx,16(%rsp) - movl %r13d,%ecx - xorl 28(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 52(%rsp),%ebp - leal 1859775393(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 24(%rsp),%r14d - movl %r13d,%eax - movl %ebp,20(%rsp) - movl %r12d,%ecx - xorl 32(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 56(%rsp),%r14d - leal 1859775393(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 28(%rsp),%edx - movl %r12d,%eax - movl %r14d,24(%rsp) - movl %r11d,%ecx - xorl 36(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 60(%rsp),%edx - leal 1859775393(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 32(%rsp),%ebp - movl %r11d,%eax - movl 
%edx,28(%rsp) - movl %edi,%ecx - xorl 40(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 0(%rsp),%ebp - leal 1859775393(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 36(%rsp),%r14d - movl %r12d,%eax - movl %ebp,32(%rsp) - movl %r12d,%ebx - xorl 44(%rsp),%r14d - andl %r11d,%eax - movl %esi,%ecx - xorl 4(%rsp),%r14d - leal -1894007588(%rbp,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%r14d - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 40(%rsp),%edx - movl %r11d,%eax - movl %r14d,36(%rsp) - movl %r11d,%ebx - xorl 48(%rsp),%edx - andl %edi,%eax - movl %r13d,%ecx - xorl 8(%rsp),%edx - leal -1894007588(%r14,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%edx - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 44(%rsp),%ebp - movl %edi,%eax - movl %edx,40(%rsp) - movl %edi,%ebx - xorl 52(%rsp),%ebp - andl %esi,%eax - movl %r12d,%ecx - xorl 12(%rsp),%ebp - leal -1894007588(%rdx,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%ebp - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 48(%rsp),%r14d - movl %esi,%eax - movl %ebp,44(%rsp) - movl %esi,%ebx - xorl 56(%rsp),%r14d - andl %r13d,%eax - movl %r11d,%ecx - xorl 16(%rsp),%r14d - leal -1894007588(%rbp,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%r14d - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 52(%rsp),%edx - movl %r13d,%eax - movl %r14d,48(%rsp) - movl %r13d,%ebx - xorl 60(%rsp),%edx - andl %r12d,%eax - movl %edi,%ecx - xorl 20(%rsp),%edx - leal -1894007588(%r14,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%edx - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 56(%rsp),%ebp - movl %r12d,%eax - movl %edx,52(%rsp) - movl %r12d,%ebx - xorl 0(%rsp),%ebp - andl %r11d,%eax - movl %esi,%ecx 
- xorl 24(%rsp),%ebp - leal -1894007588(%rdx,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%ebp - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 60(%rsp),%r14d - movl %r11d,%eax - movl %ebp,56(%rsp) - movl %r11d,%ebx - xorl 4(%rsp),%r14d - andl %edi,%eax - movl %r13d,%ecx - xorl 28(%rsp),%r14d - leal -1894007588(%rbp,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%r14d - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 0(%rsp),%edx - movl %edi,%eax - movl %r14d,60(%rsp) - movl %edi,%ebx - xorl 8(%rsp),%edx - andl %esi,%eax - movl %r12d,%ecx - xorl 32(%rsp),%edx - leal -1894007588(%r14,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%edx - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 4(%rsp),%ebp - movl %esi,%eax - movl %edx,0(%rsp) - movl %esi,%ebx - xorl 12(%rsp),%ebp - andl %r13d,%eax - movl %r11d,%ecx - xorl 36(%rsp),%ebp - leal -1894007588(%rdx,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%ebp - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 8(%rsp),%r14d - movl %r13d,%eax - movl %ebp,4(%rsp) - movl %r13d,%ebx - xorl 16(%rsp),%r14d - andl %r12d,%eax - movl %edi,%ecx - xorl 40(%rsp),%r14d - leal -1894007588(%rbp,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%r14d - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 12(%rsp),%edx - movl %r12d,%eax - movl %r14d,8(%rsp) - movl %r12d,%ebx - xorl 20(%rsp),%edx - andl %r11d,%eax - movl %esi,%ecx - xorl 44(%rsp),%edx - leal -1894007588(%r14,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%edx - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 16(%rsp),%ebp - movl %r11d,%eax - movl %edx,12(%rsp) - movl %r11d,%ebx - xorl 24(%rsp),%ebp - andl %edi,%eax - movl %r13d,%ecx - xorl 48(%rsp),%ebp - leal 
-1894007588(%rdx,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%ebp - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 20(%rsp),%r14d - movl %edi,%eax - movl %ebp,16(%rsp) - movl %edi,%ebx - xorl 28(%rsp),%r14d - andl %esi,%eax - movl %r12d,%ecx - xorl 52(%rsp),%r14d - leal -1894007588(%rbp,%r11,1),%r11d - xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%r14d - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 24(%rsp),%edx - movl %esi,%eax - movl %r14d,20(%rsp) - movl %esi,%ebx - xorl 32(%rsp),%edx - andl %r13d,%eax - movl %r11d,%ecx - xorl 56(%rsp),%edx - leal -1894007588(%r14,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%edx - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 28(%rsp),%ebp - movl %r13d,%eax - movl %edx,24(%rsp) - movl %r13d,%ebx - xorl 36(%rsp),%ebp - andl %r12d,%eax - movl %edi,%ecx - xorl 60(%rsp),%ebp - leal -1894007588(%rdx,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%ebp - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 32(%rsp),%r14d - movl %r12d,%eax - movl %ebp,28(%rsp) - movl %r12d,%ebx - xorl 40(%rsp),%r14d - andl %r11d,%eax - movl %esi,%ecx - xorl 0(%rsp),%r14d - leal -1894007588(%rbp,%r13,1),%r13d - xorl %r11d,%ebx - roll $5,%ecx - addl %eax,%r13d - roll $1,%r14d - andl %edi,%ebx - addl %ecx,%r13d - roll $30,%edi - addl %ebx,%r13d - xorl 36(%rsp),%edx - movl %r11d,%eax - movl %r14d,32(%rsp) - movl %r11d,%ebx - xorl 44(%rsp),%edx - andl %edi,%eax - movl %r13d,%ecx - xorl 4(%rsp),%edx - leal -1894007588(%r14,%r12,1),%r12d - xorl %edi,%ebx - roll $5,%ecx - addl %eax,%r12d - roll $1,%edx - andl %esi,%ebx - addl %ecx,%r12d - roll $30,%esi - addl %ebx,%r12d - xorl 40(%rsp),%ebp - movl %edi,%eax - movl %edx,36(%rsp) - movl %edi,%ebx - xorl 48(%rsp),%ebp - andl %esi,%eax - movl %r12d,%ecx - xorl 8(%rsp),%ebp - leal -1894007588(%rdx,%r11,1),%r11d - 
xorl %esi,%ebx - roll $5,%ecx - addl %eax,%r11d - roll $1,%ebp - andl %r13d,%ebx - addl %ecx,%r11d - roll $30,%r13d - addl %ebx,%r11d - xorl 44(%rsp),%r14d - movl %esi,%eax - movl %ebp,40(%rsp) - movl %esi,%ebx - xorl 52(%rsp),%r14d - andl %r13d,%eax - movl %r11d,%ecx - xorl 12(%rsp),%r14d - leal -1894007588(%rbp,%rdi,1),%edi - xorl %r13d,%ebx - roll $5,%ecx - addl %eax,%edi - roll $1,%r14d - andl %r12d,%ebx - addl %ecx,%edi - roll $30,%r12d - addl %ebx,%edi - xorl 48(%rsp),%edx - movl %r13d,%eax - movl %r14d,44(%rsp) - movl %r13d,%ebx - xorl 56(%rsp),%edx - andl %r12d,%eax - movl %edi,%ecx - xorl 16(%rsp),%edx - leal -1894007588(%r14,%rsi,1),%esi - xorl %r12d,%ebx - roll $5,%ecx - addl %eax,%esi - roll $1,%edx - andl %r11d,%ebx - addl %ecx,%esi - roll $30,%r11d - addl %ebx,%esi - xorl 52(%rsp),%ebp - movl %edi,%eax - movl %edx,48(%rsp) - movl %esi,%ecx - xorl 60(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 20(%rsp),%ebp - leal -899497514(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 56(%rsp),%r14d - movl %esi,%eax - movl %ebp,52(%rsp) - movl %r13d,%ecx - xorl 0(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 24(%rsp),%r14d - leal -899497514(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%r14d - xorl 60(%rsp),%edx - movl %r13d,%eax - movl %r14d,56(%rsp) - movl %r12d,%ecx - xorl 4(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 28(%rsp),%edx - leal -899497514(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%edx - xorl 0(%rsp),%ebp - movl %r12d,%eax - movl %edx,60(%rsp) - movl %r11d,%ecx - xorl 8(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 32(%rsp),%ebp - leal -899497514(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - xorl 4(%rsp),%r14d - movl %r11d,%eax - movl %ebp,0(%rsp) - movl %edi,%ecx - xorl 12(%rsp),%r14d - xorl %r13d,%eax 
- roll $5,%ecx - xorl 36(%rsp),%r14d - leal -899497514(%rbp,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%r14d - xorl 8(%rsp),%edx - movl %edi,%eax - movl %r14d,4(%rsp) - movl %esi,%ecx - xorl 16(%rsp),%edx - xorl %r12d,%eax - roll $5,%ecx - xorl 40(%rsp),%edx - leal -899497514(%r14,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%edx - xorl 12(%rsp),%ebp - movl %esi,%eax - movl %edx,8(%rsp) - movl %r13d,%ecx - xorl 20(%rsp),%ebp - xorl %r11d,%eax - roll $5,%ecx - xorl 44(%rsp),%ebp - leal -899497514(%rdx,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%ebp - xorl 16(%rsp),%r14d - movl %r13d,%eax - movl %ebp,12(%rsp) - movl %r12d,%ecx - xorl 24(%rsp),%r14d - xorl %edi,%eax - roll $5,%ecx - xorl 48(%rsp),%r14d - leal -899497514(%rbp,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%r14d - xorl 20(%rsp),%edx - movl %r12d,%eax - movl %r14d,16(%rsp) - movl %r11d,%ecx - xorl 28(%rsp),%edx - xorl %esi,%eax - roll $5,%ecx - xorl 52(%rsp),%edx - leal -899497514(%r14,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%edx - xorl 24(%rsp),%ebp - movl %r11d,%eax - movl %edx,20(%rsp) - movl %edi,%ecx - xorl 32(%rsp),%ebp - xorl %r13d,%eax - roll $5,%ecx - xorl 56(%rsp),%ebp - leal -899497514(%rdx,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%ebp - xorl 28(%rsp),%r14d - movl %edi,%eax - movl %ebp,24(%rsp) - movl %esi,%ecx - xorl 36(%rsp),%r14d - xorl %r12d,%eax - roll $5,%ecx - xorl 60(%rsp),%r14d - leal -899497514(%rbp,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%r14d - xorl 32(%rsp),%edx - movl %esi,%eax - movl %r14d,28(%rsp) - movl %r13d,%ecx - xorl 40(%rsp),%edx - xorl %r11d,%eax - roll $5,%ecx - xorl 0(%rsp),%edx - leal -899497514(%r14,%r12,1),%r12d - xorl 
%edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%edx - xorl 36(%rsp),%ebp - movl %r13d,%eax - - movl %r12d,%ecx - xorl 44(%rsp),%ebp - xorl %edi,%eax - roll $5,%ecx - xorl 4(%rsp),%ebp - leal -899497514(%rdx,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%ebp - xorl 40(%rsp),%r14d - movl %r12d,%eax - - movl %r11d,%ecx - xorl 48(%rsp),%r14d - xorl %esi,%eax - roll $5,%ecx - xorl 8(%rsp),%r14d - leal -899497514(%rbp,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%r14d - xorl 44(%rsp),%edx - movl %r11d,%eax - - movl %edi,%ecx - xorl 52(%rsp),%edx - xorl %r13d,%eax - roll $5,%ecx - xorl 12(%rsp),%edx - leal -899497514(%r14,%rsi,1),%esi - xorl %r12d,%eax - addl %ecx,%esi - roll $30,%r11d - addl %eax,%esi - roll $1,%edx - xorl 48(%rsp),%ebp - movl %edi,%eax - - movl %esi,%ecx - xorl 56(%rsp),%ebp - xorl %r12d,%eax - roll $5,%ecx - xorl 16(%rsp),%ebp - leal -899497514(%rdx,%r13,1),%r13d - xorl %r11d,%eax - addl %ecx,%r13d - roll $30,%edi - addl %eax,%r13d - roll $1,%ebp - xorl 52(%rsp),%r14d - movl %esi,%eax - - movl %r13d,%ecx - xorl 60(%rsp),%r14d - xorl %r11d,%eax - roll $5,%ecx - xorl 20(%rsp),%r14d - leal -899497514(%rbp,%r12,1),%r12d - xorl %edi,%eax - addl %ecx,%r12d - roll $30,%esi - addl %eax,%r12d - roll $1,%r14d - xorl 56(%rsp),%edx - movl %r13d,%eax - - movl %r12d,%ecx - xorl 0(%rsp),%edx - xorl %edi,%eax - roll $5,%ecx - xorl 24(%rsp),%edx - leal -899497514(%r14,%r11,1),%r11d - xorl %esi,%eax - addl %ecx,%r11d - roll $30,%r13d - addl %eax,%r11d - roll $1,%edx - xorl 60(%rsp),%ebp - movl %r12d,%eax - - movl %r11d,%ecx - xorl 4(%rsp),%ebp - xorl %esi,%eax - roll $5,%ecx - xorl 28(%rsp),%ebp - leal -899497514(%rdx,%rdi,1),%edi - xorl %r13d,%eax - addl %ecx,%edi - roll $30,%r12d - addl %eax,%edi - roll $1,%ebp - movl %r11d,%eax - movl %edi,%ecx - xorl %r13d,%eax - leal -899497514(%rbp,%rsi,1),%esi - roll $5,%ecx - xorl %r12d,%eax - addl %ecx,%esi - 
roll $30,%r11d - addl %eax,%esi - addl 0(%r8),%esi - addl 4(%r8),%edi - addl 8(%r8),%r11d - addl 12(%r8),%r12d - addl 16(%r8),%r13d - movl %esi,0(%r8) - movl %edi,4(%r8) - movl %r11d,8(%r8) - movl %r12d,12(%r8) - movl %r13d,16(%r8) - - subq $1,%r10 - leaq 64(%r9),%r9 - jnz L$loop - - movq 64(%rsp),%rsi - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue: - .byte 0xf3,0xc3 - - - -.p2align 4 -sha1_block_data_order_ssse3: -_ssse3_shortcut: - - movq %rsp,%r11 - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - leaq -64(%rsp),%rsp - andq $-64,%rsp - movq %rdi,%r8 - movq %rsi,%r9 - movq %rdx,%r10 - - shlq $6,%r10 - addq %r9,%r10 - leaq K_XX_XX+64(%rip),%r14 - - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movl %ebx,%esi - movl 16(%r8),%ebp - movl %ecx,%edi - xorl %edx,%edi - andl %edi,%esi - - movdqa 64(%r14),%xmm6 - movdqa -64(%r14),%xmm9 - movdqu 0(%r9),%xmm0 - movdqu 16(%r9),%xmm1 - movdqu 32(%r9),%xmm2 - movdqu 48(%r9),%xmm3 -.byte 102,15,56,0,198 -.byte 102,15,56,0,206 -.byte 102,15,56,0,214 - addq $64,%r9 - paddd %xmm9,%xmm0 -.byte 102,15,56,0,222 - paddd %xmm9,%xmm1 - paddd %xmm9,%xmm2 - movdqa %xmm0,0(%rsp) - psubd %xmm9,%xmm0 - movdqa %xmm1,16(%rsp) - psubd %xmm9,%xmm1 - movdqa %xmm2,32(%rsp) - psubd %xmm9,%xmm2 - jmp L$oop_ssse3 -.p2align 4 -L$oop_ssse3: - rorl $2,%ebx - pshufd $238,%xmm0,%xmm4 - xorl %edx,%esi - movdqa %xmm3,%xmm8 - paddd %xmm3,%xmm9 - movl %eax,%edi - addl 0(%rsp),%ebp - punpcklqdq %xmm1,%xmm4 - xorl %ecx,%ebx - roll $5,%eax - addl %esi,%ebp - psrldq $4,%xmm8 - andl %ebx,%edi - xorl %ecx,%ebx - pxor %xmm0,%xmm4 - addl %eax,%ebp - rorl $7,%eax - pxor %xmm2,%xmm8 - xorl %ecx,%edi - movl %ebp,%esi - addl 4(%rsp),%edx - pxor %xmm8,%xmm4 - xorl %ebx,%eax - roll $5,%ebp - movdqa %xmm9,48(%rsp) - addl %edi,%edx - andl %eax,%esi - movdqa %xmm4,%xmm10 - xorl %ebx,%eax - addl %ebp,%edx - rorl 
$7,%ebp - movdqa %xmm4,%xmm8 - xorl %ebx,%esi - pslldq $12,%xmm10 - paddd %xmm4,%xmm4 - movl %edx,%edi - addl 8(%rsp),%ecx - psrld $31,%xmm8 - xorl %eax,%ebp - roll $5,%edx - addl %esi,%ecx - movdqa %xmm10,%xmm9 - andl %ebp,%edi - xorl %eax,%ebp - psrld $30,%xmm10 - addl %edx,%ecx - rorl $7,%edx - por %xmm8,%xmm4 - xorl %eax,%edi - movl %ecx,%esi - addl 12(%rsp),%ebx - pslld $2,%xmm9 - pxor %xmm10,%xmm4 - xorl %ebp,%edx - movdqa -64(%r14),%xmm10 - roll $5,%ecx - addl %edi,%ebx - andl %edx,%esi - pxor %xmm9,%xmm4 - xorl %ebp,%edx - addl %ecx,%ebx - rorl $7,%ecx - pshufd $238,%xmm1,%xmm5 - xorl %ebp,%esi - movdqa %xmm4,%xmm9 - paddd %xmm4,%xmm10 - movl %ebx,%edi - addl 16(%rsp),%eax - punpcklqdq %xmm2,%xmm5 - xorl %edx,%ecx - roll $5,%ebx - addl %esi,%eax - psrldq $4,%xmm9 - andl %ecx,%edi - xorl %edx,%ecx - pxor %xmm1,%xmm5 - addl %ebx,%eax - rorl $7,%ebx - pxor %xmm3,%xmm9 - xorl %edx,%edi - movl %eax,%esi - addl 20(%rsp),%ebp - pxor %xmm9,%xmm5 - xorl %ecx,%ebx - roll $5,%eax - movdqa %xmm10,0(%rsp) - addl %edi,%ebp - andl %ebx,%esi - movdqa %xmm5,%xmm8 - xorl %ecx,%ebx - addl %eax,%ebp - rorl $7,%eax - movdqa %xmm5,%xmm9 - xorl %ecx,%esi - pslldq $12,%xmm8 - paddd %xmm5,%xmm5 - movl %ebp,%edi - addl 24(%rsp),%edx - psrld $31,%xmm9 - xorl %ebx,%eax - roll $5,%ebp - addl %esi,%edx - movdqa %xmm8,%xmm10 - andl %eax,%edi - xorl %ebx,%eax - psrld $30,%xmm8 - addl %ebp,%edx - rorl $7,%ebp - por %xmm9,%xmm5 - xorl %ebx,%edi - movl %edx,%esi - addl 28(%rsp),%ecx - pslld $2,%xmm10 - pxor %xmm8,%xmm5 - xorl %eax,%ebp - movdqa -32(%r14),%xmm8 - roll $5,%edx - addl %edi,%ecx - andl %ebp,%esi - pxor %xmm10,%xmm5 - xorl %eax,%ebp - addl %edx,%ecx - rorl $7,%edx - pshufd $238,%xmm2,%xmm6 - xorl %eax,%esi - movdqa %xmm5,%xmm10 - paddd %xmm5,%xmm8 - movl %ecx,%edi - addl 32(%rsp),%ebx - punpcklqdq %xmm3,%xmm6 - xorl %ebp,%edx - roll $5,%ecx - addl %esi,%ebx - psrldq $4,%xmm10 - andl %edx,%edi - xorl %ebp,%edx - pxor %xmm2,%xmm6 - addl %ecx,%ebx - rorl $7,%ecx - pxor %xmm4,%xmm10 
- xorl %ebp,%edi - movl %ebx,%esi - addl 36(%rsp),%eax - pxor %xmm10,%xmm6 - xorl %edx,%ecx - roll $5,%ebx - movdqa %xmm8,16(%rsp) - addl %edi,%eax - andl %ecx,%esi - movdqa %xmm6,%xmm9 - xorl %edx,%ecx - addl %ebx,%eax - rorl $7,%ebx - movdqa %xmm6,%xmm10 - xorl %edx,%esi - pslldq $12,%xmm9 - paddd %xmm6,%xmm6 - movl %eax,%edi - addl 40(%rsp),%ebp - psrld $31,%xmm10 - xorl %ecx,%ebx - roll $5,%eax - addl %esi,%ebp - movdqa %xmm9,%xmm8 - andl %ebx,%edi - xorl %ecx,%ebx - psrld $30,%xmm9 - addl %eax,%ebp - rorl $7,%eax - por %xmm10,%xmm6 - xorl %ecx,%edi - movl %ebp,%esi - addl 44(%rsp),%edx - pslld $2,%xmm8 - pxor %xmm9,%xmm6 - xorl %ebx,%eax - movdqa -32(%r14),%xmm9 - roll $5,%ebp - addl %edi,%edx - andl %eax,%esi - pxor %xmm8,%xmm6 - xorl %ebx,%eax - addl %ebp,%edx - rorl $7,%ebp - pshufd $238,%xmm3,%xmm7 - xorl %ebx,%esi - movdqa %xmm6,%xmm8 - paddd %xmm6,%xmm9 - movl %edx,%edi - addl 48(%rsp),%ecx - punpcklqdq %xmm4,%xmm7 - xorl %eax,%ebp - roll $5,%edx - addl %esi,%ecx - psrldq $4,%xmm8 - andl %ebp,%edi - xorl %eax,%ebp - pxor %xmm3,%xmm7 - addl %edx,%ecx - rorl $7,%edx - pxor %xmm5,%xmm8 - xorl %eax,%edi - movl %ecx,%esi - addl 52(%rsp),%ebx - pxor %xmm8,%xmm7 - xorl %ebp,%edx - roll $5,%ecx - movdqa %xmm9,32(%rsp) - addl %edi,%ebx - andl %edx,%esi - movdqa %xmm7,%xmm10 - xorl %ebp,%edx - addl %ecx,%ebx - rorl $7,%ecx - movdqa %xmm7,%xmm8 - xorl %ebp,%esi - pslldq $12,%xmm10 - paddd %xmm7,%xmm7 - movl %ebx,%edi - addl 56(%rsp),%eax - psrld $31,%xmm8 - xorl %edx,%ecx - roll $5,%ebx - addl %esi,%eax - movdqa %xmm10,%xmm9 - andl %ecx,%edi - xorl %edx,%ecx - psrld $30,%xmm10 - addl %ebx,%eax - rorl $7,%ebx - por %xmm8,%xmm7 - xorl %edx,%edi - movl %eax,%esi - addl 60(%rsp),%ebp - pslld $2,%xmm9 - pxor %xmm10,%xmm7 - xorl %ecx,%ebx - movdqa -32(%r14),%xmm10 - roll $5,%eax - addl %edi,%ebp - andl %ebx,%esi - pxor %xmm9,%xmm7 - pshufd $238,%xmm6,%xmm9 - xorl %ecx,%ebx - addl %eax,%ebp - rorl $7,%eax - pxor %xmm4,%xmm0 - xorl %ecx,%esi - movl %ebp,%edi - addl 
0(%rsp),%edx - punpcklqdq %xmm7,%xmm9 - xorl %ebx,%eax - roll $5,%ebp - pxor %xmm1,%xmm0 - addl %esi,%edx - andl %eax,%edi - movdqa %xmm10,%xmm8 - xorl %ebx,%eax - paddd %xmm7,%xmm10 - addl %ebp,%edx - pxor %xmm9,%xmm0 - rorl $7,%ebp - xorl %ebx,%edi - movl %edx,%esi - addl 4(%rsp),%ecx - movdqa %xmm0,%xmm9 - xorl %eax,%ebp - roll $5,%edx - movdqa %xmm10,48(%rsp) - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - pslld $2,%xmm0 - addl %edx,%ecx - rorl $7,%edx - psrld $30,%xmm9 - xorl %eax,%esi - movl %ecx,%edi - addl 8(%rsp),%ebx - por %xmm9,%xmm0 - xorl %ebp,%edx - roll $5,%ecx - pshufd $238,%xmm7,%xmm10 - addl %esi,%ebx - andl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 12(%rsp),%eax - xorl %ebp,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - pxor %xmm5,%xmm1 - addl 16(%rsp),%ebp - xorl %ecx,%esi - punpcklqdq %xmm0,%xmm10 - movl %eax,%edi - roll $5,%eax - pxor %xmm2,%xmm1 - addl %esi,%ebp - xorl %ecx,%edi - movdqa %xmm8,%xmm9 - rorl $7,%ebx - paddd %xmm0,%xmm8 - addl %eax,%ebp - pxor %xmm10,%xmm1 - addl 20(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - movdqa %xmm1,%xmm10 - addl %edi,%edx - xorl %ebx,%esi - movdqa %xmm8,0(%rsp) - rorl $7,%eax - addl %ebp,%edx - addl 24(%rsp),%ecx - pslld $2,%xmm1 - xorl %eax,%esi - movl %edx,%edi - psrld $30,%xmm10 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - por %xmm10,%xmm1 - addl %edx,%ecx - addl 28(%rsp),%ebx - pshufd $238,%xmm0,%xmm8 - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - pxor %xmm6,%xmm2 - addl 32(%rsp),%eax - xorl %edx,%esi - punpcklqdq %xmm1,%xmm8 - movl %ebx,%edi - roll $5,%ebx - pxor %xmm3,%xmm2 - addl %esi,%eax - xorl %edx,%edi - movdqa 0(%r14),%xmm10 - rorl $7,%ecx - paddd %xmm1,%xmm9 - addl %ebx,%eax - pxor %xmm8,%xmm2 - addl 36(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - movdqa %xmm2,%xmm8 - addl %edi,%ebp - xorl 
%ecx,%esi - movdqa %xmm9,16(%rsp) - rorl $7,%ebx - addl %eax,%ebp - addl 40(%rsp),%edx - pslld $2,%xmm2 - xorl %ebx,%esi - movl %ebp,%edi - psrld $30,%xmm8 - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - por %xmm8,%xmm2 - addl %ebp,%edx - addl 44(%rsp),%ecx - pshufd $238,%xmm1,%xmm9 - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - pxor %xmm7,%xmm3 - addl 48(%rsp),%ebx - xorl %ebp,%esi - punpcklqdq %xmm2,%xmm9 - movl %ecx,%edi - roll $5,%ecx - pxor %xmm4,%xmm3 - addl %esi,%ebx - xorl %ebp,%edi - movdqa %xmm10,%xmm8 - rorl $7,%edx - paddd %xmm2,%xmm10 - addl %ecx,%ebx - pxor %xmm9,%xmm3 - addl 52(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - movdqa %xmm3,%xmm9 - addl %edi,%eax - xorl %edx,%esi - movdqa %xmm10,32(%rsp) - rorl $7,%ecx - addl %ebx,%eax - addl 56(%rsp),%ebp - pslld $2,%xmm3 - xorl %ecx,%esi - movl %eax,%edi - psrld $30,%xmm9 - roll $5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - por %xmm9,%xmm3 - addl %eax,%ebp - addl 60(%rsp),%edx - pshufd $238,%xmm2,%xmm10 - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx - pxor %xmm0,%xmm4 - addl 0(%rsp),%ecx - xorl %eax,%esi - punpcklqdq %xmm3,%xmm10 - movl %edx,%edi - roll $5,%edx - pxor %xmm5,%xmm4 - addl %esi,%ecx - xorl %eax,%edi - movdqa %xmm8,%xmm9 - rorl $7,%ebp - paddd %xmm3,%xmm8 - addl %edx,%ecx - pxor %xmm10,%xmm4 - addl 4(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - movdqa %xmm4,%xmm10 - addl %edi,%ebx - xorl %ebp,%esi - movdqa %xmm8,48(%rsp) - rorl $7,%edx - addl %ecx,%ebx - addl 8(%rsp),%eax - pslld $2,%xmm4 - xorl %edx,%esi - movl %ebx,%edi - psrld $30,%xmm10 - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - por %xmm10,%xmm4 - addl %ebx,%eax - addl 12(%rsp),%ebp - pshufd $238,%xmm3,%xmm8 - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl 
%eax,%ebp - pxor %xmm1,%xmm5 - addl 16(%rsp),%edx - xorl %ebx,%esi - punpcklqdq %xmm4,%xmm8 - movl %ebp,%edi - roll $5,%ebp - pxor %xmm6,%xmm5 - addl %esi,%edx - xorl %ebx,%edi - movdqa %xmm9,%xmm10 - rorl $7,%eax - paddd %xmm4,%xmm9 - addl %ebp,%edx - pxor %xmm8,%xmm5 - addl 20(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - movdqa %xmm5,%xmm8 - addl %edi,%ecx - xorl %eax,%esi - movdqa %xmm9,0(%rsp) - rorl $7,%ebp - addl %edx,%ecx - addl 24(%rsp),%ebx - pslld $2,%xmm5 - xorl %ebp,%esi - movl %ecx,%edi - psrld $30,%xmm8 - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - por %xmm8,%xmm5 - addl %ecx,%ebx - addl 28(%rsp),%eax - pshufd $238,%xmm4,%xmm9 - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%edi - roll $5,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - pxor %xmm2,%xmm6 - addl 32(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - rorl $7,%ebx - punpcklqdq %xmm5,%xmm9 - movl %eax,%edi - xorl %ecx,%esi - pxor %xmm7,%xmm6 - roll $5,%eax - addl %esi,%ebp - movdqa %xmm10,%xmm8 - xorl %ebx,%edi - paddd %xmm5,%xmm10 - xorl %ecx,%ebx - pxor %xmm9,%xmm6 - addl %eax,%ebp - addl 36(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - rorl $7,%eax - movdqa %xmm6,%xmm9 - movl %ebp,%esi - xorl %ebx,%edi - movdqa %xmm10,16(%rsp) - roll $5,%ebp - addl %edi,%edx - xorl %eax,%esi - pslld $2,%xmm6 - xorl %ebx,%eax - addl %ebp,%edx - psrld $30,%xmm9 - addl 40(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - por %xmm9,%xmm6 - rorl $7,%ebp - movl %edx,%edi - xorl %eax,%esi - roll $5,%edx - pshufd $238,%xmm5,%xmm10 - addl %esi,%ecx - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 44(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - rorl $7,%edx - movl %ecx,%esi - xorl %ebp,%edi - roll $5,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - pxor %xmm3,%xmm7 - addl 48(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - rorl $7,%ecx - punpcklqdq %xmm6,%xmm10 - movl %ebx,%edi - xorl %edx,%esi - pxor %xmm0,%xmm7 - roll $5,%ebx 
- addl %esi,%eax - movdqa 32(%r14),%xmm9 - xorl %ecx,%edi - paddd %xmm6,%xmm8 - xorl %edx,%ecx - pxor %xmm10,%xmm7 - addl %ebx,%eax - addl 52(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - rorl $7,%ebx - movdqa %xmm7,%xmm10 - movl %eax,%esi - xorl %ecx,%edi - movdqa %xmm8,32(%rsp) - roll $5,%eax - addl %edi,%ebp - xorl %ebx,%esi - pslld $2,%xmm7 - xorl %ecx,%ebx - addl %eax,%ebp - psrld $30,%xmm10 - addl 56(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - por %xmm10,%xmm7 - rorl $7,%eax - movl %ebp,%edi - xorl %ebx,%esi - roll $5,%ebp - pshufd $238,%xmm6,%xmm8 - addl %esi,%edx - xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 60(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - rorl $7,%ebp - movl %edx,%esi - xorl %eax,%edi - roll $5,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - pxor %xmm4,%xmm0 - addl 0(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - rorl $7,%edx - punpcklqdq %xmm7,%xmm8 - movl %ecx,%edi - xorl %ebp,%esi - pxor %xmm1,%xmm0 - roll $5,%ecx - addl %esi,%ebx - movdqa %xmm9,%xmm10 - xorl %edx,%edi - paddd %xmm7,%xmm9 - xorl %ebp,%edx - pxor %xmm8,%xmm0 - addl %ecx,%ebx - addl 4(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - rorl $7,%ecx - movdqa %xmm0,%xmm8 - movl %ebx,%esi - xorl %edx,%edi - movdqa %xmm9,48(%rsp) - roll $5,%ebx - addl %edi,%eax - xorl %ecx,%esi - pslld $2,%xmm0 - xorl %edx,%ecx - addl %ebx,%eax - psrld $30,%xmm8 - addl 8(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - por %xmm8,%xmm0 - rorl $7,%ebx - movl %eax,%edi - xorl %ecx,%esi - roll $5,%eax - pshufd $238,%xmm7,%xmm9 - addl %esi,%ebp - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 12(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - rorl $7,%eax - movl %ebp,%esi - xorl %ebx,%edi - roll $5,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - pxor %xmm5,%xmm1 - addl 16(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - rorl $7,%ebp - punpcklqdq %xmm0,%xmm9 - movl %edx,%edi - xorl %eax,%esi - pxor %xmm2,%xmm1 - roll $5,%edx - 
addl %esi,%ecx - movdqa %xmm10,%xmm8 - xorl %ebp,%edi - paddd %xmm0,%xmm10 - xorl %eax,%ebp - pxor %xmm9,%xmm1 - addl %edx,%ecx - addl 20(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - rorl $7,%edx - movdqa %xmm1,%xmm9 - movl %ecx,%esi - xorl %ebp,%edi - movdqa %xmm10,0(%rsp) - roll $5,%ecx - addl %edi,%ebx - xorl %edx,%esi - pslld $2,%xmm1 - xorl %ebp,%edx - addl %ecx,%ebx - psrld $30,%xmm9 - addl 24(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - por %xmm9,%xmm1 - rorl $7,%ecx - movl %ebx,%edi - xorl %edx,%esi - roll $5,%ebx - pshufd $238,%xmm0,%xmm10 - addl %esi,%eax - xorl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - rorl $7,%ebx - movl %eax,%esi - xorl %ecx,%edi - roll $5,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - pxor %xmm6,%xmm2 - addl 32(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - rorl $7,%eax - punpcklqdq %xmm1,%xmm10 - movl %ebp,%edi - xorl %ebx,%esi - pxor %xmm3,%xmm2 - roll $5,%ebp - addl %esi,%edx - movdqa %xmm8,%xmm9 - xorl %eax,%edi - paddd %xmm1,%xmm8 - xorl %ebx,%eax - pxor %xmm10,%xmm2 - addl %ebp,%edx - addl 36(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - rorl $7,%ebp - movdqa %xmm2,%xmm10 - movl %edx,%esi - xorl %eax,%edi - movdqa %xmm8,16(%rsp) - roll $5,%edx - addl %edi,%ecx - xorl %ebp,%esi - pslld $2,%xmm2 - xorl %eax,%ebp - addl %edx,%ecx - psrld $30,%xmm10 - addl 40(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - por %xmm10,%xmm2 - rorl $7,%edx - movl %ecx,%edi - xorl %ebp,%esi - roll $5,%ecx - pshufd $238,%xmm1,%xmm8 - addl %esi,%ebx - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 44(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - rorl $7,%ecx - movl %ebx,%esi - xorl %edx,%edi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - addl %ebx,%eax - pxor %xmm7,%xmm3 - addl 48(%rsp),%ebp - xorl %ecx,%esi - punpcklqdq %xmm2,%xmm8 - movl %eax,%edi - roll $5,%eax - pxor %xmm4,%xmm3 - addl %esi,%ebp - xorl %ecx,%edi - movdqa %xmm9,%xmm10 - rorl 
$7,%ebx - paddd %xmm2,%xmm9 - addl %eax,%ebp - pxor %xmm8,%xmm3 - addl 52(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - movdqa %xmm3,%xmm8 - addl %edi,%edx - xorl %ebx,%esi - movdqa %xmm9,32(%rsp) - rorl $7,%eax - addl %ebp,%edx - addl 56(%rsp),%ecx - pslld $2,%xmm3 - xorl %eax,%esi - movl %edx,%edi - psrld $30,%xmm8 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - por %xmm8,%xmm3 - addl %edx,%ecx - addl 60(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - addl 0(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - paddd %xmm3,%xmm10 - addl %esi,%eax - xorl %edx,%edi - movdqa %xmm10,48(%rsp) - rorl $7,%ecx - addl %ebx,%eax - addl 4(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 8(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - addl %ebp,%edx - addl 12(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - cmpq %r10,%r9 - je L$done_ssse3 - movdqa 64(%r14),%xmm6 - movdqa -64(%r14),%xmm9 - movdqu 0(%r9),%xmm0 - movdqu 16(%r9),%xmm1 - movdqu 32(%r9),%xmm2 - movdqu 48(%r9),%xmm3 -.byte 102,15,56,0,198 - addq $64,%r9 - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi -.byte 102,15,56,0,206 - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - paddd %xmm9,%xmm0 - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - movdqa %xmm0,0(%rsp) - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - psubd %xmm9,%xmm0 - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - roll $5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx 
- addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi -.byte 102,15,56,0,214 - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - paddd %xmm9,%xmm1 - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - movdqa %xmm1,16(%rsp) - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - psubd %xmm9,%xmm1 - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi -.byte 102,15,56,0,222 - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - paddd %xmm9,%xmm2 - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - movdqa %xmm2,32(%rsp) - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - psubd %xmm9,%xmm2 - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - rorl $7,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - addl 12(%r8),%edx - movl %eax,0(%r8) - addl 16(%r8),%ebp - movl %esi,4(%r8) - movl %esi,%ebx - movl %ecx,8(%r8) - movl %ecx,%edi - movl %edx,12(%r8) - xorl %edx,%edi - movl %ebp,16(%r8) - andl %edi,%esi - jmp L$oop_ssse3 - -.p2align 4 -L$done_ssse3: - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - xorl %edx,%esi - rorl $7,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - roll $5,%eax - addl %esi,%ebp - xorl %ecx,%edi - rorl $7,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi 
- movl %ebp,%esi - roll $5,%ebp - addl %edi,%edx - xorl %ebx,%esi - rorl $7,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - roll $5,%edx - addl %esi,%ecx - xorl %eax,%edi - rorl $7,%ebp - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - roll $5,%ecx - addl %edi,%ebx - xorl %ebp,%esi - rorl $7,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - roll $5,%ebx - addl %esi,%eax - xorl %edx,%edi - rorl $7,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - roll $5,%eax - addl %edi,%ebp - xorl %ecx,%esi - rorl $7,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - roll $5,%ebp - addl %esi,%edx - xorl %ebx,%edi - rorl $7,%eax - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - roll $5,%edx - addl %edi,%ecx - xorl %eax,%esi - rorl $7,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - roll $5,%ecx - addl %esi,%ebx - xorl %ebp,%edi - rorl $7,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - roll $5,%ebx - addl %edi,%eax - rorl $7,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - movl %eax,0(%r8) - addl 12(%r8),%edx - movl %esi,4(%r8) - addl 16(%r8),%ebp - movl %ecx,8(%r8) - movl %edx,12(%r8) - movl %ebp,16(%r8) - movq -40(%r11),%r14 - - movq -32(%r11),%r13 - - movq -24(%r11),%r12 - - movq -16(%r11),%rbp - - movq -8(%r11),%rbx - - leaq (%r11),%rsp - -L$epilogue_ssse3: - .byte 0xf3,0xc3 - - - -.p2align 4 -sha1_block_data_order_avx: -_avx_shortcut: - - movq %rsp,%r11 - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - leaq -64(%rsp),%rsp - vzeroupper - andq $-64,%rsp - movq %rdi,%r8 - movq %rsi,%r9 - movq %rdx,%r10 - - shlq $6,%r10 - addq %r9,%r10 - leaq K_XX_XX+64(%rip),%r14 - - movl 0(%r8),%eax - movl 4(%r8),%ebx - movl 8(%r8),%ecx - movl 12(%r8),%edx - movl %ebx,%esi - movl 16(%r8),%ebp - movl %ecx,%edi - 
xorl %edx,%edi - andl %edi,%esi - - vmovdqa 64(%r14),%xmm6 - vmovdqa -64(%r14),%xmm11 - vmovdqu 0(%r9),%xmm0 - vmovdqu 16(%r9),%xmm1 - vmovdqu 32(%r9),%xmm2 - vmovdqu 48(%r9),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - addq $64,%r9 - vpshufb %xmm6,%xmm1,%xmm1 - vpshufb %xmm6,%xmm2,%xmm2 - vpshufb %xmm6,%xmm3,%xmm3 - vpaddd %xmm11,%xmm0,%xmm4 - vpaddd %xmm11,%xmm1,%xmm5 - vpaddd %xmm11,%xmm2,%xmm6 - vmovdqa %xmm4,0(%rsp) - vmovdqa %xmm5,16(%rsp) - vmovdqa %xmm6,32(%rsp) - jmp L$oop_avx -.p2align 4 -L$oop_avx: - shrdl $2,%ebx,%ebx - xorl %edx,%esi - vpalignr $8,%xmm0,%xmm1,%xmm4 - movl %eax,%edi - addl 0(%rsp),%ebp - vpaddd %xmm3,%xmm11,%xmm9 - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrldq $4,%xmm3,%xmm8 - addl %esi,%ebp - andl %ebx,%edi - vpxor %xmm0,%xmm4,%xmm4 - xorl %ecx,%ebx - addl %eax,%ebp - vpxor %xmm2,%xmm8,%xmm8 - shrdl $7,%eax,%eax - xorl %ecx,%edi - movl %ebp,%esi - addl 4(%rsp),%edx - vpxor %xmm8,%xmm4,%xmm4 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vmovdqa %xmm9,48(%rsp) - addl %edi,%edx - andl %eax,%esi - vpsrld $31,%xmm4,%xmm8 - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%esi - vpslldq $12,%xmm4,%xmm10 - vpaddd %xmm4,%xmm4,%xmm4 - movl %edx,%edi - addl 8(%rsp),%ecx - xorl %eax,%ebp - shldl $5,%edx,%edx - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm4,%xmm4 - addl %esi,%ecx - andl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm4,%xmm4 - shrdl $7,%edx,%edx - xorl %eax,%edi - movl %ecx,%esi - addl 12(%rsp),%ebx - vpxor %xmm10,%xmm4,%xmm4 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - addl %edi,%ebx - andl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %ebp,%esi - vpalignr $8,%xmm1,%xmm2,%xmm5 - movl %ebx,%edi - addl 16(%rsp),%eax - vpaddd %xmm4,%xmm11,%xmm9 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrldq $4,%xmm4,%xmm8 - addl %esi,%eax - andl %ecx,%edi - vpxor %xmm1,%xmm5,%xmm5 - xorl %edx,%ecx - addl %ebx,%eax - vpxor %xmm3,%xmm8,%xmm8 - shrdl $7,%ebx,%ebx - xorl %edx,%edi - movl 
%eax,%esi - addl 20(%rsp),%ebp - vpxor %xmm8,%xmm5,%xmm5 - xorl %ecx,%ebx - shldl $5,%eax,%eax - vmovdqa %xmm9,0(%rsp) - addl %edi,%ebp - andl %ebx,%esi - vpsrld $31,%xmm5,%xmm8 - xorl %ecx,%ebx - addl %eax,%ebp - shrdl $7,%eax,%eax - xorl %ecx,%esi - vpslldq $12,%xmm5,%xmm10 - vpaddd %xmm5,%xmm5,%xmm5 - movl %ebp,%edi - addl 24(%rsp),%edx - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm5,%xmm5 - addl %esi,%edx - andl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm5,%xmm5 - shrdl $7,%ebp,%ebp - xorl %ebx,%edi - movl %edx,%esi - addl 28(%rsp),%ecx - vpxor %xmm10,%xmm5,%xmm5 - xorl %eax,%ebp - shldl $5,%edx,%edx - vmovdqa -32(%r14),%xmm11 - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - vpalignr $8,%xmm2,%xmm3,%xmm6 - movl %ecx,%edi - addl 32(%rsp),%ebx - vpaddd %xmm5,%xmm11,%xmm9 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - vpsrldq $4,%xmm5,%xmm8 - addl %esi,%ebx - andl %edx,%edi - vpxor %xmm2,%xmm6,%xmm6 - xorl %ebp,%edx - addl %ecx,%ebx - vpxor %xmm4,%xmm8,%xmm8 - shrdl $7,%ecx,%ecx - xorl %ebp,%edi - movl %ebx,%esi - addl 36(%rsp),%eax - vpxor %xmm8,%xmm6,%xmm6 - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vmovdqa %xmm9,16(%rsp) - addl %edi,%eax - andl %ecx,%esi - vpsrld $31,%xmm6,%xmm8 - xorl %edx,%ecx - addl %ebx,%eax - shrdl $7,%ebx,%ebx - xorl %edx,%esi - vpslldq $12,%xmm6,%xmm10 - vpaddd %xmm6,%xmm6,%xmm6 - movl %eax,%edi - addl 40(%rsp),%ebp - xorl %ecx,%ebx - shldl $5,%eax,%eax - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm6,%xmm6 - addl %esi,%ebp - andl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm6,%xmm6 - shrdl $7,%eax,%eax - xorl %ecx,%edi - movl %ebp,%esi - addl 44(%rsp),%edx - vpxor %xmm10,%xmm6,%xmm6 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - addl %edi,%edx - andl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%esi - vpalignr $8,%xmm3,%xmm4,%xmm7 - movl %edx,%edi 
- addl 48(%rsp),%ecx - vpaddd %xmm6,%xmm11,%xmm9 - xorl %eax,%ebp - shldl $5,%edx,%edx - vpsrldq $4,%xmm6,%xmm8 - addl %esi,%ecx - andl %ebp,%edi - vpxor %xmm3,%xmm7,%xmm7 - xorl %eax,%ebp - addl %edx,%ecx - vpxor %xmm5,%xmm8,%xmm8 - shrdl $7,%edx,%edx - xorl %eax,%edi - movl %ecx,%esi - addl 52(%rsp),%ebx - vpxor %xmm8,%xmm7,%xmm7 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - vmovdqa %xmm9,32(%rsp) - addl %edi,%ebx - andl %edx,%esi - vpsrld $31,%xmm7,%xmm8 - xorl %ebp,%edx - addl %ecx,%ebx - shrdl $7,%ecx,%ecx - xorl %ebp,%esi - vpslldq $12,%xmm7,%xmm10 - vpaddd %xmm7,%xmm7,%xmm7 - movl %ebx,%edi - addl 56(%rsp),%eax - xorl %edx,%ecx - shldl $5,%ebx,%ebx - vpsrld $30,%xmm10,%xmm9 - vpor %xmm8,%xmm7,%xmm7 - addl %esi,%eax - andl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - vpslld $2,%xmm10,%xmm10 - vpxor %xmm9,%xmm7,%xmm7 - shrdl $7,%ebx,%ebx - xorl %edx,%edi - movl %eax,%esi - addl 60(%rsp),%ebp - vpxor %xmm10,%xmm7,%xmm7 - xorl %ecx,%ebx - shldl $5,%eax,%eax - addl %edi,%ebp - andl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm6,%xmm7,%xmm8 - vpxor %xmm4,%xmm0,%xmm0 - shrdl $7,%eax,%eax - xorl %ecx,%esi - movl %ebp,%edi - addl 0(%rsp),%edx - vpxor %xmm1,%xmm0,%xmm0 - xorl %ebx,%eax - shldl $5,%ebp,%ebp - vpaddd %xmm7,%xmm11,%xmm9 - addl %esi,%edx - andl %eax,%edi - vpxor %xmm8,%xmm0,%xmm0 - xorl %ebx,%eax - addl %ebp,%edx - shrdl $7,%ebp,%ebp - xorl %ebx,%edi - vpsrld $30,%xmm0,%xmm8 - vmovdqa %xmm9,48(%rsp) - movl %edx,%esi - addl 4(%rsp),%ecx - xorl %eax,%ebp - shldl $5,%edx,%edx - vpslld $2,%xmm0,%xmm0 - addl %edi,%ecx - andl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - shrdl $7,%edx,%edx - xorl %eax,%esi - movl %ecx,%edi - addl 8(%rsp),%ebx - vpor %xmm8,%xmm0,%xmm0 - xorl %ebp,%edx - shldl $5,%ecx,%ecx - addl %esi,%ebx - andl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 12(%rsp),%eax - xorl %ebp,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm7,%xmm0,%xmm8 - 
vpxor %xmm5,%xmm1,%xmm1 - addl 16(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - vpxor %xmm2,%xmm1,%xmm1 - addl %esi,%ebp - xorl %ecx,%edi - vpaddd %xmm0,%xmm11,%xmm9 - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpxor %xmm8,%xmm1,%xmm1 - addl 20(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - vpsrld $30,%xmm1,%xmm8 - vmovdqa %xmm9,0(%rsp) - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpslld $2,%xmm1,%xmm1 - addl 24(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpor %xmm8,%xmm1,%xmm1 - addl 28(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpalignr $8,%xmm0,%xmm1,%xmm8 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - vpxor %xmm3,%xmm2,%xmm2 - addl %esi,%eax - xorl %edx,%edi - vpaddd %xmm1,%xmm11,%xmm9 - vmovdqa 0(%r14),%xmm11 - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpxor %xmm8,%xmm2,%xmm2 - addl 36(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - vpsrld $30,%xmm2,%xmm8 - vmovdqa %xmm9,16(%rsp) - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpslld $2,%xmm2,%xmm2 - addl 40(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpor %xmm8,%xmm2,%xmm2 - addl 44(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpalignr $8,%xmm1,%xmm2,%xmm8 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - vpxor %xmm4,%xmm3,%xmm3 - addl %esi,%ebx - xorl %ebp,%edi - vpaddd %xmm2,%xmm11,%xmm9 - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpxor %xmm8,%xmm3,%xmm3 - addl 52(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl 
$5,%ebx,%ebx - vpsrld $30,%xmm3,%xmm8 - vmovdqa %xmm9,32(%rsp) - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpslld $2,%xmm3,%xmm3 - addl 56(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpor %xmm8,%xmm3,%xmm3 - addl 60(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpalignr $8,%xmm2,%xmm3,%xmm8 - vpxor %xmm0,%xmm4,%xmm4 - addl 0(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - vpxor %xmm5,%xmm4,%xmm4 - addl %esi,%ecx - xorl %eax,%edi - vpaddd %xmm3,%xmm11,%xmm9 - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpxor %xmm8,%xmm4,%xmm4 - addl 4(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - vpsrld $30,%xmm4,%xmm8 - vmovdqa %xmm9,48(%rsp) - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpslld $2,%xmm4,%xmm4 - addl 8(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vpor %xmm8,%xmm4,%xmm4 - addl 12(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm3,%xmm4,%xmm8 - vpxor %xmm1,%xmm5,%xmm5 - addl 16(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - vpxor %xmm6,%xmm5,%xmm5 - addl %esi,%edx - xorl %ebx,%edi - vpaddd %xmm4,%xmm11,%xmm9 - shrdl $7,%eax,%eax - addl %ebp,%edx - vpxor %xmm8,%xmm5,%xmm5 - addl 20(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - vpsrld $30,%xmm5,%xmm8 - vmovdqa %xmm9,0(%rsp) - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpslld $2,%xmm5,%xmm5 - addl 24(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vpor %xmm8,%xmm5,%xmm5 - addl 
28(%rsp),%eax - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - vpalignr $8,%xmm4,%xmm5,%xmm8 - vpxor %xmm2,%xmm6,%xmm6 - addl 32(%rsp),%ebp - andl %ecx,%esi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - vpxor %xmm7,%xmm6,%xmm6 - movl %eax,%edi - xorl %ecx,%esi - vpaddd %xmm5,%xmm11,%xmm9 - shldl $5,%eax,%eax - addl %esi,%ebp - vpxor %xmm8,%xmm6,%xmm6 - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 36(%rsp),%edx - vpsrld $30,%xmm6,%xmm8 - vmovdqa %xmm9,16(%rsp) - andl %ebx,%edi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%esi - vpslld $2,%xmm6,%xmm6 - xorl %ebx,%edi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - addl 40(%rsp),%ecx - andl %eax,%esi - vpor %xmm8,%xmm6,%xmm6 - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%edi - xorl %eax,%esi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 44(%rsp),%ebx - andl %ebp,%edi - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%esi - xorl %ebp,%edi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - vpalignr $8,%xmm5,%xmm6,%xmm8 - vpxor %xmm3,%xmm7,%xmm7 - addl 48(%rsp),%eax - andl %edx,%esi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - vpxor %xmm0,%xmm7,%xmm7 - movl %ebx,%edi - xorl %edx,%esi - vpaddd %xmm6,%xmm11,%xmm9 - vmovdqa 32(%r14),%xmm11 - shldl $5,%ebx,%ebx - addl %esi,%eax - vpxor %xmm8,%xmm7,%xmm7 - xorl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 52(%rsp),%ebp - vpsrld $30,%xmm7,%xmm8 - vmovdqa %xmm9,32(%rsp) - andl %ecx,%edi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - vpslld $2,%xmm7,%xmm7 - xorl %ecx,%edi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - addl 56(%rsp),%edx - andl %ebx,%esi - vpor %xmm8,%xmm7,%xmm7 - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%edi - xorl %ebx,%esi - shldl $5,%ebp,%ebp - addl %esi,%edx 
- xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 60(%rsp),%ecx - andl %eax,%edi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%esi - xorl %eax,%edi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - vpalignr $8,%xmm6,%xmm7,%xmm8 - vpxor %xmm4,%xmm0,%xmm0 - addl 0(%rsp),%ebx - andl %ebp,%esi - xorl %eax,%ebp - shrdl $7,%edx,%edx - vpxor %xmm1,%xmm0,%xmm0 - movl %ecx,%edi - xorl %ebp,%esi - vpaddd %xmm7,%xmm11,%xmm9 - shldl $5,%ecx,%ecx - addl %esi,%ebx - vpxor %xmm8,%xmm0,%xmm0 - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 4(%rsp),%eax - vpsrld $30,%xmm0,%xmm8 - vmovdqa %xmm9,48(%rsp) - andl %edx,%edi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - vpslld $2,%xmm0,%xmm0 - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %ecx,%esi - xorl %edx,%ecx - addl %ebx,%eax - addl 8(%rsp),%ebp - andl %ecx,%esi - vpor %xmm8,%xmm0,%xmm0 - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%edi - xorl %ecx,%esi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ebx,%edi - xorl %ecx,%ebx - addl %eax,%ebp - addl 12(%rsp),%edx - andl %ebx,%edi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - movl %ebp,%esi - xorl %ebx,%edi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %eax,%esi - xorl %ebx,%eax - addl %ebp,%edx - vpalignr $8,%xmm7,%xmm0,%xmm8 - vpxor %xmm5,%xmm1,%xmm1 - addl 16(%rsp),%ecx - andl %eax,%esi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - vpxor %xmm2,%xmm1,%xmm1 - movl %edx,%edi - xorl %eax,%esi - vpaddd %xmm0,%xmm11,%xmm9 - shldl $5,%edx,%edx - addl %esi,%ecx - vpxor %xmm8,%xmm1,%xmm1 - xorl %ebp,%edi - xorl %eax,%ebp - addl %edx,%ecx - addl 20(%rsp),%ebx - vpsrld $30,%xmm1,%xmm8 - vmovdqa %xmm9,0(%rsp) - andl %ebp,%edi - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%esi - vpslld $2,%xmm1,%xmm1 - xorl %ebp,%edi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %edx,%esi - xorl %ebp,%edx - addl %ecx,%ebx - addl 24(%rsp),%eax - andl %edx,%esi - vpor %xmm8,%xmm1,%xmm1 - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl 
%ebx,%edi - xorl %edx,%esi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %ecx,%edi - xorl %edx,%ecx - addl %ebx,%eax - addl 28(%rsp),%ebp - andl %ecx,%edi - xorl %edx,%ecx - shrdl $7,%ebx,%ebx - movl %eax,%esi - xorl %ecx,%edi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ebx,%esi - xorl %ecx,%ebx - addl %eax,%ebp - vpalignr $8,%xmm0,%xmm1,%xmm8 - vpxor %xmm6,%xmm2,%xmm2 - addl 32(%rsp),%edx - andl %ebx,%esi - xorl %ecx,%ebx - shrdl $7,%eax,%eax - vpxor %xmm3,%xmm2,%xmm2 - movl %ebp,%edi - xorl %ebx,%esi - vpaddd %xmm1,%xmm11,%xmm9 - shldl $5,%ebp,%ebp - addl %esi,%edx - vpxor %xmm8,%xmm2,%xmm2 - xorl %eax,%edi - xorl %ebx,%eax - addl %ebp,%edx - addl 36(%rsp),%ecx - vpsrld $30,%xmm2,%xmm8 - vmovdqa %xmm9,16(%rsp) - andl %eax,%edi - xorl %ebx,%eax - shrdl $7,%ebp,%ebp - movl %edx,%esi - vpslld $2,%xmm2,%xmm2 - xorl %eax,%edi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %ebp,%esi - xorl %eax,%ebp - addl %edx,%ecx - addl 40(%rsp),%ebx - andl %ebp,%esi - vpor %xmm8,%xmm2,%xmm2 - xorl %eax,%ebp - shrdl $7,%edx,%edx - movl %ecx,%edi - xorl %ebp,%esi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %edx,%edi - xorl %ebp,%edx - addl %ecx,%ebx - addl 44(%rsp),%eax - andl %edx,%edi - xorl %ebp,%edx - shrdl $7,%ecx,%ecx - movl %ebx,%esi - xorl %edx,%edi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - addl %ebx,%eax - vpalignr $8,%xmm1,%xmm2,%xmm8 - vpxor %xmm7,%xmm3,%xmm3 - addl 48(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - vpxor %xmm4,%xmm3,%xmm3 - addl %esi,%ebp - xorl %ecx,%edi - vpaddd %xmm2,%xmm11,%xmm9 - shrdl $7,%ebx,%ebx - addl %eax,%ebp - vpxor %xmm8,%xmm3,%xmm3 - addl 52(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - vpsrld $30,%xmm3,%xmm8 - vmovdqa %xmm9,32(%rsp) - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - vpslld $2,%xmm3,%xmm3 - addl 56(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl $5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vpor 
%xmm8,%xmm3,%xmm3 - addl 60(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 0(%rsp),%eax - vpaddd %xmm3,%xmm11,%xmm9 - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - vmovdqa %xmm9,48(%rsp) - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 4(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 8(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 12(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - cmpq %r10,%r9 - je L$done_avx - vmovdqa 64(%r14),%xmm6 - vmovdqa -64(%r14),%xmm11 - vmovdqu 0(%r9),%xmm0 - vmovdqu 16(%r9),%xmm1 - vmovdqu 32(%r9),%xmm2 - vmovdqu 48(%r9),%xmm3 - vpshufb %xmm6,%xmm0,%xmm0 - addq $64,%r9 - addl 16(%rsp),%ebx - xorl %ebp,%esi - vpshufb %xmm6,%xmm1,%xmm1 - movl %ecx,%edi - shldl $5,%ecx,%ecx - vpaddd %xmm11,%xmm0,%xmm4 - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - vmovdqa %xmm4,0(%rsp) - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - vpshufb %xmm6,%xmm2,%xmm2 - movl %edx,%edi - shldl $5,%edx,%edx - vpaddd %xmm11,%xmm1,%xmm5 - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - vmovdqa %xmm5,16(%rsp) - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl 
$5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - vpshufb %xmm6,%xmm3,%xmm3 - movl %ebp,%edi - shldl $5,%ebp,%ebp - vpaddd %xmm11,%xmm2,%xmm6 - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - vmovdqa %xmm6,32(%rsp) - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - addl 12(%r8),%edx - movl %eax,0(%r8) - addl 16(%r8),%ebp - movl %esi,4(%r8) - movl %esi,%ebx - movl %ecx,8(%r8) - movl %ecx,%edi - movl %edx,12(%r8) - xorl %edx,%edi - movl %ebp,16(%r8) - andl %edi,%esi - jmp L$oop_avx - -.p2align 4 -L$done_avx: - addl 16(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 20(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - xorl %edx,%esi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 24(%rsp),%ebp - xorl %ecx,%esi - movl %eax,%edi - shldl $5,%eax,%eax - addl %esi,%ebp - xorl %ecx,%edi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 28(%rsp),%edx - xorl %ebx,%edi - movl %ebp,%esi - shldl $5,%ebp,%ebp - addl %edi,%edx - xorl %ebx,%esi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 32(%rsp),%ecx - xorl %eax,%esi - movl %edx,%edi - shldl 
$5,%edx,%edx - addl %esi,%ecx - xorl %eax,%edi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 36(%rsp),%ebx - xorl %ebp,%edi - movl %ecx,%esi - shldl $5,%ecx,%ecx - addl %edi,%ebx - xorl %ebp,%esi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 40(%rsp),%eax - xorl %edx,%esi - movl %ebx,%edi - shldl $5,%ebx,%ebx - addl %esi,%eax - xorl %edx,%edi - shrdl $7,%ecx,%ecx - addl %ebx,%eax - addl 44(%rsp),%ebp - xorl %ecx,%edi - movl %eax,%esi - shldl $5,%eax,%eax - addl %edi,%ebp - xorl %ecx,%esi - shrdl $7,%ebx,%ebx - addl %eax,%ebp - addl 48(%rsp),%edx - xorl %ebx,%esi - movl %ebp,%edi - shldl $5,%ebp,%ebp - addl %esi,%edx - xorl %ebx,%edi - shrdl $7,%eax,%eax - addl %ebp,%edx - addl 52(%rsp),%ecx - xorl %eax,%edi - movl %edx,%esi - shldl $5,%edx,%edx - addl %edi,%ecx - xorl %eax,%esi - shrdl $7,%ebp,%ebp - addl %edx,%ecx - addl 56(%rsp),%ebx - xorl %ebp,%esi - movl %ecx,%edi - shldl $5,%ecx,%ecx - addl %esi,%ebx - xorl %ebp,%edi - shrdl $7,%edx,%edx - addl %ecx,%ebx - addl 60(%rsp),%eax - xorl %edx,%edi - movl %ebx,%esi - shldl $5,%ebx,%ebx - addl %edi,%eax - shrdl $7,%ecx,%ecx - addl %ebx,%eax - vzeroupper - - addl 0(%r8),%eax - addl 4(%r8),%esi - addl 8(%r8),%ecx - movl %eax,0(%r8) - addl 12(%r8),%edx - movl %esi,4(%r8) - addl 16(%r8),%ebp - movl %ecx,8(%r8) - movl %edx,12(%r8) - movl %ebp,16(%r8) - movq -40(%r11),%r14 - - movq -32(%r11),%r13 - - movq -24(%r11),%r12 - - movq -16(%r11),%rbp - - movq -8(%r11),%rbx - - leaq (%r11),%rsp - -L$epilogue_avx: - .byte 0xf3,0xc3 - - -.p2align 6 -K_XX_XX: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 
0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 -.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 6 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S deleted file mode 100644 index 5e46e81c16..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S +++ /dev/null @@ -1,3971 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.globl _sha256_block_data_order -.private_extern _sha256_block_data_order - -.p2align 4 -_sha256_block_data_order: - - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 0(%r11),%r9d - movl 4(%r11),%r10d - movl 8(%r11),%r11d - andl $1073741824,%r9d - andl $268435968,%r10d - orl %r9d,%r10d - cmpl $1342177792,%r10d - je L$avx_shortcut - testl $512,%r10d - jnz L$ssse3_shortcut - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - shlq $4,%rdx - subq $64+32,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) - -L$prologue: - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - jmp L$loop - -.p2align 4 -L$loop: - movl %ebx,%edi - leaq K256(%rip),%rbp - xorl %ecx,%edi - 
movl 0(%rsi),%r12d - movl %r8d,%r13d - movl %eax,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,0(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - addl %r14d,%r11d - movl 4(%rsi),%r12d - movl %edx,%r13d - movl %r11d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,4(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - addl %r14d,%r10d - movl 8(%rsi),%r12d - movl %ecx,%r13d - movl %r10d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,8(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - addl %r14d,%r9d - movl 12(%rsi),%r12d - movl %ebx,%r13d - movl %r9d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,12(%rsp) - xorl %r9d,%r14d - andl 
%ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - addl %r14d,%r8d - movl 16(%rsi),%r12d - movl %eax,%r13d - movl %r8d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,16(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - addl %r14d,%edx - movl 20(%rsi),%r12d - movl %r11d,%r13d - movl %edx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,20(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - addl %r14d,%ecx - movl 24(%rsi),%r12d - movl %r10d,%r13d - movl %ecx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,24(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - 
movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - addl %r14d,%ebx - movl 28(%rsi),%r12d - movl %r9d,%r13d - movl %ebx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,28(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - addl %r14d,%eax - movl 32(%rsi),%r12d - movl %r8d,%r13d - movl %eax,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,32(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - addl %r14d,%r11d - movl 36(%rsi),%r12d - movl %edx,%r13d - movl %r11d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,36(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - addl %r14d,%r10d - movl 40(%rsi),%r12d - movl %ecx,%r13d - movl 
%r10d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,40(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - addl %r14d,%r9d - movl 44(%rsi),%r12d - movl %ebx,%r13d - movl %r9d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,44(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - addl %r14d,%r8d - movl 48(%rsi),%r12d - movl %eax,%r13d - movl %r8d,%r14d - bswapl %r12d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,48(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - addl %r14d,%edx - movl 52(%rsi),%r12d - movl %r11d,%r13d - movl %edx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,52(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - 
xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - addl %r14d,%ecx - movl 56(%rsi),%r12d - movl %r10d,%r13d - movl %ecx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,56(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - addl %r14d,%ebx - movl 60(%rsi),%r12d - movl %r9d,%r13d - movl %ebx,%r14d - bswapl %r12d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,60(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - jmp L$rounds_16_xx -.p2align 4 -L$rounds_16_xx: - movl 4(%rsp),%r13d - movl 56(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%eax - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 36(%rsp),%r12d - - addl 0(%rsp),%r12d - movl %r8d,%r13d - addl %r15d,%r12d - movl %eax,%r14d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl 
$9,%r14d - xorl %r10d,%r15d - - movl %r12d,0(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - movl 8(%rsp),%r13d - movl 60(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r11d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 40(%rsp),%r12d - - addl 4(%rsp),%r12d - movl %edx,%r13d - addl %edi,%r12d - movl %r11d,%r14d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,4(%rsp) - xorl %r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - movl 12(%rsp),%r13d - movl 0(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r10d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 44(%rsp),%r12d - - addl 8(%rsp),%r12d - movl %ecx,%r13d - addl %r15d,%r12d - movl %r10d,%r14d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,8(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl 
(%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - movl 16(%rsp),%r13d - movl 4(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r9d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 48(%rsp),%r12d - - addl 12(%rsp),%r12d - movl %ebx,%r13d - addl %edi,%r12d - movl %r9d,%r14d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,12(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - movl 20(%rsp),%r13d - movl 8(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r8d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 52(%rsp),%r12d - - addl 16(%rsp),%r12d - movl %eax,%r13d - addl %r15d,%r12d - movl %r8d,%r14d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,16(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - movl 24(%rsp),%r13d - 
movl 12(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%edx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 56(%rsp),%r12d - - addl 20(%rsp),%r12d - movl %r11d,%r13d - addl %edi,%r12d - movl %edx,%r14d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,20(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - movl 28(%rsp),%r13d - movl 16(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ecx - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 60(%rsp),%r12d - - addl 24(%rsp),%r12d - movl %r10d,%r13d - addl %r15d,%r12d - movl %ecx,%r14d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,24(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - movl 32(%rsp),%r13d - movl 20(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ebx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - 
xorl %r14d,%edi - addl 0(%rsp),%r12d - - addl 28(%rsp),%r12d - movl %r9d,%r13d - addl %edi,%r12d - movl %ebx,%r14d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,28(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - movl 36(%rsp),%r13d - movl 24(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%eax - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 4(%rsp),%r12d - - addl 32(%rsp),%r12d - movl %r8d,%r13d - addl %r15d,%r12d - movl %eax,%r14d - rorl $14,%r13d - movl %r9d,%r15d - - xorl %r8d,%r13d - rorl $9,%r14d - xorl %r10d,%r15d - - movl %r12d,32(%rsp) - xorl %eax,%r14d - andl %r8d,%r15d - - rorl $5,%r13d - addl %r11d,%r12d - xorl %r10d,%r15d - - rorl $11,%r14d - xorl %r8d,%r13d - addl %r15d,%r12d - - movl %eax,%r15d - addl (%rbp),%r12d - xorl %eax,%r14d - - xorl %ebx,%r15d - rorl $6,%r13d - movl %ebx,%r11d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r11d - addl %r12d,%edx - addl %r12d,%r11d - - leaq 4(%rbp),%rbp - movl 40(%rsp),%r13d - movl 28(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r11d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 8(%rsp),%r12d - - addl 36(%rsp),%r12d - movl %edx,%r13d - addl %edi,%r12d - movl %r11d,%r14d - rorl $14,%r13d - movl %r8d,%edi - - xorl %edx,%r13d - rorl $9,%r14d - xorl %r9d,%edi - - movl %r12d,36(%rsp) - xorl 
%r11d,%r14d - andl %edx,%edi - - rorl $5,%r13d - addl %r10d,%r12d - xorl %r9d,%edi - - rorl $11,%r14d - xorl %edx,%r13d - addl %edi,%r12d - - movl %r11d,%edi - addl (%rbp),%r12d - xorl %r11d,%r14d - - xorl %eax,%edi - rorl $6,%r13d - movl %eax,%r10d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r10d - addl %r12d,%ecx - addl %r12d,%r10d - - leaq 4(%rbp),%rbp - movl 44(%rsp),%r13d - movl 32(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r10d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 12(%rsp),%r12d - - addl 40(%rsp),%r12d - movl %ecx,%r13d - addl %r15d,%r12d - movl %r10d,%r14d - rorl $14,%r13d - movl %edx,%r15d - - xorl %ecx,%r13d - rorl $9,%r14d - xorl %r8d,%r15d - - movl %r12d,40(%rsp) - xorl %r10d,%r14d - andl %ecx,%r15d - - rorl $5,%r13d - addl %r9d,%r12d - xorl %r8d,%r15d - - rorl $11,%r14d - xorl %ecx,%r13d - addl %r15d,%r12d - - movl %r10d,%r15d - addl (%rbp),%r12d - xorl %r10d,%r14d - - xorl %r11d,%r15d - rorl $6,%r13d - movl %r11d,%r9d - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%r9d - addl %r12d,%ebx - addl %r12d,%r9d - - leaq 4(%rbp),%rbp - movl 48(%rsp),%r13d - movl 36(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r9d - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 16(%rsp),%r12d - - addl 44(%rsp),%r12d - movl %ebx,%r13d - addl %edi,%r12d - movl %r9d,%r14d - rorl $14,%r13d - movl %ecx,%edi - - xorl %ebx,%r13d - rorl $9,%r14d - xorl %edx,%edi - - movl %r12d,44(%rsp) - xorl %r9d,%r14d - andl %ebx,%edi - - rorl $5,%r13d - addl %r8d,%r12d - xorl %edx,%edi - - rorl $11,%r14d - xorl %ebx,%r13d - addl %edi,%r12d - - movl %r9d,%edi - addl (%rbp),%r12d - xorl %r9d,%r14d - - xorl %r10d,%edi - rorl $6,%r13d - 
movl %r10d,%r8d - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%r8d - addl %r12d,%eax - addl %r12d,%r8d - - leaq 20(%rbp),%rbp - movl 52(%rsp),%r13d - movl 40(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%r8d - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 20(%rsp),%r12d - - addl 48(%rsp),%r12d - movl %eax,%r13d - addl %r15d,%r12d - movl %r8d,%r14d - rorl $14,%r13d - movl %ebx,%r15d - - xorl %eax,%r13d - rorl $9,%r14d - xorl %ecx,%r15d - - movl %r12d,48(%rsp) - xorl %r8d,%r14d - andl %eax,%r15d - - rorl $5,%r13d - addl %edx,%r12d - xorl %ecx,%r15d - - rorl $11,%r14d - xorl %eax,%r13d - addl %r15d,%r12d - - movl %r8d,%r15d - addl (%rbp),%r12d - xorl %r8d,%r14d - - xorl %r9d,%r15d - rorl $6,%r13d - movl %r9d,%edx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%edx - addl %r12d,%r11d - addl %r12d,%edx - - leaq 4(%rbp),%rbp - movl 56(%rsp),%r13d - movl 44(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%edx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 24(%rsp),%r12d - - addl 52(%rsp),%r12d - movl %r11d,%r13d - addl %edi,%r12d - movl %edx,%r14d - rorl $14,%r13d - movl %eax,%edi - - xorl %r11d,%r13d - rorl $9,%r14d - xorl %ebx,%edi - - movl %r12d,52(%rsp) - xorl %edx,%r14d - andl %r11d,%edi - - rorl $5,%r13d - addl %ecx,%r12d - xorl %ebx,%edi - - rorl $11,%r14d - xorl %r11d,%r13d - addl %edi,%r12d - - movl %edx,%edi - addl (%rbp),%r12d - xorl %edx,%r14d - - xorl %r8d,%edi - rorl $6,%r13d - movl %r8d,%ecx - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%ecx - addl %r12d,%r10d - addl %r12d,%ecx - - leaq 4(%rbp),%rbp - movl 60(%rsp),%r13d - movl 48(%rsp),%r15d - - movl %r13d,%r12d - rorl $11,%r13d - addl 
%r14d,%ecx - movl %r15d,%r14d - rorl $2,%r15d - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%r15d - shrl $10,%r14d - - rorl $17,%r15d - xorl %r13d,%r12d - xorl %r14d,%r15d - addl 28(%rsp),%r12d - - addl 56(%rsp),%r12d - movl %r10d,%r13d - addl %r15d,%r12d - movl %ecx,%r14d - rorl $14,%r13d - movl %r11d,%r15d - - xorl %r10d,%r13d - rorl $9,%r14d - xorl %eax,%r15d - - movl %r12d,56(%rsp) - xorl %ecx,%r14d - andl %r10d,%r15d - - rorl $5,%r13d - addl %ebx,%r12d - xorl %eax,%r15d - - rorl $11,%r14d - xorl %r10d,%r13d - addl %r15d,%r12d - - movl %ecx,%r15d - addl (%rbp),%r12d - xorl %ecx,%r14d - - xorl %edx,%r15d - rorl $6,%r13d - movl %edx,%ebx - - andl %r15d,%edi - rorl $2,%r14d - addl %r13d,%r12d - - xorl %edi,%ebx - addl %r12d,%r9d - addl %r12d,%ebx - - leaq 4(%rbp),%rbp - movl 0(%rsp),%r13d - movl 52(%rsp),%edi - - movl %r13d,%r12d - rorl $11,%r13d - addl %r14d,%ebx - movl %edi,%r14d - rorl $2,%edi - - xorl %r12d,%r13d - shrl $3,%r12d - rorl $7,%r13d - xorl %r14d,%edi - shrl $10,%r14d - - rorl $17,%edi - xorl %r13d,%r12d - xorl %r14d,%edi - addl 32(%rsp),%r12d - - addl 60(%rsp),%r12d - movl %r9d,%r13d - addl %edi,%r12d - movl %ebx,%r14d - rorl $14,%r13d - movl %r10d,%edi - - xorl %r9d,%r13d - rorl $9,%r14d - xorl %r11d,%edi - - movl %r12d,60(%rsp) - xorl %ebx,%r14d - andl %r9d,%edi - - rorl $5,%r13d - addl %eax,%r12d - xorl %r11d,%edi - - rorl $11,%r14d - xorl %r9d,%r13d - addl %edi,%r12d - - movl %ebx,%edi - addl (%rbp),%r12d - xorl %ebx,%r14d - - xorl %ecx,%edi - rorl $6,%r13d - movl %ecx,%eax - - andl %edi,%r15d - rorl $2,%r14d - addl %r13d,%r12d - - xorl %r15d,%eax - addl %r12d,%r8d - addl %r12d,%eax - - leaq 20(%rbp),%rbp - cmpb $0,3(%rbp) - jnz L$rounds_16_xx - - movq 64+0(%rsp),%rdi - addl %r14d,%eax - leaq 64(%rsi),%rsi - - addl 0(%rdi),%eax - addl 4(%rdi),%ebx - addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - 
movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb L$loop - - movq 88(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue: - .byte 0xf3,0xc3 - - -.p2align 6 - -K256: -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.long 
0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff -.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff -.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 -.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 -.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 - -.p2align 6 -sha256_block_data_order_ssse3: - -L$ssse3_shortcut: - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - shlq $4,%rdx - subq $96,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) - -L$prologue_ssse3: - - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - - - jmp L$loop_ssse3 -.p2align 4 -L$loop_ssse3: - movdqa K256+512(%rip),%xmm7 - movdqu 0(%rsi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqu 32(%rsi),%xmm2 -.byte 102,15,56,0,199 - movdqu 48(%rsi),%xmm3 - leaq K256(%rip),%rbp -.byte 102,15,56,0,207 - movdqa 0(%rbp),%xmm4 - movdqa 32(%rbp),%xmm5 -.byte 102,15,56,0,215 - paddd %xmm0,%xmm4 - movdqa 64(%rbp),%xmm6 -.byte 102,15,56,0,223 - movdqa 96(%rbp),%xmm7 - paddd %xmm1,%xmm5 - paddd %xmm2,%xmm6 - paddd %xmm3,%xmm7 - movdqa %xmm4,0(%rsp) - movl %eax,%r14d - movdqa %xmm5,16(%rsp) - movl %ebx,%edi - movdqa %xmm6,32(%rsp) - xorl %ecx,%edi - movdqa %xmm7,48(%rsp) - movl %r8d,%r13d - jmp L$ssse3_00_47 - -.p2align 4 -L$ssse3_00_47: - subq $-128,%rbp - rorl $14,%r13d - movdqa %xmm1,%xmm4 - movl %r14d,%eax - movl %r9d,%r12d - movdqa %xmm3,%xmm7 - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl 
%eax,%r14d -.byte 102,15,58,15,224,4 - andl %r8d,%r12d - xorl %r8d,%r13d -.byte 102,15,58,15,250,4 - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %ebx,%r15d - addl %r12d,%r11d - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - paddd %xmm7,%xmm0 - rorl $2,%r14d - addl %r11d,%edx - psrld $7,%xmm6 - addl %edi,%r11d - movl %edx,%r13d - pshufd $250,%xmm3,%xmm7 - addl %r11d,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%r11d - movl %r8d,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %r11d,%r14d - pxor %xmm5,%xmm4 - andl %edx,%r12d - xorl %edx,%r13d - pslld $11,%xmm5 - addl 4(%rsp),%r10d - movl %r11d,%edi - pxor %xmm6,%xmm4 - xorl %r9d,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %eax,%edi - addl %r12d,%r10d - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - psrld $10,%xmm7 - addl %r13d,%r10d - xorl %eax,%r15d - paddd %xmm4,%xmm0 - rorl $2,%r14d - addl %r10d,%ecx - psrlq $17,%xmm6 - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %ecx,%r13d - xorl %r8d,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - psrldq $8,%xmm7 - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - paddd %xmm7,%xmm0 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - pshufd $80,%xmm0,%xmm7 - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - movdqa %xmm7,%xmm6 - addl %edi,%r9d - movl %ebx,%r13d - psrld $10,%xmm7 - addl %r9d,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%r9d - movl %ecx,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl 
%r9d,%r14d - psrlq $2,%xmm6 - andl %ebx,%r12d - xorl %ebx,%r13d - addl 12(%rsp),%r8d - pxor %xmm6,%xmm7 - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %r10d,%edi - addl %r12d,%r8d - movdqa 0(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - paddd %xmm7,%xmm0 - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - paddd %xmm0,%xmm6 - movl %eax,%r13d - addl %r8d,%r14d - movdqa %xmm6,0(%rsp) - rorl $14,%r13d - movdqa %xmm2,%xmm4 - movl %r14d,%r8d - movl %ebx,%r12d - movdqa %xmm0,%xmm7 - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d -.byte 102,15,58,15,225,4 - andl %eax,%r12d - xorl %eax,%r13d -.byte 102,15,58,15,251,4 - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %r9d,%r15d - addl %r12d,%edx - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - paddd %xmm7,%xmm1 - rorl $2,%r14d - addl %edx,%r11d - psrld $7,%xmm6 - addl %edi,%edx - movl %r11d,%r13d - pshufd $250,%xmm0,%xmm7 - addl %edx,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%edx - movl %eax,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %edx,%r14d - pxor %xmm5,%xmm4 - andl %r11d,%r12d - xorl %r11d,%r13d - pslld $11,%xmm5 - addl 20(%rsp),%ecx - movl %edx,%edi - pxor %xmm6,%xmm4 - xorl %ebx,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %r8d,%edi - addl %r12d,%ecx - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - psrld $10,%xmm7 - addl %r13d,%ecx - xorl %r8d,%r15d - paddd %xmm4,%xmm1 - rorl $2,%r14d - addl %ecx,%r10d - psrlq $17,%xmm6 - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %r10d,%r13d - xorl %eax,%r12d - pxor 
%xmm6,%xmm7 - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - psrldq $8,%xmm7 - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - paddd %xmm7,%xmm1 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - pshufd $80,%xmm1,%xmm7 - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - movdqa %xmm7,%xmm6 - addl %edi,%ebx - movl %r9d,%r13d - psrld $10,%xmm7 - addl %ebx,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%ebx - movl %r10d,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - psrlq $2,%xmm6 - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - pxor %xmm6,%xmm7 - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %ecx,%edi - addl %r12d,%eax - movdqa 32(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - paddd %xmm7,%xmm1 - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - paddd %xmm1,%xmm6 - movl %r8d,%r13d - addl %eax,%r14d - movdqa %xmm6,16(%rsp) - rorl $14,%r13d - movdqa %xmm3,%xmm4 - movl %r14d,%eax - movl %r9d,%r12d - movdqa %xmm1,%xmm7 - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d -.byte 102,15,58,15,226,4 - andl %r8d,%r12d - xorl %r8d,%r13d -.byte 102,15,58,15,248,4 - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %ebx,%r15d - addl %r12d,%r11d - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - paddd %xmm7,%xmm2 - rorl $2,%r14d - addl %r11d,%edx - psrld $7,%xmm6 - addl %edi,%r11d - movl %edx,%r13d - pshufd $250,%xmm1,%xmm7 - addl %r11d,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%r11d - movl %r8d,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d 
- psrld $11,%xmm6 - xorl %r11d,%r14d - pxor %xmm5,%xmm4 - andl %edx,%r12d - xorl %edx,%r13d - pslld $11,%xmm5 - addl 36(%rsp),%r10d - movl %r11d,%edi - pxor %xmm6,%xmm4 - xorl %r9d,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %eax,%edi - addl %r12d,%r10d - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - psrld $10,%xmm7 - addl %r13d,%r10d - xorl %eax,%r15d - paddd %xmm4,%xmm2 - rorl $2,%r14d - addl %r10d,%ecx - psrlq $17,%xmm6 - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %ecx,%r13d - xorl %r8d,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - psrldq $8,%xmm7 - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - paddd %xmm7,%xmm2 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - pshufd $80,%xmm2,%xmm7 - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - movdqa %xmm7,%xmm6 - addl %edi,%r9d - movl %ebx,%r13d - psrld $10,%xmm7 - addl %r9d,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%r9d - movl %ecx,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - psrlq $2,%xmm6 - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - pxor %xmm6,%xmm7 - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %r10d,%edi - addl %r12d,%r8d - movdqa 64(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - paddd %xmm7,%xmm2 - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - paddd %xmm2,%xmm6 - movl %eax,%r13d - addl %r8d,%r14d - movdqa %xmm6,32(%rsp) - rorl $14,%r13d - movdqa %xmm0,%xmm4 - movl %r14d,%r8d - movl %ebx,%r12d - movdqa %xmm2,%xmm7 - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl 
%r8d,%r14d -.byte 102,15,58,15,227,4 - andl %eax,%r12d - xorl %eax,%r13d -.byte 102,15,58,15,249,4 - addl 48(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - movdqa %xmm4,%xmm5 - xorl %r9d,%r15d - addl %r12d,%edx - movdqa %xmm4,%xmm6 - rorl $6,%r13d - andl %r15d,%edi - psrld $3,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - paddd %xmm7,%xmm3 - rorl $2,%r14d - addl %edx,%r11d - psrld $7,%xmm6 - addl %edi,%edx - movl %r11d,%r13d - pshufd $250,%xmm2,%xmm7 - addl %edx,%r14d - rorl $14,%r13d - pslld $14,%xmm5 - movl %r14d,%edx - movl %eax,%r12d - pxor %xmm6,%xmm4 - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - psrld $11,%xmm6 - xorl %edx,%r14d - pxor %xmm5,%xmm4 - andl %r11d,%r12d - xorl %r11d,%r13d - pslld $11,%xmm5 - addl 52(%rsp),%ecx - movl %edx,%edi - pxor %xmm6,%xmm4 - xorl %ebx,%r12d - rorl $11,%r14d - movdqa %xmm7,%xmm6 - xorl %r8d,%edi - addl %r12d,%ecx - pxor %xmm5,%xmm4 - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - psrld $10,%xmm7 - addl %r13d,%ecx - xorl %r8d,%r15d - paddd %xmm4,%xmm3 - rorl $2,%r14d - addl %ecx,%r10d - psrlq $17,%xmm6 - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - pxor %xmm6,%xmm7 - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - psrlq $2,%xmm6 - xorl %r10d,%r13d - xorl %eax,%r12d - pxor %xmm6,%xmm7 - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - pshufd $128,%xmm7,%xmm7 - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - psrldq $8,%xmm7 - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - paddd %xmm7,%xmm3 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - pshufd $80,%xmm3,%xmm7 - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - movdqa %xmm7,%xmm6 - addl %edi,%ebx - movl %r9d,%r13d - psrld $10,%xmm7 - addl %ebx,%r14d - rorl $14,%r13d - psrlq $17,%xmm6 - movl %r14d,%ebx - movl %r10d,%r12d - pxor %xmm6,%xmm7 - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d 
- psrlq $2,%xmm6 - andl %r9d,%r12d - xorl %r9d,%r13d - addl 60(%rsp),%eax - pxor %xmm6,%xmm7 - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - pshufd $8,%xmm7,%xmm7 - xorl %ecx,%edi - addl %r12d,%eax - movdqa 96(%rbp),%xmm6 - rorl $6,%r13d - andl %edi,%r15d - pslldq $8,%xmm7 - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - paddd %xmm7,%xmm3 - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - paddd %xmm3,%xmm6 - movl %r8d,%r13d - addl %eax,%r14d - movdqa %xmm6,48(%rsp) - cmpb $0,131(%rbp) - jne L$ssse3_00_47 - rorl $14,%r13d - movl %r14d,%eax - movl %r9d,%r12d - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - rorl $6,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - rorl $2,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - rorl $14,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - rorl $11,%r14d - xorl %eax,%edi - addl %r12d,%r10d - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - rorl $2,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - rorl $14,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - rorl $9,%r14d - xorl 
%ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - rorl $6,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - rorl $14,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - rorl $6,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - rorl $2,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - rorl $14,%r13d - movl %r14d,%edx - movl %eax,%r12d - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - rorl $11,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - rorl $2,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - rorl $14,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl 
%r11d,%r12d - rorl $11,%r14d - xorl %ecx,%edi - addl %r12d,%eax - rorl $6,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - rorl $14,%r13d - movl %r14d,%eax - movl %r9d,%r12d - rorl $9,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - rorl $5,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - rorl $11,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - rorl $6,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - rorl $2,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - rorl $14,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - rorl $9,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - rorl $5,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - rorl $11,%r14d - xorl %eax,%edi - addl %r12d,%r10d - rorl $6,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - rorl $2,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - rorl $14,%r13d - movl %r14d,%r10d - movl %edx,%r12d - rorl $9,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - rorl $5,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - rorl $11,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - rorl $6,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - rorl $2,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - rorl $14,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - rorl $9,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - rorl $5,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - rorl $11,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - rorl $6,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl 
%r10d,%r15d - rorl $2,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - rorl $14,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - rorl $9,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - rorl $5,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 48(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - rorl $11,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - rorl $6,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - rorl $2,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - rorl $14,%r13d - movl %r14d,%edx - movl %eax,%r12d - rorl $9,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - rorl $5,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - rorl $11,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - rorl $6,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - rorl $2,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - rorl $14,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - rorl $9,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - rorl $5,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - rorl $11,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - rorl $6,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - rorl $2,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - rorl $14,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - rorl $9,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - rorl $5,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - rorl $11,%r14d - xorl %ecx,%edi - addl %r12d,%eax - rorl $6,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - rorl $2,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - movq 64+0(%rsp),%rdi - movl %r14d,%eax - - addl 
0(%rdi),%eax - leaq 64(%rsi),%rsi - addl 4(%rdi),%ebx - addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb L$loop_ssse3 - - movq 88(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue_ssse3: - .byte 0xf3,0xc3 - - - -.p2align 6 -sha256_block_data_order_avx: - -L$avx_shortcut: - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - shlq $4,%rdx - subq $96,%rsp - leaq (%rsi,%rdx,4),%rdx - andq $-64,%rsp - movq %rdi,64+0(%rsp) - movq %rsi,64+8(%rsp) - movq %rdx,64+16(%rsp) - movq %rax,88(%rsp) - -L$prologue_avx: - - vzeroupper - movl 0(%rdi),%eax - movl 4(%rdi),%ebx - movl 8(%rdi),%ecx - movl 12(%rdi),%edx - movl 16(%rdi),%r8d - movl 20(%rdi),%r9d - movl 24(%rdi),%r10d - movl 28(%rdi),%r11d - vmovdqa K256+512+32(%rip),%xmm8 - vmovdqa K256+512+64(%rip),%xmm9 - jmp L$loop_avx -.p2align 4 -L$loop_avx: - vmovdqa K256+512(%rip),%xmm7 - vmovdqu 0(%rsi),%xmm0 - vmovdqu 16(%rsi),%xmm1 - vmovdqu 32(%rsi),%xmm2 - vmovdqu 48(%rsi),%xmm3 - vpshufb %xmm7,%xmm0,%xmm0 - leaq K256(%rip),%rbp - vpshufb %xmm7,%xmm1,%xmm1 - vpshufb %xmm7,%xmm2,%xmm2 - vpaddd 0(%rbp),%xmm0,%xmm4 - vpshufb %xmm7,%xmm3,%xmm3 - vpaddd 32(%rbp),%xmm1,%xmm5 - vpaddd 64(%rbp),%xmm2,%xmm6 - vpaddd 96(%rbp),%xmm3,%xmm7 - vmovdqa %xmm4,0(%rsp) - movl %eax,%r14d - vmovdqa %xmm5,16(%rsp) - movl %ebx,%edi - vmovdqa %xmm6,32(%rsp) - xorl %ecx,%edi - vmovdqa %xmm7,48(%rsp) - movl %r8d,%r13d - jmp L$avx_00_47 - -.p2align 4 -L$avx_00_47: - subq $-128,%rbp - vpalignr $4,%xmm0,%xmm1,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - vpalignr 
$4,%xmm2,%xmm3,%xmm7 - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - vpaddd %xmm7,%xmm0,%xmm0 - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - vpshufd $250,%xmm3,%xmm7 - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - vpsrld $11,%xmm6,%xmm6 - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - vpaddd %xmm4,%xmm0,%xmm0 - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - vpxor %xmm7,%xmm6,%xmm6 - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - vpaddd %xmm6,%xmm0,%xmm0 - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - vpshufd $80,%xmm0,%xmm7 - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - vpxor %xmm7,%xmm6,%xmm6 - xorl %r11d,%edi - shrdl 
$2,%r14d,%r14d - addl %r9d,%ebx - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - vpaddd %xmm6,%xmm0,%xmm0 - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - vpaddd 0(%rbp),%xmm0,%xmm6 - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - vmovdqa %xmm6,0(%rsp) - vpalignr $4,%xmm1,%xmm2,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - vpalignr $4,%xmm3,%xmm0,%xmm7 - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - vpaddd %xmm7,%xmm1,%xmm1 - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - vpshufd $250,%xmm0,%xmm7 - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - vpsrld $11,%xmm6,%xmm6 - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - 
xorl %edx,%r14d - vpaddd %xmm4,%xmm1,%xmm1 - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - vpxor %xmm7,%xmm6,%xmm6 - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - vpaddd %xmm6,%xmm1,%xmm1 - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - vpshufd $80,%xmm1,%xmm7 - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - vpxor %xmm7,%xmm6,%xmm6 - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - vpaddd %xmm6,%xmm1,%xmm1 - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - vpaddd 32(%rbp),%xmm1,%xmm6 - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - vmovdqa %xmm6,16(%rsp) - vpalignr $4,%xmm2,%xmm3,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - vpalignr $4,%xmm0,%xmm1,%xmm7 - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - vpaddd %xmm7,%xmm2,%xmm2 - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - 
xorl %ebx,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - vpshufd $250,%xmm1,%xmm7 - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - vpsrld $11,%xmm6,%xmm6 - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - vpaddd %xmm4,%xmm2,%xmm2 - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - vpxor %xmm7,%xmm6,%xmm6 - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - vpaddd %xmm6,%xmm2,%xmm2 - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - vpshufd $80,%xmm2,%xmm7 - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - vpxor %xmm7,%xmm6,%xmm6 - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - vpaddd %xmm6,%xmm2,%xmm2 - shrdl 
$5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - vpaddd 64(%rbp),%xmm2,%xmm6 - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - vmovdqa %xmm6,32(%rsp) - vpalignr $4,%xmm3,%xmm0,%xmm4 - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - vpalignr $4,%xmm1,%xmm2,%xmm7 - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - vpsrld $7,%xmm4,%xmm6 - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - vpaddd %xmm7,%xmm3,%xmm3 - xorl %eax,%r13d - addl 48(%rsp),%edx - movl %r8d,%r15d - vpsrld $3,%xmm4,%xmm7 - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - vpslld $14,%xmm4,%xmm5 - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - vpxor %xmm6,%xmm7,%xmm4 - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - vpshufd $250,%xmm2,%xmm7 - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - vpsrld $11,%xmm6,%xmm6 - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - vpxor %xmm5,%xmm4,%xmm4 - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - vpslld $11,%xmm5,%xmm5 - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - vpxor %xmm6,%xmm4,%xmm4 - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - vpsrld $10,%xmm7,%xmm6 - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - vpxor %xmm5,%xmm4,%xmm4 - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - vpsrlq $17,%xmm7,%xmm7 - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - vpaddd %xmm4,%xmm3,%xmm3 - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - vpxor %xmm7,%xmm6,%xmm6 - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - vpsrlq $2,%xmm7,%xmm7 - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - vpxor %xmm7,%xmm6,%xmm6 - movl %r11d,%r12d - 
shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - vpshufb %xmm8,%xmm6,%xmm6 - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - vpaddd %xmm6,%xmm3,%xmm3 - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - vpshufd $80,%xmm3,%xmm7 - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - vpsrld $10,%xmm7,%xmm6 - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - vpsrlq $17,%xmm7,%xmm7 - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - vpxor %xmm7,%xmm6,%xmm6 - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - vpsrlq $2,%xmm7,%xmm7 - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - vpxor %xmm7,%xmm6,%xmm6 - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - vpshufb %xmm9,%xmm6,%xmm6 - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - vpaddd %xmm6,%xmm3,%xmm3 - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - vpaddd 96(%rbp),%xmm3,%xmm6 - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - vmovdqa %xmm6,48(%rsp) - cmpb $0,131(%rbp) - jne L$avx_00_47 - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 0(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - xorl %r11d,%r14d - andl %edx,%r12d - xorl 
%edx,%r13d - addl 4(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 8(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 12(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 16(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - shrdl $5,%r13d,%r13d - xorl %edx,%r14d - andl 
%r11d,%r12d - xorl %r11d,%r13d - addl 20(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 24(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 28(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%eax - movl %r9d,%r12d - shrdl $9,%r14d,%r14d - xorl %r8d,%r13d - xorl %r10d,%r12d - shrdl $5,%r13d,%r13d - xorl %eax,%r14d - andl %r8d,%r12d - xorl %r8d,%r13d - addl 32(%rsp),%r11d - movl %eax,%r15d - xorl %r10d,%r12d - shrdl $11,%r14d,%r14d - xorl %ebx,%r15d - addl %r12d,%r11d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %eax,%r14d - addl %r13d,%r11d - xorl %ebx,%edi - shrdl $2,%r14d,%r14d - addl %r11d,%edx - addl %edi,%r11d - movl %edx,%r13d - addl %r11d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r11d - movl %r8d,%r12d - shrdl $9,%r14d,%r14d - xorl %edx,%r13d - xorl %r9d,%r12d - shrdl $5,%r13d,%r13d - 
xorl %r11d,%r14d - andl %edx,%r12d - xorl %edx,%r13d - addl 36(%rsp),%r10d - movl %r11d,%edi - xorl %r9d,%r12d - shrdl $11,%r14d,%r14d - xorl %eax,%edi - addl %r12d,%r10d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r11d,%r14d - addl %r13d,%r10d - xorl %eax,%r15d - shrdl $2,%r14d,%r14d - addl %r10d,%ecx - addl %r15d,%r10d - movl %ecx,%r13d - addl %r10d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r10d - movl %edx,%r12d - shrdl $9,%r14d,%r14d - xorl %ecx,%r13d - xorl %r8d,%r12d - shrdl $5,%r13d,%r13d - xorl %r10d,%r14d - andl %ecx,%r12d - xorl %ecx,%r13d - addl 40(%rsp),%r9d - movl %r10d,%r15d - xorl %r8d,%r12d - shrdl $11,%r14d,%r14d - xorl %r11d,%r15d - addl %r12d,%r9d - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r10d,%r14d - addl %r13d,%r9d - xorl %r11d,%edi - shrdl $2,%r14d,%r14d - addl %r9d,%ebx - addl %edi,%r9d - movl %ebx,%r13d - addl %r9d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r9d - movl %ecx,%r12d - shrdl $9,%r14d,%r14d - xorl %ebx,%r13d - xorl %edx,%r12d - shrdl $5,%r13d,%r13d - xorl %r9d,%r14d - andl %ebx,%r12d - xorl %ebx,%r13d - addl 44(%rsp),%r8d - movl %r9d,%edi - xorl %edx,%r12d - shrdl $11,%r14d,%r14d - xorl %r10d,%edi - addl %r12d,%r8d - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %r9d,%r14d - addl %r13d,%r8d - xorl %r10d,%r15d - shrdl $2,%r14d,%r14d - addl %r8d,%eax - addl %r15d,%r8d - movl %eax,%r13d - addl %r8d,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%r8d - movl %ebx,%r12d - shrdl $9,%r14d,%r14d - xorl %eax,%r13d - xorl %ecx,%r12d - shrdl $5,%r13d,%r13d - xorl %r8d,%r14d - andl %eax,%r12d - xorl %eax,%r13d - addl 48(%rsp),%edx - movl %r8d,%r15d - xorl %ecx,%r12d - shrdl $11,%r14d,%r14d - xorl %r9d,%r15d - addl %r12d,%edx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %r8d,%r14d - addl %r13d,%edx - xorl %r9d,%edi - shrdl $2,%r14d,%r14d - addl %edx,%r11d - addl %edi,%edx - movl %r11d,%r13d - addl %edx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%edx - movl %eax,%r12d - shrdl $9,%r14d,%r14d - xorl %r11d,%r13d - xorl %ebx,%r12d - 
shrdl $5,%r13d,%r13d - xorl %edx,%r14d - andl %r11d,%r12d - xorl %r11d,%r13d - addl 52(%rsp),%ecx - movl %edx,%edi - xorl %ebx,%r12d - shrdl $11,%r14d,%r14d - xorl %r8d,%edi - addl %r12d,%ecx - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %edx,%r14d - addl %r13d,%ecx - xorl %r8d,%r15d - shrdl $2,%r14d,%r14d - addl %ecx,%r10d - addl %r15d,%ecx - movl %r10d,%r13d - addl %ecx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ecx - movl %r11d,%r12d - shrdl $9,%r14d,%r14d - xorl %r10d,%r13d - xorl %eax,%r12d - shrdl $5,%r13d,%r13d - xorl %ecx,%r14d - andl %r10d,%r12d - xorl %r10d,%r13d - addl 56(%rsp),%ebx - movl %ecx,%r15d - xorl %eax,%r12d - shrdl $11,%r14d,%r14d - xorl %edx,%r15d - addl %r12d,%ebx - shrdl $6,%r13d,%r13d - andl %r15d,%edi - xorl %ecx,%r14d - addl %r13d,%ebx - xorl %edx,%edi - shrdl $2,%r14d,%r14d - addl %ebx,%r9d - addl %edi,%ebx - movl %r9d,%r13d - addl %ebx,%r14d - shrdl $14,%r13d,%r13d - movl %r14d,%ebx - movl %r10d,%r12d - shrdl $9,%r14d,%r14d - xorl %r9d,%r13d - xorl %r11d,%r12d - shrdl $5,%r13d,%r13d - xorl %ebx,%r14d - andl %r9d,%r12d - xorl %r9d,%r13d - addl 60(%rsp),%eax - movl %ebx,%edi - xorl %r11d,%r12d - shrdl $11,%r14d,%r14d - xorl %ecx,%edi - addl %r12d,%eax - shrdl $6,%r13d,%r13d - andl %edi,%r15d - xorl %ebx,%r14d - addl %r13d,%eax - xorl %ecx,%r15d - shrdl $2,%r14d,%r14d - addl %eax,%r8d - addl %r15d,%eax - movl %r8d,%r13d - addl %eax,%r14d - movq 64+0(%rsp),%rdi - movl %r14d,%eax - - addl 0(%rdi),%eax - leaq 64(%rsi),%rsi - addl 4(%rdi),%ebx - addl 8(%rdi),%ecx - addl 12(%rdi),%edx - addl 16(%rdi),%r8d - addl 20(%rdi),%r9d - addl 24(%rdi),%r10d - addl 28(%rdi),%r11d - - cmpq 64+16(%rsp),%rsi - - movl %eax,0(%rdi) - movl %ebx,4(%rdi) - movl %ecx,8(%rdi) - movl %edx,12(%rdi) - movl %r8d,16(%rdi) - movl %r9d,20(%rdi) - movl %r10d,24(%rdi) - movl %r11d,28(%rdi) - jb L$loop_avx - - movq 88(%rsp),%rsi - - vzeroupper - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq 
-8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue_avx: - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S deleted file mode 100644 index c550e794ac..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S +++ /dev/null @@ -1,2990 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -.globl _sha512_block_data_order -.private_extern _sha512_block_data_order - -.p2align 4 -_sha512_block_data_order: - - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 0(%r11),%r9d - movl 4(%r11),%r10d - movl 8(%r11),%r11d - andl $1073741824,%r9d - andl $268435968,%r10d - orl %r9d,%r10d - cmpl $1342177792,%r10d - je L$avx_shortcut - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - shlq $4,%rdx - subq $128+32,%rsp - leaq (%rsi,%rdx,8),%rdx - andq $-64,%rsp - movq %rdi,128+0(%rsp) - movq %rsi,128+8(%rsp) - movq %rdx,128+16(%rsp) - movq %rax,152(%rsp) - -L$prologue: - - movq 0(%rdi),%rax - movq 8(%rdi),%rbx - movq 16(%rdi),%rcx - movq 24(%rdi),%rdx - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - movq 56(%rdi),%r11 - jmp L$loop - -.p2align 4 -L$loop: - movq %rbx,%rdi - leaq K512(%rip),%rbp - xorq %rcx,%rdi - movq 0(%rsi),%r12 - movq %r8,%r13 - movq %rax,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,0(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq 
(%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - addq %r14,%r11 - movq 8(%rsi),%r12 - movq %rdx,%r13 - movq %r11,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,8(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - addq %r14,%r10 - movq 16(%rsi),%r12 - movq %rcx,%r13 - movq %r10,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,16(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - addq %r14,%r9 - movq 24(%rsi),%r12 - movq %rbx,%r13 - movq %r9,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,24(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - addq %r14,%r8 - movq 32(%rsi),%r12 - movq %rax,%r13 - movq %r8,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rbx,%r15 - - 
xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,32(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - addq %r14,%rdx - movq 40(%rsi),%r12 - movq %r11,%r13 - movq %rdx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,40(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - addq %r14,%rcx - movq 48(%rsi),%r12 - movq %r10,%r13 - movq %rcx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,48(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - addq %r14,%rbx - movq 56(%rsi),%r12 - movq %r9,%r13 - movq %rbx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,56(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq 
%rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - addq %r14,%rax - movq 64(%rsi),%r12 - movq %r8,%r13 - movq %rax,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,64(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - addq %r14,%r11 - movq 72(%rsi),%r12 - movq %rdx,%r13 - movq %r11,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,72(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - addq %r14,%r10 - movq 80(%rsi),%r12 - movq %rcx,%r13 - movq %r10,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,80(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - addq %r14,%r9 - movq 88(%rsi),%r12 - movq %rbx,%r13 - movq %r9,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,88(%rsp) - xorq 
%r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - addq %r14,%r8 - movq 96(%rsi),%r12 - movq %rax,%r13 - movq %r8,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,96(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - addq %r14,%rdx - movq 104(%rsi),%r12 - movq %r11,%r13 - movq %rdx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,104(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - addq %r14,%rcx - movq 112(%rsi),%r12 - movq %r10,%r13 - movq %rcx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,112(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq 
%r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - addq %r14,%rbx - movq 120(%rsi),%r12 - movq %r9,%r13 - movq %rbx,%r14 - bswapq %r12 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,120(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - jmp L$rounds_16_xx -.p2align 4 -L$rounds_16_xx: - movq 8(%rsp),%r13 - movq 112(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rax - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 72(%rsp),%r12 - - addq 0(%rsp),%r12 - movq %r8,%r13 - addq %r15,%r12 - movq %rax,%r14 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,0(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - movq 16(%rsp),%r13 - movq 120(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r11 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 80(%rsp),%r12 - - addq 8(%rsp),%r12 - movq %rdx,%r13 - addq %rdi,%r12 - movq %r11,%r14 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,8(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq 
%r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - movq 24(%rsp),%r13 - movq 0(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r10 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 88(%rsp),%r12 - - addq 16(%rsp),%r12 - movq %rcx,%r13 - addq %r15,%r12 - movq %r10,%r14 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,16(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - movq 32(%rsp),%r13 - movq 8(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r9 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 96(%rsp),%r12 - - addq 24(%rsp),%r12 - movq %rbx,%r13 - addq %rdi,%r12 - movq %r9,%r14 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,24(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - movq 40(%rsp),%r13 - movq 16(%rsp),%r15 
- - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r8 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 104(%rsp),%r12 - - addq 32(%rsp),%r12 - movq %rax,%r13 - addq %r15,%r12 - movq %r8,%r14 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,32(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - movq 48(%rsp),%r13 - movq 24(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rdx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 112(%rsp),%r12 - - addq 40(%rsp),%r12 - movq %r11,%r13 - addq %rdi,%r12 - movq %rdx,%r14 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,40(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 - addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - movq 56(%rsp),%r13 - movq 32(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rcx - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 120(%rsp),%r12 - - addq 48(%rsp),%r12 - movq %r10,%r13 - addq %r15,%r12 - movq %rcx,%r14 - rorq $23,%r13 - movq 
%r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,48(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - movq 64(%rsp),%r13 - movq 40(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rbx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 0(%rsp),%r12 - - addq 56(%rsp),%r12 - movq %r9,%r13 - addq %rdi,%r12 - movq %rbx,%r14 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,56(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - movq 72(%rsp),%r13 - movq 48(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rax - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 8(%rsp),%r12 - - addq 64(%rsp),%r12 - movq %r8,%r13 - addq %r15,%r12 - movq %rax,%r14 - rorq $23,%r13 - movq %r9,%r15 - - xorq %r8,%r13 - rorq $5,%r14 - xorq %r10,%r15 - - movq %r12,64(%rsp) - xorq %rax,%r14 - andq %r8,%r15 - - rorq $4,%r13 - addq %r11,%r12 - xorq %r10,%r15 - - rorq $6,%r14 - xorq %r8,%r13 - addq %r15,%r12 - - movq %rax,%r15 - addq (%rbp),%r12 - xorq %rax,%r14 - - xorq %rbx,%r15 - rorq $14,%r13 - movq %rbx,%r11 - - andq %r15,%rdi - 
rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r11 - addq %r12,%rdx - addq %r12,%r11 - - leaq 8(%rbp),%rbp - movq 80(%rsp),%r13 - movq 56(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r11 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 16(%rsp),%r12 - - addq 72(%rsp),%r12 - movq %rdx,%r13 - addq %rdi,%r12 - movq %r11,%r14 - rorq $23,%r13 - movq %r8,%rdi - - xorq %rdx,%r13 - rorq $5,%r14 - xorq %r9,%rdi - - movq %r12,72(%rsp) - xorq %r11,%r14 - andq %rdx,%rdi - - rorq $4,%r13 - addq %r10,%r12 - xorq %r9,%rdi - - rorq $6,%r14 - xorq %rdx,%r13 - addq %rdi,%r12 - - movq %r11,%rdi - addq (%rbp),%r12 - xorq %r11,%r14 - - xorq %rax,%rdi - rorq $14,%r13 - movq %rax,%r10 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r10 - addq %r12,%rcx - addq %r12,%r10 - - leaq 24(%rbp),%rbp - movq 88(%rsp),%r13 - movq 64(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r10 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 24(%rsp),%r12 - - addq 80(%rsp),%r12 - movq %rcx,%r13 - addq %r15,%r12 - movq %r10,%r14 - rorq $23,%r13 - movq %rdx,%r15 - - xorq %rcx,%r13 - rorq $5,%r14 - xorq %r8,%r15 - - movq %r12,80(%rsp) - xorq %r10,%r14 - andq %rcx,%r15 - - rorq $4,%r13 - addq %r9,%r12 - xorq %r8,%r15 - - rorq $6,%r14 - xorq %rcx,%r13 - addq %r15,%r12 - - movq %r10,%r15 - addq (%rbp),%r12 - xorq %r10,%r14 - - xorq %r11,%r15 - rorq $14,%r13 - movq %r11,%r9 - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%r9 - addq %r12,%rbx - addq %r12,%r9 - - leaq 8(%rbp),%rbp - movq 96(%rsp),%r13 - movq 72(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r9 - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq 
%r13,%r12 - xorq %r14,%rdi - addq 32(%rsp),%r12 - - addq 88(%rsp),%r12 - movq %rbx,%r13 - addq %rdi,%r12 - movq %r9,%r14 - rorq $23,%r13 - movq %rcx,%rdi - - xorq %rbx,%r13 - rorq $5,%r14 - xorq %rdx,%rdi - - movq %r12,88(%rsp) - xorq %r9,%r14 - andq %rbx,%rdi - - rorq $4,%r13 - addq %r8,%r12 - xorq %rdx,%rdi - - rorq $6,%r14 - xorq %rbx,%r13 - addq %rdi,%r12 - - movq %r9,%rdi - addq (%rbp),%r12 - xorq %r9,%r14 - - xorq %r10,%rdi - rorq $14,%r13 - movq %r10,%r8 - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%r8 - addq %r12,%rax - addq %r12,%r8 - - leaq 24(%rbp),%rbp - movq 104(%rsp),%r13 - movq 80(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%r8 - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 40(%rsp),%r12 - - addq 96(%rsp),%r12 - movq %rax,%r13 - addq %r15,%r12 - movq %r8,%r14 - rorq $23,%r13 - movq %rbx,%r15 - - xorq %rax,%r13 - rorq $5,%r14 - xorq %rcx,%r15 - - movq %r12,96(%rsp) - xorq %r8,%r14 - andq %rax,%r15 - - rorq $4,%r13 - addq %rdx,%r12 - xorq %rcx,%r15 - - rorq $6,%r14 - xorq %rax,%r13 - addq %r15,%r12 - - movq %r8,%r15 - addq (%rbp),%r12 - xorq %r8,%r14 - - xorq %r9,%r15 - rorq $14,%r13 - movq %r9,%rdx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rdx - addq %r12,%r11 - addq %r12,%rdx - - leaq 8(%rbp),%rbp - movq 112(%rsp),%r13 - movq 88(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rdx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 48(%rsp),%r12 - - addq 104(%rsp),%r12 - movq %r11,%r13 - addq %rdi,%r12 - movq %rdx,%r14 - rorq $23,%r13 - movq %rax,%rdi - - xorq %r11,%r13 - rorq $5,%r14 - xorq %rbx,%rdi - - movq %r12,104(%rsp) - xorq %rdx,%r14 - andq %r11,%rdi - - rorq $4,%r13 - addq %rcx,%r12 - xorq %rbx,%rdi - - rorq $6,%r14 - xorq %r11,%r13 
- addq %rdi,%r12 - - movq %rdx,%rdi - addq (%rbp),%r12 - xorq %rdx,%r14 - - xorq %r8,%rdi - rorq $14,%r13 - movq %r8,%rcx - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rcx - addq %r12,%r10 - addq %r12,%rcx - - leaq 24(%rbp),%rbp - movq 120(%rsp),%r13 - movq 96(%rsp),%r15 - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rcx - movq %r15,%r14 - rorq $42,%r15 - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%r15 - shrq $6,%r14 - - rorq $19,%r15 - xorq %r13,%r12 - xorq %r14,%r15 - addq 56(%rsp),%r12 - - addq 112(%rsp),%r12 - movq %r10,%r13 - addq %r15,%r12 - movq %rcx,%r14 - rorq $23,%r13 - movq %r11,%r15 - - xorq %r10,%r13 - rorq $5,%r14 - xorq %rax,%r15 - - movq %r12,112(%rsp) - xorq %rcx,%r14 - andq %r10,%r15 - - rorq $4,%r13 - addq %rbx,%r12 - xorq %rax,%r15 - - rorq $6,%r14 - xorq %r10,%r13 - addq %r15,%r12 - - movq %rcx,%r15 - addq (%rbp),%r12 - xorq %rcx,%r14 - - xorq %rdx,%r15 - rorq $14,%r13 - movq %rdx,%rbx - - andq %r15,%rdi - rorq $28,%r14 - addq %r13,%r12 - - xorq %rdi,%rbx - addq %r12,%r9 - addq %r12,%rbx - - leaq 8(%rbp),%rbp - movq 0(%rsp),%r13 - movq 104(%rsp),%rdi - - movq %r13,%r12 - rorq $7,%r13 - addq %r14,%rbx - movq %rdi,%r14 - rorq $42,%rdi - - xorq %r12,%r13 - shrq $7,%r12 - rorq $1,%r13 - xorq %r14,%rdi - shrq $6,%r14 - - rorq $19,%rdi - xorq %r13,%r12 - xorq %r14,%rdi - addq 64(%rsp),%r12 - - addq 120(%rsp),%r12 - movq %r9,%r13 - addq %rdi,%r12 - movq %rbx,%r14 - rorq $23,%r13 - movq %r10,%rdi - - xorq %r9,%r13 - rorq $5,%r14 - xorq %r11,%rdi - - movq %r12,120(%rsp) - xorq %rbx,%r14 - andq %r9,%rdi - - rorq $4,%r13 - addq %rax,%r12 - xorq %r11,%rdi - - rorq $6,%r14 - xorq %r9,%r13 - addq %rdi,%r12 - - movq %rbx,%rdi - addq (%rbp),%r12 - xorq %rbx,%r14 - - xorq %rcx,%rdi - rorq $14,%r13 - movq %rcx,%rax - - andq %rdi,%r15 - rorq $28,%r14 - addq %r13,%r12 - - xorq %r15,%rax - addq %r12,%r8 - addq %r12,%rax - - leaq 24(%rbp),%rbp - cmpb $0,7(%rbp) - jnz L$rounds_16_xx - - movq 128+0(%rsp),%rdi - addq %r14,%rax - 
leaq 128(%rsi),%rsi - - addq 0(%rdi),%rax - addq 8(%rdi),%rbx - addq 16(%rdi),%rcx - addq 24(%rdi),%rdx - addq 32(%rdi),%r8 - addq 40(%rdi),%r9 - addq 48(%rdi),%r10 - addq 56(%rdi),%r11 - - cmpq 128+16(%rsp),%rsi - - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - movq %r8,32(%rdi) - movq %r9,40(%rdi) - movq %r10,48(%rdi) - movq %r11,56(%rdi) - jb L$loop - - movq 152(%rsp),%rsi - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue: - .byte 0xf3,0xc3 - - -.p2align 6 - -K512: -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0x428a2f98d728ae22,0x7137449123ef65cd -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x3956c25bf348b538,0x59f111f1b605d019 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0xd807aa98a3030242,0x12835b0145706fbe -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0x9bdc06a725c71235,0xc19bf174cf692694 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0x983e5152ee66dfab,0xa831c66d2db43210 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 -.quad 
0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x06ca6351e003826f,0x142929670a0e6e70 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0x81c2c92e47edaee6,0x92722c851482353b -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xd192e819d6ef5218,0xd69906245565a910 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0xf40e35855771202a,0x106aa07032bbd1b8 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0x90befffa23631e28,0xa4506cebde82bde9 -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xca273eceea26619c,0xd186b8c721c0c207 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x113f9804bef90dae,0x1b710b35131c471b -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 0x28db77f523047d84,0x32caab7b40c72493 -.quad 
0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a -.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 -.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - -.quad 0x0001020304050607,0x08090a0b0c0d0e0f -.quad 0x0001020304050607,0x08090a0b0c0d0e0f -.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 - -.p2align 6 -sha512_block_data_order_avx: - -L$avx_shortcut: - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - shlq $4,%rdx - subq $160,%rsp - leaq (%rsi,%rdx,8),%rdx - andq $-64,%rsp - movq %rdi,128+0(%rsp) - movq %rsi,128+8(%rsp) - movq %rdx,128+16(%rsp) - movq %rax,152(%rsp) - -L$prologue_avx: - - vzeroupper - movq 0(%rdi),%rax - movq 8(%rdi),%rbx - movq 16(%rdi),%rcx - movq 24(%rdi),%rdx - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - movq 56(%rdi),%r11 - jmp L$loop_avx -.p2align 4 -L$loop_avx: - vmovdqa K512+1280(%rip),%xmm11 - vmovdqu 0(%rsi),%xmm0 - leaq K512+128(%rip),%rbp - vmovdqu 16(%rsi),%xmm1 - vmovdqu 32(%rsi),%xmm2 - vpshufb %xmm11,%xmm0,%xmm0 - vmovdqu 48(%rsi),%xmm3 - vpshufb %xmm11,%xmm1,%xmm1 - vmovdqu 64(%rsi),%xmm4 - vpshufb %xmm11,%xmm2,%xmm2 - vmovdqu 80(%rsi),%xmm5 - vpshufb %xmm11,%xmm3,%xmm3 - vmovdqu 96(%rsi),%xmm6 - vpshufb %xmm11,%xmm4,%xmm4 - vmovdqu 112(%rsi),%xmm7 - vpshufb %xmm11,%xmm5,%xmm5 - vpaddq -128(%rbp),%xmm0,%xmm8 - vpshufb %xmm11,%xmm6,%xmm6 - vpaddq -96(%rbp),%xmm1,%xmm9 - vpshufb %xmm11,%xmm7,%xmm7 - vpaddq -64(%rbp),%xmm2,%xmm10 - vpaddq -32(%rbp),%xmm3,%xmm11 - vmovdqa %xmm8,0(%rsp) - vpaddq 0(%rbp),%xmm4,%xmm8 - vmovdqa %xmm9,16(%rsp) - vpaddq 32(%rbp),%xmm5,%xmm9 - vmovdqa %xmm10,32(%rsp) - vpaddq 64(%rbp),%xmm6,%xmm10 - vmovdqa %xmm11,48(%rsp) - vpaddq 
96(%rbp),%xmm7,%xmm11 - vmovdqa %xmm8,64(%rsp) - movq %rax,%r14 - vmovdqa %xmm9,80(%rsp) - movq %rbx,%rdi - vmovdqa %xmm10,96(%rsp) - xorq %rcx,%rdi - vmovdqa %xmm11,112(%rsp) - movq %r8,%r13 - jmp L$avx_00_47 - -.p2align 4 -L$avx_00_47: - addq $256,%rbp - vpalignr $8,%xmm0,%xmm1,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rax - vpalignr $8,%xmm4,%xmm5,%xmm11 - movq %r9,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r8,%r13 - xorq %r10,%r12 - vpaddq %xmm11,%xmm0,%xmm0 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r8,%r12 - xorq %r8,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 0(%rsp),%r11 - movq %rax,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rbx,%r15 - addq %r12,%r11 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rax,%r14 - addq %r13,%r11 - vpxor %xmm10,%xmm8,%xmm8 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm7,%xmm11 - addq %r11,%rdx - addq %rdi,%r11 - vpxor %xmm9,%xmm8,%xmm8 - movq %rdx,%r13 - addq %r11,%r14 - vpsllq $3,%xmm7,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r11 - vpaddq %xmm8,%xmm0,%xmm0 - movq %r8,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm7,%xmm9 - xorq %rdx,%r13 - xorq %r9,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rdx,%r12 - xorq %rdx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 8(%rsp),%r10 - movq %r11,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r9,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rax,%rdi - addq %r12,%r10 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm0,%xmm0 - xorq %r11,%r14 - addq %r13,%r10 - vpaddq -128(%rbp),%xmm0,%xmm10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - vmovdqa %xmm10,0(%rsp) - vpalignr $8,%xmm1,%xmm2,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r10 - vpalignr $8,%xmm5,%xmm6,%xmm11 - movq %rdx,%r12 - shrdq 
$5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rcx,%r13 - xorq %r8,%r12 - vpaddq %xmm11,%xmm1,%xmm1 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rcx,%r12 - xorq %rcx,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 16(%rsp),%r9 - movq %r10,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r11,%r15 - addq %r12,%r9 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r10,%r14 - addq %r13,%r9 - vpxor %xmm10,%xmm8,%xmm8 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm0,%xmm11 - addq %r9,%rbx - addq %rdi,%r9 - vpxor %xmm9,%xmm8,%xmm8 - movq %rbx,%r13 - addq %r9,%r14 - vpsllq $3,%xmm0,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r9 - vpaddq %xmm8,%xmm1,%xmm1 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm0,%xmm9 - xorq %rbx,%r13 - xorq %rdx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rbx,%r12 - xorq %rbx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 24(%rsp),%r8 - movq %r9,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r10,%rdi - addq %r12,%r8 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm1,%xmm1 - xorq %r9,%r14 - addq %r13,%r8 - vpaddq -96(%rbp),%xmm1,%xmm10 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - vmovdqa %xmm10,16(%rsp) - vpalignr $8,%xmm2,%xmm3,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r8 - vpalignr $8,%xmm6,%xmm7,%xmm11 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rax,%r13 - xorq %rcx,%r12 - vpaddq %xmm11,%xmm2,%xmm2 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rax,%r12 - xorq %rax,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 32(%rsp),%rdx - movq %r8,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r9,%r15 - addq %r12,%rdx - vpxor 
%xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r8,%r14 - addq %r13,%rdx - vpxor %xmm10,%xmm8,%xmm8 - xorq %r9,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm1,%xmm11 - addq %rdx,%r11 - addq %rdi,%rdx - vpxor %xmm9,%xmm8,%xmm8 - movq %r11,%r13 - addq %rdx,%r14 - vpsllq $3,%xmm1,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rdx - vpaddq %xmm8,%xmm2,%xmm2 - movq %rax,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm1,%xmm9 - xorq %r11,%r13 - xorq %rbx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r11,%r12 - xorq %r11,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 40(%rsp),%rcx - movq %rdx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r8,%rdi - addq %r12,%rcx - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm2,%xmm2 - xorq %rdx,%r14 - addq %r13,%rcx - vpaddq -64(%rbp),%xmm2,%xmm10 - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - vmovdqa %xmm10,32(%rsp) - vpalignr $8,%xmm3,%xmm4,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rcx - vpalignr $8,%xmm7,%xmm0,%xmm11 - movq %r11,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r10,%r13 - xorq %rax,%r12 - vpaddq %xmm11,%xmm3,%xmm3 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r10,%r12 - xorq %r10,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 48(%rsp),%rbx - movq %rcx,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rdx,%r15 - addq %r12,%rbx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rcx,%r14 - addq %r13,%rbx - vpxor %xmm10,%xmm8,%xmm8 - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm2,%xmm11 - addq %rbx,%r9 - addq %rdi,%rbx - vpxor %xmm9,%xmm8,%xmm8 - movq %r9,%r13 - addq %rbx,%r14 - vpsllq $3,%xmm2,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rbx - vpaddq 
%xmm8,%xmm3,%xmm3 - movq %r10,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm2,%xmm9 - xorq %r9,%r13 - xorq %r11,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r9,%r12 - xorq %r9,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 56(%rsp),%rax - movq %rbx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r11,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rcx,%rdi - addq %r12,%rax - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm3,%xmm3 - xorq %rbx,%r14 - addq %r13,%rax - vpaddq -32(%rbp),%xmm3,%xmm10 - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - vmovdqa %xmm10,48(%rsp) - vpalignr $8,%xmm4,%xmm5,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rax - vpalignr $8,%xmm0,%xmm1,%xmm11 - movq %r9,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r8,%r13 - xorq %r10,%r12 - vpaddq %xmm11,%xmm4,%xmm4 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r8,%r12 - xorq %r8,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 64(%rsp),%r11 - movq %rax,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rbx,%r15 - addq %r12,%r11 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rax,%r14 - addq %r13,%r11 - vpxor %xmm10,%xmm8,%xmm8 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm3,%xmm11 - addq %r11,%rdx - addq %rdi,%r11 - vpxor %xmm9,%xmm8,%xmm8 - movq %rdx,%r13 - addq %r11,%r14 - vpsllq $3,%xmm3,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r11 - vpaddq %xmm8,%xmm4,%xmm4 - movq %r8,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm3,%xmm9 - xorq %rdx,%r13 - xorq %r9,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rdx,%r12 - xorq %rdx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 72(%rsp),%r10 - movq %r11,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r9,%r12 - shrdq $6,%r14,%r14 - vpxor 
%xmm10,%xmm11,%xmm11 - xorq %rax,%rdi - addq %r12,%r10 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm4,%xmm4 - xorq %r11,%r14 - addq %r13,%r10 - vpaddq 0(%rbp),%xmm4,%xmm10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - vmovdqa %xmm10,64(%rsp) - vpalignr $8,%xmm5,%xmm6,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%r10 - vpalignr $8,%xmm1,%xmm2,%xmm11 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rcx,%r13 - xorq %r8,%r12 - vpaddq %xmm11,%xmm5,%xmm5 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rcx,%r12 - xorq %rcx,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 80(%rsp),%r9 - movq %r10,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r11,%r15 - addq %r12,%r9 - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r10,%r14 - addq %r13,%r9 - vpxor %xmm10,%xmm8,%xmm8 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm4,%xmm11 - addq %r9,%rbx - addq %rdi,%r9 - vpxor %xmm9,%xmm8,%xmm8 - movq %rbx,%r13 - addq %r9,%r14 - vpsllq $3,%xmm4,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%r9 - vpaddq %xmm8,%xmm5,%xmm5 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm4,%xmm9 - xorq %rbx,%r13 - xorq %rdx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %rbx,%r12 - xorq %rbx,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 88(%rsp),%r8 - movq %r9,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r10,%rdi - addq %r12,%r8 - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm5,%xmm5 - xorq %r9,%r14 - addq %r13,%r8 - vpaddq 32(%rbp),%xmm5,%xmm10 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - vmovdqa %xmm10,80(%rsp) - vpalignr $8,%xmm6,%xmm7,%xmm8 - shrdq 
$23,%r13,%r13 - movq %r14,%r8 - vpalignr $8,%xmm2,%xmm3,%xmm11 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %rax,%r13 - xorq %rcx,%r12 - vpaddq %xmm11,%xmm6,%xmm6 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %rax,%r12 - xorq %rax,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 96(%rsp),%rdx - movq %r8,%r15 - vpxor %xmm10,%xmm11,%xmm8 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %r9,%r15 - addq %r12,%rdx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %r8,%r14 - addq %r13,%rdx - vpxor %xmm10,%xmm8,%xmm8 - xorq %r9,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm5,%xmm11 - addq %rdx,%r11 - addq %rdi,%rdx - vpxor %xmm9,%xmm8,%xmm8 - movq %r11,%r13 - addq %rdx,%r14 - vpsllq $3,%xmm5,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rdx - vpaddq %xmm8,%xmm6,%xmm6 - movq %rax,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm5,%xmm9 - xorq %r11,%r13 - xorq %rbx,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r11,%r12 - xorq %r11,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 104(%rsp),%rcx - movq %rdx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %r8,%rdi - addq %r12,%rcx - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm6,%xmm6 - xorq %rdx,%r14 - addq %r13,%rcx - vpaddq 64(%rbp),%xmm6,%xmm10 - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - vmovdqa %xmm10,96(%rsp) - vpalignr $8,%xmm7,%xmm0,%xmm8 - shrdq $23,%r13,%r13 - movq %r14,%rcx - vpalignr $8,%xmm3,%xmm4,%xmm11 - movq %r11,%r12 - shrdq $5,%r14,%r14 - vpsrlq $1,%xmm8,%xmm10 - xorq %r10,%r13 - xorq %rax,%r12 - vpaddq %xmm11,%xmm7,%xmm7 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - vpsrlq $7,%xmm8,%xmm11 - andq %r10,%r12 - xorq %r10,%r13 - vpsllq $56,%xmm8,%xmm9 - addq 112(%rsp),%rbx - movq %rcx,%r15 - vpxor %xmm10,%xmm11,%xmm8 - 
xorq %rax,%r12 - shrdq $6,%r14,%r14 - vpsrlq $7,%xmm10,%xmm10 - xorq %rdx,%r15 - addq %r12,%rbx - vpxor %xmm9,%xmm8,%xmm8 - shrdq $14,%r13,%r13 - andq %r15,%rdi - vpsllq $7,%xmm9,%xmm9 - xorq %rcx,%r14 - addq %r13,%rbx - vpxor %xmm10,%xmm8,%xmm8 - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - vpsrlq $6,%xmm6,%xmm11 - addq %rbx,%r9 - addq %rdi,%rbx - vpxor %xmm9,%xmm8,%xmm8 - movq %r9,%r13 - addq %rbx,%r14 - vpsllq $3,%xmm6,%xmm10 - shrdq $23,%r13,%r13 - movq %r14,%rbx - vpaddq %xmm8,%xmm7,%xmm7 - movq %r10,%r12 - shrdq $5,%r14,%r14 - vpsrlq $19,%xmm6,%xmm9 - xorq %r9,%r13 - xorq %r11,%r12 - vpxor %xmm10,%xmm11,%xmm11 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - vpsllq $42,%xmm10,%xmm10 - andq %r9,%r12 - xorq %r9,%r13 - vpxor %xmm9,%xmm11,%xmm11 - addq 120(%rsp),%rax - movq %rbx,%rdi - vpsrlq $42,%xmm9,%xmm9 - xorq %r11,%r12 - shrdq $6,%r14,%r14 - vpxor %xmm10,%xmm11,%xmm11 - xorq %rcx,%rdi - addq %r12,%rax - vpxor %xmm9,%xmm11,%xmm11 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - vpaddq %xmm11,%xmm7,%xmm7 - xorq %rbx,%r14 - addq %r13,%rax - vpaddq 96(%rbp),%xmm7,%xmm10 - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - vmovdqa %xmm10,112(%rsp) - cmpb $0,135(%rbp) - jne L$avx_00_47 - shrdq $23,%r13,%r13 - movq %r14,%rax - movq %r9,%r12 - shrdq $5,%r14,%r14 - xorq %r8,%r13 - xorq %r10,%r12 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - andq %r8,%r12 - xorq %r8,%r13 - addq 0(%rsp),%r11 - movq %rax,%r15 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - xorq %rbx,%r15 - addq %r12,%r11 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rax,%r14 - addq %r13,%r11 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - addq %r11,%rdx - addq %rdi,%r11 - movq %rdx,%r13 - addq %r11,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r11 - movq %r8,%r12 - shrdq $5,%r14,%r14 - xorq %rdx,%r13 - xorq %r9,%r12 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - andq %rdx,%r12 - xorq %rdx,%r13 - addq 8(%rsp),%r10 - movq %r11,%rdi - xorq %r9,%r12 - shrdq $6,%r14,%r14 - xorq %rax,%rdi - addq %r12,%r10 - 
shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r11,%r14 - addq %r13,%r10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r10 - movq %rdx,%r12 - shrdq $5,%r14,%r14 - xorq %rcx,%r13 - xorq %r8,%r12 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - andq %rcx,%r12 - xorq %rcx,%r13 - addq 16(%rsp),%r9 - movq %r10,%r15 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - xorq %r11,%r15 - addq %r12,%r9 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r10,%r14 - addq %r13,%r9 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - addq %r9,%rbx - addq %rdi,%r9 - movq %rbx,%r13 - addq %r9,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r9 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - xorq %rbx,%r13 - xorq %rdx,%r12 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - andq %rbx,%r12 - xorq %rbx,%r13 - addq 24(%rsp),%r8 - movq %r9,%rdi - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - xorq %r10,%rdi - addq %r12,%r8 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r9,%r14 - addq %r13,%r8 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r8 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - xorq %rax,%r13 - xorq %rcx,%r12 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - andq %rax,%r12 - xorq %rax,%r13 - addq 32(%rsp),%rdx - movq %r8,%r15 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - xorq %r9,%r15 - addq %r12,%rdx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r8,%r14 - addq %r13,%rdx - xorq %r9,%rdi - shrdq $28,%r14,%r14 - addq %rdx,%r11 - addq %rdi,%rdx - movq %r11,%r13 - addq %rdx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rdx - movq %rax,%r12 - shrdq $5,%r14,%r14 - xorq %r11,%r13 - xorq %rbx,%r12 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - andq %r11,%r12 - xorq %r11,%r13 - addq 40(%rsp),%rcx - movq %rdx,%rdi - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - xorq %r8,%rdi - addq %r12,%rcx - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rdx,%r14 - addq %r13,%rcx - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq 
%r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rcx - movq %r11,%r12 - shrdq $5,%r14,%r14 - xorq %r10,%r13 - xorq %rax,%r12 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - andq %r10,%r12 - xorq %r10,%r13 - addq 48(%rsp),%rbx - movq %rcx,%r15 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - xorq %rdx,%r15 - addq %r12,%rbx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rcx,%r14 - addq %r13,%rbx - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - addq %rbx,%r9 - addq %rdi,%rbx - movq %r9,%r13 - addq %rbx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rbx - movq %r10,%r12 - shrdq $5,%r14,%r14 - xorq %r9,%r13 - xorq %r11,%r12 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - andq %r9,%r12 - xorq %r9,%r13 - addq 56(%rsp),%rax - movq %rbx,%rdi - xorq %r11,%r12 - shrdq $6,%r14,%r14 - xorq %rcx,%rdi - addq %r12,%rax - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rbx,%r14 - addq %r13,%rax - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rax - movq %r9,%r12 - shrdq $5,%r14,%r14 - xorq %r8,%r13 - xorq %r10,%r12 - shrdq $4,%r13,%r13 - xorq %rax,%r14 - andq %r8,%r12 - xorq %r8,%r13 - addq 64(%rsp),%r11 - movq %rax,%r15 - xorq %r10,%r12 - shrdq $6,%r14,%r14 - xorq %rbx,%r15 - addq %r12,%r11 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rax,%r14 - addq %r13,%r11 - xorq %rbx,%rdi - shrdq $28,%r14,%r14 - addq %r11,%rdx - addq %rdi,%r11 - movq %rdx,%r13 - addq %r11,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r11 - movq %r8,%r12 - shrdq $5,%r14,%r14 - xorq %rdx,%r13 - xorq %r9,%r12 - shrdq $4,%r13,%r13 - xorq %r11,%r14 - andq %rdx,%r12 - xorq %rdx,%r13 - addq 72(%rsp),%r10 - movq %r11,%rdi - xorq %r9,%r12 - shrdq $6,%r14,%r14 - xorq %rax,%rdi - addq %r12,%r10 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r11,%r14 - addq %r13,%r10 - xorq %rax,%r15 - shrdq $28,%r14,%r14 - addq %r10,%rcx - addq %r15,%r10 - movq %rcx,%r13 - addq %r10,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r10 - movq %rdx,%r12 - shrdq $5,%r14,%r14 
- xorq %rcx,%r13 - xorq %r8,%r12 - shrdq $4,%r13,%r13 - xorq %r10,%r14 - andq %rcx,%r12 - xorq %rcx,%r13 - addq 80(%rsp),%r9 - movq %r10,%r15 - xorq %r8,%r12 - shrdq $6,%r14,%r14 - xorq %r11,%r15 - addq %r12,%r9 - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r10,%r14 - addq %r13,%r9 - xorq %r11,%rdi - shrdq $28,%r14,%r14 - addq %r9,%rbx - addq %rdi,%r9 - movq %rbx,%r13 - addq %r9,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r9 - movq %rcx,%r12 - shrdq $5,%r14,%r14 - xorq %rbx,%r13 - xorq %rdx,%r12 - shrdq $4,%r13,%r13 - xorq %r9,%r14 - andq %rbx,%r12 - xorq %rbx,%r13 - addq 88(%rsp),%r8 - movq %r9,%rdi - xorq %rdx,%r12 - shrdq $6,%r14,%r14 - xorq %r10,%rdi - addq %r12,%r8 - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %r9,%r14 - addq %r13,%r8 - xorq %r10,%r15 - shrdq $28,%r14,%r14 - addq %r8,%rax - addq %r15,%r8 - movq %rax,%r13 - addq %r8,%r14 - shrdq $23,%r13,%r13 - movq %r14,%r8 - movq %rbx,%r12 - shrdq $5,%r14,%r14 - xorq %rax,%r13 - xorq %rcx,%r12 - shrdq $4,%r13,%r13 - xorq %r8,%r14 - andq %rax,%r12 - xorq %rax,%r13 - addq 96(%rsp),%rdx - movq %r8,%r15 - xorq %rcx,%r12 - shrdq $6,%r14,%r14 - xorq %r9,%r15 - addq %r12,%rdx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %r8,%r14 - addq %r13,%rdx - xorq %r9,%rdi - shrdq $28,%r14,%r14 - addq %rdx,%r11 - addq %rdi,%rdx - movq %r11,%r13 - addq %rdx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rdx - movq %rax,%r12 - shrdq $5,%r14,%r14 - xorq %r11,%r13 - xorq %rbx,%r12 - shrdq $4,%r13,%r13 - xorq %rdx,%r14 - andq %r11,%r12 - xorq %r11,%r13 - addq 104(%rsp),%rcx - movq %rdx,%rdi - xorq %rbx,%r12 - shrdq $6,%r14,%r14 - xorq %r8,%rdi - addq %r12,%rcx - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rdx,%r14 - addq %r13,%rcx - xorq %r8,%r15 - shrdq $28,%r14,%r14 - addq %rcx,%r10 - addq %r15,%rcx - movq %r10,%r13 - addq %rcx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rcx - movq %r11,%r12 - shrdq $5,%r14,%r14 - xorq %r10,%r13 - xorq %rax,%r12 - shrdq $4,%r13,%r13 - xorq %rcx,%r14 - andq %r10,%r12 - xorq %r10,%r13 - addq 112(%rsp),%rbx - 
movq %rcx,%r15 - xorq %rax,%r12 - shrdq $6,%r14,%r14 - xorq %rdx,%r15 - addq %r12,%rbx - shrdq $14,%r13,%r13 - andq %r15,%rdi - xorq %rcx,%r14 - addq %r13,%rbx - xorq %rdx,%rdi - shrdq $28,%r14,%r14 - addq %rbx,%r9 - addq %rdi,%rbx - movq %r9,%r13 - addq %rbx,%r14 - shrdq $23,%r13,%r13 - movq %r14,%rbx - movq %r10,%r12 - shrdq $5,%r14,%r14 - xorq %r9,%r13 - xorq %r11,%r12 - shrdq $4,%r13,%r13 - xorq %rbx,%r14 - andq %r9,%r12 - xorq %r9,%r13 - addq 120(%rsp),%rax - movq %rbx,%rdi - xorq %r11,%r12 - shrdq $6,%r14,%r14 - xorq %rcx,%rdi - addq %r12,%rax - shrdq $14,%r13,%r13 - andq %rdi,%r15 - xorq %rbx,%r14 - addq %r13,%rax - xorq %rcx,%r15 - shrdq $28,%r14,%r14 - addq %rax,%r8 - addq %r15,%rax - movq %r8,%r13 - addq %rax,%r14 - movq 128+0(%rsp),%rdi - movq %r14,%rax - - addq 0(%rdi),%rax - leaq 128(%rsi),%rsi - addq 8(%rdi),%rbx - addq 16(%rdi),%rcx - addq 24(%rdi),%rdx - addq 32(%rdi),%r8 - addq 40(%rdi),%r9 - addq 48(%rdi),%r10 - addq 56(%rdi),%r11 - - cmpq 128+16(%rsp),%rsi - - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - movq %rcx,16(%rdi) - movq %rdx,24(%rdi) - movq %r8,32(%rdi) - movq %r9,40(%rdi) - movq %r10,48(%rdi) - movq %r11,56(%rdi) - jb L$loop_avx - - movq 152(%rsp),%rsi - - vzeroupper - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$epilogue_avx: - .byte 0xf3,0xc3 - - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S deleted file mode 100644 index cd52d67e60..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S +++ /dev/null @@ -1,1130 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - - - - - - - - - - - - - -.p2align 4 -_vpaes_encrypt_core: - - movq %rdx,%r9 - movq $16,%r11 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa L$k_ipt(%rip),%xmm2 - pandn %xmm0,%xmm1 - movdqu (%r9),%xmm5 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa L$k_ipt+16(%rip),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm5,%xmm2 - addq $16,%r9 - pxor %xmm2,%xmm0 - leaq L$k_mc_backward(%rip),%r10 - jmp L$enc_entry - -.p2align 4 -L$enc_loop: - - movdqa %xmm13,%xmm4 - movdqa %xmm12,%xmm0 -.byte 102,15,56,0,226 -.byte 102,15,56,0,195 - pxor %xmm5,%xmm4 - movdqa %xmm15,%xmm5 - pxor %xmm4,%xmm0 - movdqa -64(%r11,%r10,1),%xmm1 -.byte 102,15,56,0,234 - movdqa (%r11,%r10,1),%xmm4 - movdqa %xmm14,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm0,%xmm3 - pxor %xmm5,%xmm2 -.byte 102,15,56,0,193 - addq $16,%r9 - pxor %xmm2,%xmm0 -.byte 102,15,56,0,220 - addq $16,%r11 - pxor %xmm0,%xmm3 -.byte 102,15,56,0,193 - andq $0x30,%r11 - subq $1,%rax - pxor %xmm3,%xmm0 - -L$enc_entry: - - movdqa %xmm9,%xmm1 - movdqa %xmm11,%xmm5 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,232 - movdqa %xmm10,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm10,%xmm4 - pxor %xmm5,%xmm3 -.byte 102,15,56,0,224 - movdqa %xmm10,%xmm2 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,211 - movdqa %xmm10,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%r9),%xmm5 - pxor %xmm1,%xmm3 - jnz L$enc_loop - - - movdqa -96(%r10),%xmm4 - movdqa -80(%r10),%xmm0 -.byte 102,15,56,0,226 - pxor %xmm5,%xmm4 -.byte 102,15,56,0,195 - movdqa 64(%r11,%r10,1),%xmm1 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,193 - .byte 0xf3,0xc3 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -.p2align 4 -_vpaes_encrypt_core_2x: - - movq 
%rdx,%r9 - movq $16,%r11 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa %xmm9,%xmm7 - movdqa L$k_ipt(%rip),%xmm2 - movdqa %xmm2,%xmm8 - pandn %xmm0,%xmm1 - pandn %xmm6,%xmm7 - movdqu (%r9),%xmm5 - - psrld $4,%xmm1 - psrld $4,%xmm7 - pand %xmm9,%xmm0 - pand %xmm9,%xmm6 -.byte 102,15,56,0,208 -.byte 102,68,15,56,0,198 - movdqa L$k_ipt+16(%rip),%xmm0 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,247 - pxor %xmm5,%xmm2 - pxor %xmm5,%xmm8 - addq $16,%r9 - pxor %xmm2,%xmm0 - pxor %xmm8,%xmm6 - leaq L$k_mc_backward(%rip),%r10 - jmp L$enc2x_entry - -.p2align 4 -L$enc2x_loop: - - movdqa L$k_sb1(%rip),%xmm4 - movdqa L$k_sb1+16(%rip),%xmm0 - movdqa %xmm4,%xmm12 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,226 -.byte 102,69,15,56,0,224 -.byte 102,15,56,0,195 -.byte 102,65,15,56,0,243 - pxor %xmm5,%xmm4 - pxor %xmm5,%xmm12 - movdqa L$k_sb2(%rip),%xmm5 - movdqa %xmm5,%xmm13 - pxor %xmm4,%xmm0 - pxor %xmm12,%xmm6 - movdqa -64(%r11,%r10,1),%xmm1 - -.byte 102,15,56,0,234 -.byte 102,69,15,56,0,232 - movdqa (%r11,%r10,1),%xmm4 - - movdqa L$k_sb2+16(%rip),%xmm2 - movdqa %xmm2,%xmm8 -.byte 102,15,56,0,211 -.byte 102,69,15,56,0,195 - movdqa %xmm0,%xmm3 - movdqa %xmm6,%xmm11 - pxor %xmm5,%xmm2 - pxor %xmm13,%xmm8 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - addq $16,%r9 - pxor %xmm2,%xmm0 - pxor %xmm8,%xmm6 -.byte 102,15,56,0,220 -.byte 102,68,15,56,0,220 - addq $16,%r11 - pxor %xmm0,%xmm3 - pxor %xmm6,%xmm11 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - andq $0x30,%r11 - subq $1,%rax - pxor %xmm3,%xmm0 - pxor %xmm11,%xmm6 - -L$enc2x_entry: - - movdqa %xmm9,%xmm1 - movdqa %xmm9,%xmm7 - movdqa L$k_inv+16(%rip),%xmm5 - movdqa %xmm5,%xmm13 - pandn %xmm0,%xmm1 - pandn %xmm6,%xmm7 - psrld $4,%xmm1 - psrld $4,%xmm7 - pand %xmm9,%xmm0 - pand %xmm9,%xmm6 -.byte 102,15,56,0,232 -.byte 102,68,15,56,0,238 - movdqa %xmm10,%xmm3 - movdqa %xmm10,%xmm11 - pxor %xmm1,%xmm0 - pxor %xmm7,%xmm6 -.byte 102,15,56,0,217 -.byte 102,68,15,56,0,223 - movdqa %xmm10,%xmm4 - movdqa 
%xmm10,%xmm12 - pxor %xmm5,%xmm3 - pxor %xmm13,%xmm11 -.byte 102,15,56,0,224 -.byte 102,68,15,56,0,230 - movdqa %xmm10,%xmm2 - movdqa %xmm10,%xmm8 - pxor %xmm5,%xmm4 - pxor %xmm13,%xmm12 -.byte 102,15,56,0,211 -.byte 102,69,15,56,0,195 - movdqa %xmm10,%xmm3 - movdqa %xmm10,%xmm11 - pxor %xmm0,%xmm2 - pxor %xmm6,%xmm8 -.byte 102,15,56,0,220 -.byte 102,69,15,56,0,220 - movdqu (%r9),%xmm5 - - pxor %xmm1,%xmm3 - pxor %xmm7,%xmm11 - jnz L$enc2x_loop - - - movdqa -96(%r10),%xmm4 - movdqa -80(%r10),%xmm0 - movdqa %xmm4,%xmm12 - movdqa %xmm0,%xmm6 -.byte 102,15,56,0,226 -.byte 102,69,15,56,0,224 - pxor %xmm5,%xmm4 - pxor %xmm5,%xmm12 -.byte 102,15,56,0,195 -.byte 102,65,15,56,0,243 - movdqa 64(%r11,%r10,1),%xmm1 - - pxor %xmm4,%xmm0 - pxor %xmm12,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - .byte 0xf3,0xc3 - - - - - - - - - -.p2align 4 -_vpaes_decrypt_core: - - movq %rdx,%r9 - movl 240(%rdx),%eax - movdqa %xmm9,%xmm1 - movdqa L$k_dipt(%rip),%xmm2 - pandn %xmm0,%xmm1 - movq %rax,%r11 - psrld $4,%xmm1 - movdqu (%r9),%xmm5 - shlq $4,%r11 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa L$k_dipt+16(%rip),%xmm0 - xorq $0x30,%r11 - leaq L$k_dsbd(%rip),%r10 -.byte 102,15,56,0,193 - andq $0x30,%r11 - pxor %xmm5,%xmm2 - movdqa L$k_mc_forward+48(%rip),%xmm5 - pxor %xmm2,%xmm0 - addq $16,%r9 - addq %r10,%r11 - jmp L$dec_entry - -.p2align 4 -L$dec_loop: - - - - movdqa -32(%r10),%xmm4 - movdqa -16(%r10),%xmm1 -.byte 102,15,56,0,226 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 0(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 16(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 32(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 48(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - movdqa 64(%r10),%xmm4 - pxor %xmm1,%xmm0 - movdqa 80(%r10),%xmm1 - -.byte 102,15,56,0,226 -.byte 102,15,56,0,197 -.byte 102,15,56,0,203 - pxor %xmm4,%xmm0 - addq $16,%r9 -.byte 102,15,58,15,237,12 
- pxor %xmm1,%xmm0 - subq $1,%rax - -L$dec_entry: - - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - movdqa %xmm11,%xmm2 - psrld $4,%xmm1 - pand %xmm9,%xmm0 -.byte 102,15,56,0,208 - movdqa %xmm10,%xmm3 - pxor %xmm1,%xmm0 -.byte 102,15,56,0,217 - movdqa %xmm10,%xmm4 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm10,%xmm2 -.byte 102,15,56,0,211 - movdqa %xmm10,%xmm3 - pxor %xmm0,%xmm2 -.byte 102,15,56,0,220 - movdqu (%r9),%xmm0 - pxor %xmm1,%xmm3 - jnz L$dec_loop - - - movdqa 96(%r10),%xmm4 -.byte 102,15,56,0,226 - pxor %xmm0,%xmm4 - movdqa 112(%r10),%xmm0 - movdqa -352(%r11),%xmm2 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 -.byte 102,15,56,0,194 - .byte 0xf3,0xc3 - - - - - - - - - -.p2align 4 -_vpaes_schedule_core: - - - - - - - call _vpaes_preheat - movdqa L$k_rcon(%rip),%xmm8 - movdqu (%rdi),%xmm0 - - - movdqa %xmm0,%xmm3 - leaq L$k_ipt(%rip),%r11 - call _vpaes_schedule_transform - movdqa %xmm0,%xmm7 - - leaq L$k_sr(%rip),%r10 - testq %rcx,%rcx - jnz L$schedule_am_decrypting - - - movdqu %xmm0,(%rdx) - jmp L$schedule_go - -L$schedule_am_decrypting: - - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,217 - movdqu %xmm3,(%rdx) - xorq $0x30,%r8 - -L$schedule_go: - cmpl $192,%esi - ja L$schedule_256 - je L$schedule_192 - - - - - - - - - - -L$schedule_128: - movl $10,%esi - -L$oop_schedule_128: - call _vpaes_schedule_round - decq %rsi - jz L$schedule_mangle_last - call _vpaes_schedule_mangle - jmp L$oop_schedule_128 - - - - - - - - - - - - - - - - -.p2align 4 -L$schedule_192: - movdqu 8(%rdi),%xmm0 - call _vpaes_schedule_transform - movdqa %xmm0,%xmm6 - pxor %xmm4,%xmm4 - movhlps %xmm4,%xmm6 - movl $4,%esi - -L$oop_schedule_192: - call _vpaes_schedule_round -.byte 102,15,58,15,198,8 - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - call _vpaes_schedule_mangle - call _vpaes_schedule_round - decq %rsi - jz L$schedule_mangle_last - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - jmp L$oop_schedule_192 - - - - - - - - - - - 
-.p2align 4 -L$schedule_256: - movdqu 16(%rdi),%xmm0 - call _vpaes_schedule_transform - movl $7,%esi - -L$oop_schedule_256: - call _vpaes_schedule_mangle - movdqa %xmm0,%xmm6 - - - call _vpaes_schedule_round - decq %rsi - jz L$schedule_mangle_last - call _vpaes_schedule_mangle - - - pshufd $0xFF,%xmm0,%xmm0 - movdqa %xmm7,%xmm5 - movdqa %xmm6,%xmm7 - call _vpaes_schedule_low_round - movdqa %xmm5,%xmm7 - - jmp L$oop_schedule_256 - - - - - - - - - - - - -.p2align 4 -L$schedule_mangle_last: - - leaq L$k_deskew(%rip),%r11 - testq %rcx,%rcx - jnz L$schedule_mangle_last_dec - - - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,193 - leaq L$k_opt(%rip),%r11 - addq $32,%rdx - -L$schedule_mangle_last_dec: - addq $-16,%rdx - pxor L$k_s63(%rip),%xmm0 - call _vpaes_schedule_transform - movdqu %xmm0,(%rdx) - - - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - .byte 0xf3,0xc3 - - - - - - - - - - - - - - - - - - -.p2align 4 -_vpaes_schedule_192_smear: - - pshufd $0x80,%xmm6,%xmm1 - pshufd $0xFE,%xmm7,%xmm0 - pxor %xmm1,%xmm6 - pxor %xmm1,%xmm1 - pxor %xmm0,%xmm6 - movdqa %xmm6,%xmm0 - movhlps %xmm1,%xmm6 - .byte 0xf3,0xc3 - - - - - - - - - - - - - - - - - - - - - - -.p2align 4 -_vpaes_schedule_round: - - - pxor %xmm1,%xmm1 -.byte 102,65,15,58,15,200,15 -.byte 102,69,15,58,15,192,15 - pxor %xmm1,%xmm7 - - - pshufd $0xFF,%xmm0,%xmm0 -.byte 102,15,58,15,192,1 - - - - -_vpaes_schedule_low_round: - - movdqa %xmm7,%xmm1 - pslldq $4,%xmm7 - pxor %xmm1,%xmm7 - movdqa %xmm7,%xmm1 - pslldq $8,%xmm7 - pxor %xmm1,%xmm7 - pxor L$k_s63(%rip),%xmm7 - - - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 - movdqa %xmm11,%xmm2 -.byte 102,15,56,0,208 - pxor %xmm1,%xmm0 - movdqa %xmm10,%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - movdqa %xmm10,%xmm4 -.byte 102,15,56,0,224 - pxor %xmm2,%xmm4 - movdqa %xmm10,%xmm2 -.byte 102,15,56,0,211 - pxor %xmm0,%xmm2 - movdqa 
%xmm10,%xmm3 -.byte 102,15,56,0,220 - pxor %xmm1,%xmm3 - movdqa %xmm13,%xmm4 -.byte 102,15,56,0,226 - movdqa %xmm12,%xmm0 -.byte 102,15,56,0,195 - pxor %xmm4,%xmm0 - - - pxor %xmm7,%xmm0 - movdqa %xmm0,%xmm7 - .byte 0xf3,0xc3 - - - - - - - - - - - - - -.p2align 4 -_vpaes_schedule_transform: - - movdqa %xmm9,%xmm1 - pandn %xmm0,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm0 - movdqa (%r11),%xmm2 -.byte 102,15,56,0,208 - movdqa 16(%r11),%xmm0 -.byte 102,15,56,0,193 - pxor %xmm2,%xmm0 - .byte 0xf3,0xc3 - - - - - - - - - - - - - - - - - - - - - - - - - - - -.p2align 4 -_vpaes_schedule_mangle: - - movdqa %xmm0,%xmm4 - movdqa L$k_mc_forward(%rip),%xmm5 - testq %rcx,%rcx - jnz L$schedule_mangle_dec - - - addq $16,%rdx - pxor L$k_s63(%rip),%xmm4 -.byte 102,15,56,0,229 - movdqa %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 -.byte 102,15,56,0,229 - pxor %xmm4,%xmm3 - - jmp L$schedule_mangle_both -.p2align 4 -L$schedule_mangle_dec: - - leaq L$k_dksd(%rip),%r11 - movdqa %xmm9,%xmm1 - pandn %xmm4,%xmm1 - psrld $4,%xmm1 - pand %xmm9,%xmm4 - - movdqa 0(%r11),%xmm2 -.byte 102,15,56,0,212 - movdqa 16(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 32(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 48(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 64(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 80(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 -.byte 102,15,56,0,221 - - movdqa 96(%r11),%xmm2 -.byte 102,15,56,0,212 - pxor %xmm3,%xmm2 - movdqa 112(%r11),%xmm3 -.byte 102,15,56,0,217 - pxor %xmm2,%xmm3 - - addq $-16,%rdx - -L$schedule_mangle_both: - movdqa (%r8,%r10,1),%xmm1 -.byte 102,15,56,0,217 - addq $-16,%r8 - andq $0x30,%r8 - movdqu %xmm3,(%rdx) - .byte 0xf3,0xc3 - - - - - - -.globl _vpaes_set_encrypt_key -.private_extern _vpaes_set_encrypt_key - -.p2align 4 -_vpaes_set_encrypt_key: - -#ifdef BORINGSSL_DISPATCH_TEST - - movb $1,_BORINGSSL_function_hit+5(%rip) 
-#endif - - movl %esi,%eax - shrl $5,%eax - addl $5,%eax - movl %eax,240(%rdx) - - movl $0,%ecx - movl $0x30,%r8d - call _vpaes_schedule_core - xorl %eax,%eax - .byte 0xf3,0xc3 - - - -.globl _vpaes_set_decrypt_key -.private_extern _vpaes_set_decrypt_key - -.p2align 4 -_vpaes_set_decrypt_key: - - movl %esi,%eax - shrl $5,%eax - addl $5,%eax - movl %eax,240(%rdx) - shll $4,%eax - leaq 16(%rdx,%rax,1),%rdx - - movl $1,%ecx - movl %esi,%r8d - shrl $1,%r8d - andl $32,%r8d - xorl $32,%r8d - call _vpaes_schedule_core - xorl %eax,%eax - .byte 0xf3,0xc3 - - - -.globl _vpaes_encrypt -.private_extern _vpaes_encrypt - -.p2align 4 -_vpaes_encrypt: - -#ifdef BORINGSSL_DISPATCH_TEST - - movb $1,_BORINGSSL_function_hit+4(%rip) -#endif - movdqu (%rdi),%xmm0 - call _vpaes_preheat - call _vpaes_encrypt_core - movdqu %xmm0,(%rsi) - .byte 0xf3,0xc3 - - - -.globl _vpaes_decrypt -.private_extern _vpaes_decrypt - -.p2align 4 -_vpaes_decrypt: - - movdqu (%rdi),%xmm0 - call _vpaes_preheat - call _vpaes_decrypt_core - movdqu %xmm0,(%rsi) - .byte 0xf3,0xc3 - - -.globl _vpaes_cbc_encrypt -.private_extern _vpaes_cbc_encrypt - -.p2align 4 -_vpaes_cbc_encrypt: - - xchgq %rcx,%rdx - subq $16,%rcx - jc L$cbc_abort - movdqu (%r8),%xmm6 - subq %rdi,%rsi - call _vpaes_preheat - cmpl $0,%r9d - je L$cbc_dec_loop - jmp L$cbc_enc_loop -.p2align 4 -L$cbc_enc_loop: - movdqu (%rdi),%xmm0 - pxor %xmm6,%xmm0 - call _vpaes_encrypt_core - movdqa %xmm0,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - leaq 16(%rdi),%rdi - subq $16,%rcx - jnc L$cbc_enc_loop - jmp L$cbc_done -.p2align 4 -L$cbc_dec_loop: - movdqu (%rdi),%xmm0 - movdqa %xmm0,%xmm7 - call _vpaes_decrypt_core - pxor %xmm6,%xmm0 - movdqa %xmm7,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - leaq 16(%rdi),%rdi - subq $16,%rcx - jnc L$cbc_dec_loop -L$cbc_done: - movdqu %xmm6,(%r8) -L$cbc_abort: - .byte 0xf3,0xc3 - - -.globl _vpaes_ctr32_encrypt_blocks -.private_extern _vpaes_ctr32_encrypt_blocks - -.p2align 4 -_vpaes_ctr32_encrypt_blocks: - - - xchgq %rcx,%rdx - testq %rcx,%rcx 
- jz L$ctr32_abort - movdqu (%r8),%xmm0 - movdqa L$ctr_add_one(%rip),%xmm8 - subq %rdi,%rsi - call _vpaes_preheat - movdqa %xmm0,%xmm6 - pshufb L$rev_ctr(%rip),%xmm6 - - testq $1,%rcx - jz L$ctr32_prep_loop - - - - movdqu (%rdi),%xmm7 - call _vpaes_encrypt_core - pxor %xmm7,%xmm0 - paddd %xmm8,%xmm6 - movdqu %xmm0,(%rsi,%rdi,1) - subq $1,%rcx - leaq 16(%rdi),%rdi - jz L$ctr32_done - -L$ctr32_prep_loop: - - - movdqa %xmm6,%xmm14 - movdqa %xmm6,%xmm15 - paddd %xmm8,%xmm15 - -L$ctr32_loop: - movdqa L$rev_ctr(%rip),%xmm1 - movdqa %xmm14,%xmm0 - movdqa %xmm15,%xmm6 -.byte 102,15,56,0,193 -.byte 102,15,56,0,241 - call _vpaes_encrypt_core_2x - movdqu (%rdi),%xmm1 - movdqu 16(%rdi),%xmm2 - movdqa L$ctr_add_two(%rip),%xmm3 - pxor %xmm1,%xmm0 - pxor %xmm2,%xmm6 - paddd %xmm3,%xmm14 - paddd %xmm3,%xmm15 - movdqu %xmm0,(%rsi,%rdi,1) - movdqu %xmm6,16(%rsi,%rdi,1) - subq $2,%rcx - leaq 32(%rdi),%rdi - jnz L$ctr32_loop - -L$ctr32_done: -L$ctr32_abort: - .byte 0xf3,0xc3 - - - - - - - - - -.p2align 4 -_vpaes_preheat: - - leaq L$k_s0F(%rip),%r10 - movdqa -32(%r10),%xmm10 - movdqa -16(%r10),%xmm11 - movdqa 0(%r10),%xmm9 - movdqa 48(%r10),%xmm13 - movdqa 64(%r10),%xmm12 - movdqa 80(%r10),%xmm15 - movdqa 96(%r10),%xmm14 - .byte 0xf3,0xc3 - - - - - - - - -.p2align 6 -_vpaes_consts: -L$k_inv: -.quad 0x0E05060F0D080180, 0x040703090A0B0C02 -.quad 0x01040A060F0B0780, 0x030D0E0C02050809 - -L$k_s0F: -.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F - -L$k_ipt: -.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 -.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 - -L$k_sb1: -.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 -.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF -L$k_sb2: -.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD -.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A -L$k_sbo: -.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 -.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA - -L$k_mc_forward: -.quad 0x0407060500030201, 0x0C0F0E0D080B0A09 -.quad 0x080B0A0904070605, 0x000302010C0F0E0D -.quad 0x0C0F0E0D080B0A09, 
0x0407060500030201 -.quad 0x000302010C0F0E0D, 0x080B0A0904070605 - -L$k_mc_backward: -.quad 0x0605040702010003, 0x0E0D0C0F0A09080B -.quad 0x020100030E0D0C0F, 0x0A09080B06050407 -.quad 0x0E0D0C0F0A09080B, 0x0605040702010003 -.quad 0x0A09080B06050407, 0x020100030E0D0C0F - -L$k_sr: -.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 -.quad 0x030E09040F0A0500, 0x0B06010C07020D08 -.quad 0x0F060D040B020900, 0x070E050C030A0108 -.quad 0x0B0E0104070A0D00, 0x0306090C0F020508 - -L$k_rcon: -.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 - -L$k_s63: -.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B - -L$k_opt: -.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 -.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 - -L$k_deskew: -.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A -.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 - - - - - -L$k_dksd: -.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 -.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E -L$k_dksb: -.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 -.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 -L$k_dkse: -.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 -.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 -L$k_dks9: -.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC -.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE - - - - - -L$k_dipt: -.quad 0x0F505B040B545F00, 0x154A411E114E451A -.quad 0x86E383E660056500, 0x12771772F491F194 - -L$k_dsb9: -.quad 0x851C03539A86D600, 0xCAD51F504F994CC9 -.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 -L$k_dsbd: -.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 -.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 -L$k_dsbb: -.quad 0xD022649296B44200, 0x602646F6B0F2D404 -.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B -L$k_dsbe: -.quad 0x46F2929626D4D000, 0x2242600464B4F6B0 -.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 -L$k_dsbo: -.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D -.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C - - -L$rev_ctr: -.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 - - -L$ctr_add_one: -.quad 0x0000000000000000, 0x0000000100000000 -L$ctr_add_two: -.quad 
0x0000000000000000, 0x0000000200000000 - -.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 -.p2align 6 - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont.S deleted file mode 100644 index 8d6444cb6f..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont.S +++ /dev/null @@ -1,1256 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - -.globl _bn_mul_mont -.private_extern _bn_mul_mont - -.p2align 4 -_bn_mul_mont: - - movl %r9d,%r9d - movq %rsp,%rax - - testl $3,%r9d - jnz L$mul_enter - cmpl $8,%r9d - jb L$mul_enter - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - cmpq %rsi,%rdx - jne L$mul4x_enter - testl $7,%r9d - jz L$sqr8x_enter - jmp L$mul4x_enter - -.p2align 4 -L$mul_enter: - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - negq %r9 - movq %rsp,%r11 - leaq -16(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - - - - - - - - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul_page_walk - jmp L$mul_page_walk_done - -.p2align 4 -L$mul_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul_page_walk -L$mul_page_walk_done: - - movq %rax,8(%rsp,%r9,8) - -L$mul_body: - movq %rdx,%r12 - movq (%r8),%r8 - movq (%r12),%rbx - movq (%rsi),%rax - - xorq 
%r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp L$1st_enter - -.p2align 4 -L$1st: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r13 - movq %r10,%r11 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -L$1st_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 1(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - cmpq %r9,%r15 - jne L$1st - - addq %rax,%r13 - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - movq %r10,%r11 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - jmp L$outer -.p2align 4 -L$outer: - movq (%r12,%r14,8),%rbx - xorq %r15,%r15 - movq %r8,%rbp - movq (%rsp),%r10 - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq 8(%rsp),%r10 - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp L$inner_enter - -.p2align 4 -L$inner: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -L$inner_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - leaq 1(%r15),%r15 - - mulq %rbp - cmpq %r9,%r15 - jne L$inner - - addq %rax,%r13 - movq (%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - cmpq %r9,%r14 - jb L$outer - - xorq %r14,%r14 - movq (%rsp),%rax - movq 
%r9,%r15 - -.p2align 4 -L$sub: sbbq (%rcx,%r14,8),%rax - movq %rax,(%rdi,%r14,8) - movq 8(%rsp,%r14,8),%rax - leaq 1(%r14),%r14 - decq %r15 - jnz L$sub - - sbbq $0,%rax - movq $-1,%rbx - xorq %rax,%rbx - xorq %r14,%r14 - movq %r9,%r15 - -L$copy: - movq (%rdi,%r14,8),%rcx - movq (%rsp,%r14,8),%rdx - andq %rbx,%rcx - andq %rax,%rdx - movq %r9,(%rsp,%r14,8) - orq %rcx,%rdx - movq %rdx,(%rdi,%r14,8) - leaq 1(%r14),%r14 - subq $1,%r15 - jnz L$copy - - movq 8(%rsp,%r9,8),%rsi - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mul_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 4 -bn_mul4x_mont: - - movl %r9d,%r9d - movq %rsp,%rax - -L$mul4x_enter: - andl $0x80100,%r11d - cmpl $0x80100,%r11d - je L$mulx4x_enter - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - negq %r9 - movq %rsp,%r11 - leaq -32(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul4x_page_walk - jmp L$mul4x_page_walk_done - -L$mul4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul4x_page_walk -L$mul4x_page_walk_done: - - movq %rax,8(%rsp,%r9,8) - -L$mul4x_body: - movq %rdi,16(%rsp,%r9,8) - movq %rdx,%r12 - movq (%r8),%r8 - movq (%r12),%rbx - movq (%rsi),%rax - - xorq %r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 4(%r15),%r15 - adcq $0,%rdx - movq %rdi,(%rsp) - movq %rdx,%r13 - jmp L$1st4x -.p2align 4 -L$1st4x: - mulq %rbx - addq %rax,%r10 - movq 
-16(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 4(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq -16(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-32(%rsp,%r15,8) - movq %rdx,%r13 - cmpq %r9,%r15 - jb L$1st4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - movq %r13,-8(%rsp,%r15,8) - movq %rdi,(%rsp,%r15,8) - - leaq 1(%r14),%r14 -.p2align 2 -L$outer4x: - movq (%r12,%r14,8),%rbx - xorq %r15,%r15 - movq (%rsp),%r10 - movq %r8,%rbp - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%rsp),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp 
- addq %rax,%rdi - movq 16(%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 4(%r15),%r15 - adcq $0,%rdx - movq %rdi,(%rsp) - movq %rdx,%r13 - jmp L$inner4x -.p2align 4 -L$inner4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -16(%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -8(%rsp,%r15,8),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq 8(%rsp,%r15,8),%r11 - adcq $0,%rdx - leaq 4(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq -16(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-32(%rsp,%r15,8) - movq %rdx,%r13 - cmpq %r9,%r15 - jb L$inner4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -16(%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%rsp,%r15,8) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx,%r15,8),%rax - adcq $0,%rdx - addq -8(%rsp,%r15,8),%r11 - adcq $0,%rdx - leaq 1(%r14),%r14 - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%rsp,%r15,8) - movq %rdx,%r13 - - xorq %rdi,%rdi - addq %r10,%r13 - adcq 
$0,%rdi - addq (%rsp,%r9,8),%r13 - adcq $0,%rdi - movq %r13,-8(%rsp,%r15,8) - movq %rdi,(%rsp,%r15,8) - - cmpq %r9,%r14 - jb L$outer4x - movq 16(%rsp,%r9,8),%rdi - leaq -4(%r9),%r15 - movq 0(%rsp),%rax - movq 8(%rsp),%rdx - shrq $2,%r15 - leaq (%rsp),%rsi - xorq %r14,%r14 - - subq 0(%rcx),%rax - movq 16(%rsi),%rbx - movq 24(%rsi),%rbp - sbbq 8(%rcx),%rdx - -L$sub4x: - movq %rax,0(%rdi,%r14,8) - movq %rdx,8(%rdi,%r14,8) - sbbq 16(%rcx,%r14,8),%rbx - movq 32(%rsi,%r14,8),%rax - movq 40(%rsi,%r14,8),%rdx - sbbq 24(%rcx,%r14,8),%rbp - movq %rbx,16(%rdi,%r14,8) - movq %rbp,24(%rdi,%r14,8) - sbbq 32(%rcx,%r14,8),%rax - movq 48(%rsi,%r14,8),%rbx - movq 56(%rsi,%r14,8),%rbp - sbbq 40(%rcx,%r14,8),%rdx - leaq 4(%r14),%r14 - decq %r15 - jnz L$sub4x - - movq %rax,0(%rdi,%r14,8) - movq 32(%rsi,%r14,8),%rax - sbbq 16(%rcx,%r14,8),%rbx - movq %rdx,8(%rdi,%r14,8) - sbbq 24(%rcx,%r14,8),%rbp - movq %rbx,16(%rdi,%r14,8) - - sbbq $0,%rax - movq %rbp,24(%rdi,%r14,8) - pxor %xmm0,%xmm0 -.byte 102,72,15,110,224 - pcmpeqd %xmm5,%xmm5 - pshufd $0,%xmm4,%xmm4 - movq %r9,%r15 - pxor %xmm4,%xmm5 - shrq $2,%r15 - xorl %eax,%eax - - jmp L$copy4x -.p2align 4 -L$copy4x: - movdqa (%rsp,%rax,1),%xmm1 - movdqu (%rdi,%rax,1),%xmm2 - pand %xmm4,%xmm1 - pand %xmm5,%xmm2 - movdqa 16(%rsp,%rax,1),%xmm3 - movdqa %xmm0,(%rsp,%rax,1) - por %xmm2,%xmm1 - movdqu 16(%rdi,%rax,1),%xmm2 - movdqu %xmm1,(%rdi,%rax,1) - pand %xmm4,%xmm3 - pand %xmm5,%xmm2 - movdqa %xmm0,16(%rsp,%rax,1) - por %xmm2,%xmm3 - movdqu %xmm3,16(%rdi,%rax,1) - leaq 32(%rax),%rax - decq %r15 - jnz L$copy4x - movq 8(%rsp,%r9,8),%rsi - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mul4x_epilogue: - .byte 0xf3,0xc3 - - - - - - -.p2align 5 -bn_sqr8x_mont: - - movq %rsp,%rax - -L$sqr8x_enter: - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$sqr8x_prologue: - - movl 
%r9d,%r10d - shll $3,%r9d - shlq $3+2,%r10 - negq %r9 - - - - - - - leaq -64(%rsp,%r9,2),%r11 - movq %rsp,%rbp - movq (%r8),%r8 - subq %rsi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$sqr8x_sp_alt - subq %r11,%rbp - leaq -64(%rbp,%r9,2),%rbp - jmp L$sqr8x_sp_done - -.p2align 5 -L$sqr8x_sp_alt: - leaq 4096-64(,%r9,2),%r10 - leaq -64(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$sqr8x_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$sqr8x_page_walk - jmp L$sqr8x_page_walk_done - -.p2align 4 -L$sqr8x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$sqr8x_page_walk -L$sqr8x_page_walk_done: - - movq %r9,%r10 - negq %r9 - - movq %r8,32(%rsp) - movq %rax,40(%rsp) - -L$sqr8x_body: - -.byte 102,72,15,110,209 - pxor %xmm0,%xmm0 -.byte 102,72,15,110,207 -.byte 102,73,15,110,218 - leaq _OPENSSL_ia32cap_P(%rip),%rax - movl 8(%rax),%eax - andl $0x80100,%eax - cmpl $0x80100,%eax - jne L$sqr8x_nox - - call _bn_sqrx8x_internal - - - - - leaq (%r8,%rcx,1),%rbx - movq %rcx,%r9 - movq %rcx,%rdx -.byte 102,72,15,126,207 - sarq $3+2,%rcx - jmp L$sqr8x_sub - -.p2align 5 -L$sqr8x_nox: - call _bn_sqr8x_internal - - - - - leaq (%rdi,%r9,1),%rbx - movq %r9,%rcx - movq %r9,%rdx -.byte 102,72,15,126,207 - sarq $3+2,%rcx - jmp L$sqr8x_sub - -.p2align 5 -L$sqr8x_sub: - movq 0(%rbx),%r12 - movq 8(%rbx),%r13 - movq 16(%rbx),%r14 - movq 24(%rbx),%r15 - leaq 32(%rbx),%rbx - sbbq 0(%rbp),%r12 - sbbq 8(%rbp),%r13 - sbbq 16(%rbp),%r14 - sbbq 24(%rbp),%r15 - leaq 32(%rbp),%rbp - movq %r12,0(%rdi) - movq %r13,8(%rdi) - movq %r14,16(%rdi) - movq %r15,24(%rdi) - leaq 32(%rdi),%rdi - incq %rcx - jnz L$sqr8x_sub - - sbbq $0,%rax - leaq (%rbx,%r9,1),%rbx - leaq (%rdi,%r9,1),%rdi - -.byte 102,72,15,110,200 - pxor %xmm0,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq 40(%rsp),%rsi - - jmp L$sqr8x_cond_copy - -.p2align 5 -L$sqr8x_cond_copy: - movdqa 
0(%rbx),%xmm2 - movdqa 16(%rbx),%xmm3 - leaq 32(%rbx),%rbx - movdqu 0(%rdi),%xmm4 - movdqu 16(%rdi),%xmm5 - leaq 32(%rdi),%rdi - movdqa %xmm0,-32(%rbx) - movdqa %xmm0,-16(%rbx) - movdqa %xmm0,-32(%rbx,%rdx,1) - movdqa %xmm0,-16(%rbx,%rdx,1) - pcmpeqd %xmm1,%xmm0 - pand %xmm1,%xmm2 - pand %xmm1,%xmm3 - pand %xmm0,%xmm4 - pand %xmm0,%xmm5 - pxor %xmm0,%xmm0 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqu %xmm4,-32(%rdi) - movdqu %xmm5,-16(%rdi) - addq $32,%r9 - jnz L$sqr8x_cond_copy - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$sqr8x_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -bn_mulx4x_mont: - - movq %rsp,%rax - -L$mulx4x_enter: - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$mulx4x_prologue: - - shll $3,%r9d - xorq %r10,%r10 - subq %r9,%r10 - movq (%r8),%r8 - leaq -72(%rsp,%r10,1),%rbp - andq $-128,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mulx4x_page_walk - jmp L$mulx4x_page_walk_done - -.p2align 4 -L$mulx4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mulx4x_page_walk -L$mulx4x_page_walk_done: - - leaq (%rdx,%r9,1),%r10 - - - - - - - - - - - - - movq %r9,0(%rsp) - shrq $5,%r9 - movq %r10,16(%rsp) - subq $1,%r9 - movq %r8,24(%rsp) - movq %rdi,32(%rsp) - movq %rax,40(%rsp) - - movq %r9,48(%rsp) - jmp L$mulx4x_body - -.p2align 5 -L$mulx4x_body: - leaq 8(%rdx),%rdi - movq (%rdx),%rdx - leaq 64+32(%rsp),%rbx - movq %rdx,%r9 - - mulxq 0(%rsi),%r8,%rax - mulxq 8(%rsi),%r11,%r14 - addq %rax,%r11 - movq %rdi,8(%rsp) - mulxq 16(%rsi),%r12,%r13 - adcq %r14,%r12 - adcq $0,%r13 - - movq %r8,%rdi - imulq 24(%rsp),%r8 - xorq %rbp,%rbp - - mulxq 24(%rsi),%rax,%r14 - movq %r8,%rdx - leaq 32(%rsi),%rsi - adcxq %rax,%r13 - adcxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%rdi - 
adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 -.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 - movq 48(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - adcxq %rax,%r12 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r12,-16(%rbx) - - jmp L$mulx4x_1st - -.p2align 5 -L$mulx4x_1st: - adcxq %rbp,%r15 - mulxq 0(%rsi),%r10,%rax - adcxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 -.byte 0x67,0x67 - movq %r8,%rdx - adcxq %rax,%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - movq %r11,-32(%rbx) - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r13,-16(%rbx) - - decq %rdi - jnz L$mulx4x_1st - - movq 0(%rsp),%rax - movq 8(%rsp),%rdi - adcq %rbp,%r15 - addq %r15,%r14 - sbbq %r15,%r15 - movq %r14,-8(%rbx) - jmp L$mulx4x_outer - -.p2align 5 -L$mulx4x_outer: - movq (%rdi),%rdx - leaq 8(%rdi),%rdi - subq %rax,%rsi - movq %r15,(%rbx) - leaq 64+32(%rsp),%rbx - subq %rax,%rcx - - mulxq 0(%rsi),%r8,%r11 - xorl %ebp,%ebp - movq %rdx,%r9 - mulxq 8(%rsi),%r14,%r12 - adoxq -32(%rbx),%r8 - adcxq %r14,%r11 - mulxq 16(%rsi),%r15,%r13 - adoxq -24(%rbx),%r11 - adcxq %r15,%r12 - adoxq -16(%rbx),%r12 - adcxq %rbp,%r13 - adoxq %rbp,%r13 - - movq %rdi,8(%rsp) - movq %r8,%r15 - imulq 24(%rsp),%r8 - xorl %ebp,%ebp - - mulxq 24(%rsi),%rax,%r14 - movq %r8,%rdx - adcxq %rax,%r13 - adoxq -8(%rbx),%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - adoxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 
- mulxq 16(%rcx),%rax,%r12 - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - leaq 32(%rcx),%rcx - adcxq %rax,%r12 - adoxq %rbp,%r15 - movq 48(%rsp),%rdi - movq %r12,-16(%rbx) - - jmp L$mulx4x_inner - -.p2align 5 -L$mulx4x_inner: - mulxq 0(%rsi),%r10,%rax - adcxq %rbp,%r15 - adoxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq 0(%rbx),%r10 - adoxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq 8(%rbx),%r11 - adoxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 - movq %r8,%rdx - adcxq 16(%rbx),%r12 - adoxq %rax,%r13 - adcxq 24(%rbx),%r13 - adoxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - adcxq %rbp,%r14 - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-32(%rbx) - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r13,-16(%rbx) - - decq %rdi - jnz L$mulx4x_inner - - movq 0(%rsp),%rax - movq 8(%rsp),%rdi - adcq %rbp,%r15 - subq 0(%rbx),%rbp - adcq %r15,%r14 - sbbq %r15,%r15 - movq %r14,-8(%rbx) - - cmpq 16(%rsp),%rdi - jne L$mulx4x_outer - - leaq 64(%rsp),%rbx - subq %rax,%rcx - negq %r15 - movq %rax,%rdx - shrq $3+2,%rax - movq 32(%rsp),%rdi - jmp L$mulx4x_sub - -.p2align 5 -L$mulx4x_sub: - movq 0(%rbx),%r11 - movq 8(%rbx),%r12 - movq 16(%rbx),%r13 - movq 24(%rbx),%r14 - leaq 32(%rbx),%rbx - sbbq 0(%rcx),%r11 - sbbq 8(%rcx),%r12 - sbbq 16(%rcx),%r13 - sbbq 24(%rcx),%r14 - leaq 32(%rcx),%rcx - movq %r11,0(%rdi) - movq %r12,8(%rdi) - movq %r13,16(%rdi) - movq %r14,24(%rdi) - leaq 32(%rdi),%rdi - decq %rax - jnz L$mulx4x_sub - - sbbq $0,%r15 - leaq 64(%rsp),%rbx - subq %rdx,%rdi - -.byte 102,73,15,110,207 - pxor %xmm0,%xmm0 - pshufd $0,%xmm1,%xmm1 - movq 40(%rsp),%rsi - - jmp L$mulx4x_cond_copy - -.p2align 5 -L$mulx4x_cond_copy: - movdqa 
0(%rbx),%xmm2 - movdqa 16(%rbx),%xmm3 - leaq 32(%rbx),%rbx - movdqu 0(%rdi),%xmm4 - movdqu 16(%rdi),%xmm5 - leaq 32(%rdi),%rdi - movdqa %xmm0,-32(%rbx) - movdqa %xmm0,-16(%rbx) - pcmpeqd %xmm1,%xmm0 - pand %xmm1,%xmm2 - pand %xmm1,%xmm3 - pand %xmm0,%xmm4 - pand %xmm0,%xmm5 - pxor %xmm0,%xmm0 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqu %xmm4,-32(%rdi) - movdqu %xmm5,-16(%rdi) - subq $32,%rdx - jnz L$mulx4x_cond_copy - - movq %rdx,(%rbx) - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mulx4x_epilogue: - .byte 0xf3,0xc3 - - -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -.p2align 4 -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S b/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S deleted file mode 100644 index 4bd36feae4..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S +++ /dev/null @@ -1,3788 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - -.globl _bn_mul_mont_gather5 -.private_extern _bn_mul_mont_gather5 - -.p2align 6 -_bn_mul_mont_gather5: - - movl %r9d,%r9d - movq %rsp,%rax - - testl $7,%r9d - jnz L$mul_enter - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - jmp L$mul4x_enter - -.p2align 4 -L$mul_enter: - movd 8(%rsp),%xmm5 - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - - - negq %r9 - movq %rsp,%r11 - leaq -280(%rsp,%r9,8),%r10 - negq %r9 - andq $-1024,%r10 - - - - - - - - - - subq %r10,%r11 - andq $-4096,%r11 - leaq (%r10,%r11,1),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul_page_walk - jmp L$mul_page_walk_done - -L$mul_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r11 - cmpq %r10,%rsp - ja L$mul_page_walk -L$mul_page_walk_done: - - leaq L$inc(%rip),%r10 - movq %rax,8(%rsp,%r9,8) - -L$mul_body: - - leaq 128(%rdx),%r12 - movdqa 0(%r10),%xmm0 - movdqa 16(%r10),%xmm1 - leaq 24-112(%rsp,%r9,8),%r10 - andq $-16,%r10 - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 -.byte 0x67 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 
- pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 -.byte 0x67 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - pand 64(%r12),%xmm0 - - pand 80(%r12),%xmm1 - pand 96(%r12),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%r12),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%r12),%xmm4 - movdqa -112(%r12),%xmm5 - movdqa -96(%r12),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%r12),%xmm3 - pand 128(%r10),%xmm5 - por %xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%r12),%xmm4 - movdqa -48(%r12),%xmm5 - movdqa -32(%r12),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%r12),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%r12),%xmm4 - movdqa 16(%r12),%xmm5 - movdqa 32(%r12),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%r12),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - por %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq (%r8),%r8 - movq (%rsi),%rax - - xorq %r14,%r14 - xorq %r15,%r15 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp L$1st_enter - -.p2align 4 -L$1st: - 
addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r13 - movq %r10,%r11 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -L$1st_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - leaq 1(%r15),%r15 - movq %rdx,%r10 - - mulq %rbp - cmpq %r9,%r15 - jne L$1st - - - addq %rax,%r13 - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-16(%rsp,%r9,8) - movq %rdx,%r13 - movq %r10,%r11 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - jmp L$outer -.p2align 4 -L$outer: - leaq 24+128(%rsp,%r9,8),%rdx - andq $-16,%rdx - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - movdqa -128(%r12),%xmm0 - movdqa -112(%r12),%xmm1 - movdqa -96(%r12),%xmm2 - movdqa -80(%r12),%xmm3 - pand -128(%rdx),%xmm0 - pand -112(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -80(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r12),%xmm0 - movdqa -48(%r12),%xmm1 - movdqa -32(%r12),%xmm2 - movdqa -16(%r12),%xmm3 - pand -64(%rdx),%xmm0 - pand -48(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -16(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r12),%xmm0 - movdqa 16(%r12),%xmm1 - movdqa 32(%r12),%xmm2 - movdqa 48(%r12),%xmm3 - pand 0(%rdx),%xmm0 - pand 16(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 48(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r12),%xmm0 - movdqa 80(%r12),%xmm1 - movdqa 96(%r12),%xmm2 - movdqa 112(%r12),%xmm3 - pand 64(%rdx),%xmm0 - pand 80(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 112(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%r12),%r12 - - movq (%rsi),%rax -.byte 102,72,15,126,195 - - xorq %r15,%r15 - movq %r8,%rbp - movq (%rsp),%r10 - - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq 
%r10,%rbp - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi),%rax - adcq $0,%rdx - movq 8(%rsp),%r10 - movq %rdx,%r13 - - leaq 1(%r15),%r15 - jmp L$inner_enter - -.p2align 4 -L$inner: - addq %rax,%r13 - movq (%rsi,%r15,8),%rax - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r15,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r15,8) - movq %rdx,%r13 - -L$inner_enter: - mulq %rbx - addq %rax,%r11 - movq (%rcx,%r15,8),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - leaq 1(%r15),%r15 - - mulq %rbp - cmpq %r9,%r15 - jne L$inner - - addq %rax,%r13 - adcq $0,%rdx - addq %r10,%r13 - movq (%rsp,%r9,8),%r10 - adcq $0,%rdx - movq %r13,-16(%rsp,%r9,8) - movq %rdx,%r13 - - xorq %rdx,%rdx - addq %r11,%r13 - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%rsp,%r9,8) - movq %rdx,(%rsp,%r9,8) - - leaq 1(%r14),%r14 - cmpq %r9,%r14 - jb L$outer - - xorq %r14,%r14 - movq (%rsp),%rax - leaq (%rsp),%rsi - movq %r9,%r15 - jmp L$sub -.p2align 4 -L$sub: sbbq (%rcx,%r14,8),%rax - movq %rax,(%rdi,%r14,8) - movq 8(%rsi,%r14,8),%rax - leaq 1(%r14),%r14 - decq %r15 - jnz L$sub - - sbbq $0,%rax - movq $-1,%rbx - xorq %rax,%rbx - xorq %r14,%r14 - movq %r9,%r15 - -L$copy: - movq (%rdi,%r14,8),%rcx - movq (%rsp,%r14,8),%rdx - andq %rbx,%rcx - andq %rax,%rdx - movq %r14,(%rsp,%r14,8) - orq %rcx,%rdx - movq %rdx,(%rdi,%r14,8) - leaq 1(%r14),%r14 - subq $1,%r15 - jnz L$copy - - movq 8(%rsp,%r9,8),%rsi - - movq $1,%rax - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mul_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -bn_mul4x_mont_gather5: - -.byte 0x67 - movq %rsp,%rax - -L$mul4x_enter: - andl $0x80108,%r11d - cmpl $0x80108,%r11d - je L$mulx4x_enter - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$mul4x_prologue: - -.byte 0x67 - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - - - - - - - - - - - 
leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$mul4xsp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp L$mul4xsp_done - -.p2align 5 -L$mul4xsp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$mul4xsp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mul4x_page_walk - jmp L$mul4x_page_walk_done - -L$mul4x_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mul4x_page_walk -L$mul4x_page_walk_done: - - negq %r9 - - movq %rax,40(%rsp) - -L$mul4x_body: - - call mul4x_internal - - movq 40(%rsp),%rsi - - movq $1,%rax - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mul4x_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -mul4x_internal: - - shlq $5,%r9 - movd 8(%rax),%xmm5 - leaq L$inc(%rip),%rax - leaq 128(%rdx,%r9,1),%r13 - shrq $5,%r9 - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 88-112(%rsp,%r9,1),%r10 - leaq 128(%rdx),%r12 - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 -.byte 0x67,0x67 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 -.byte 0x67 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - 
movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 -.byte 0x67 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - pand 64(%r12),%xmm0 - - pand 80(%r12),%xmm1 - pand 96(%r12),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%r12),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%r12),%xmm4 - movdqa -112(%r12),%xmm5 - movdqa -96(%r12),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%r12),%xmm3 - pand 128(%r10),%xmm5 - por %xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%r12),%xmm4 - movdqa -48(%r12),%xmm5 - movdqa -32(%r12),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%r12),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%r12),%xmm4 - movdqa 16(%r12),%xmm5 - movdqa 32(%r12),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%r12),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - por %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq %r13,16+8(%rsp) - movq %rdi,56+8(%rsp) - - movq (%r8),%r8 - movq (%rsi),%rax - leaq (%rsi,%r9,1),%rsi - negq %r9 - - movq %r8,%rbp - mulq %rbx - movq %rax,%r10 - movq (%rcx),%rax - - imulq %r10,%rbp - leaq 64+8(%rsp),%r14 - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r10 - 
movq 8(%rsi,%r9,1),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%r9),%r15 - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdi,(%r14) - movq %rdx,%r13 - jmp L$1st4x - -.p2align 5 -L$1st4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq 0(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-8(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdi,(%r14) - movq %rdx,%r13 - - addq $32,%r15 - jnz L$1st4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%r13 - - leaq (%rcx,%r9,1),%rcx - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - movq %r13,-8(%r14) - - jmp L$outer4x - -.p2align 5 -L$outer4x: - leaq 16+128(%r14),%rdx - pxor %xmm4,%xmm4 - 
pxor %xmm5,%xmm5 - movdqa -128(%r12),%xmm0 - movdqa -112(%r12),%xmm1 - movdqa -96(%r12),%xmm2 - movdqa -80(%r12),%xmm3 - pand -128(%rdx),%xmm0 - pand -112(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -80(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r12),%xmm0 - movdqa -48(%r12),%xmm1 - movdqa -32(%r12),%xmm2 - movdqa -16(%r12),%xmm3 - pand -64(%rdx),%xmm0 - pand -48(%rdx),%xmm1 - por %xmm0,%xmm4 - pand -32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand -16(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r12),%xmm0 - movdqa 16(%r12),%xmm1 - movdqa 32(%r12),%xmm2 - movdqa 48(%r12),%xmm3 - pand 0(%rdx),%xmm0 - pand 16(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 32(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 48(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r12),%xmm0 - movdqa 80(%r12),%xmm1 - movdqa 96(%r12),%xmm2 - movdqa 112(%r12),%xmm3 - pand 64(%rdx),%xmm0 - pand 80(%rdx),%xmm1 - por %xmm0,%xmm4 - pand 96(%rdx),%xmm2 - por %xmm1,%xmm5 - pand 112(%rdx),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%r12),%r12 -.byte 102,72,15,126,195 - - movq (%r14,%r9,1),%r10 - movq %r8,%rbp - mulq %rbx - addq %rax,%r10 - movq (%rcx),%rax - adcq $0,%rdx - - imulq %r10,%rbp - movq %rdx,%r11 - movq %rdi,(%r14) - - leaq (%r14,%r9,1),%r14 - - mulq %rbp - addq %rax,%r10 - movq 8(%rsi,%r9,1),%rax - adcq $0,%rdx - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%r9),%r15 - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %rdx,%r13 - jmp L$inner4x - -.p2align 5 -L$inner4x: - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - adcq $0,%rdx - addq 16(%r14),%r10 - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq 
%r10,%r13 - adcq $0,%rdx - movq %rdi,-32(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq -8(%rcx),%rax - adcq $0,%rdx - addq -8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%r13 - - mulq %rbx - addq %rax,%r10 - movq 0(%rcx),%rax - adcq $0,%rdx - addq (%r14),%r10 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq 8(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %rdi,-16(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq 8(%rcx),%rax - adcq $0,%rdx - addq 8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq 16(%rsi,%r15,1),%rax - adcq $0,%rdx - addq %r11,%rdi - leaq 32(%rcx),%rcx - adcq $0,%rdx - movq %r13,-8(%r14) - movq %rdx,%r13 - - addq $32,%r15 - jnz L$inner4x - - mulq %rbx - addq %rax,%r10 - movq -16(%rcx),%rax - adcq $0,%rdx - addq 16(%r14),%r10 - leaq 32(%r14),%r14 - adcq $0,%rdx - movq %rdx,%r11 - - mulq %rbp - addq %rax,%r13 - movq -8(%rsi),%rax - adcq $0,%rdx - addq %r10,%r13 - adcq $0,%rdx - movq %rdi,-32(%r14) - movq %rdx,%rdi - - mulq %rbx - addq %rax,%r11 - movq %rbp,%rax - movq -8(%rcx),%rbp - adcq $0,%rdx - addq -8(%r14),%r11 - adcq $0,%rdx - movq %rdx,%r10 - - mulq %rbp - addq %rax,%rdi - movq (%rsi,%r9,1),%rax - adcq $0,%rdx - addq %r11,%rdi - adcq $0,%rdx - movq %r13,-24(%r14) - movq %rdx,%r13 - - movq %rdi,-16(%r14) - leaq (%rcx,%r9,1),%rcx - - xorq %rdi,%rdi - addq %r10,%r13 - adcq $0,%rdi - addq (%r14),%r13 - adcq $0,%rdi - movq %r13,-8(%r14) - - cmpq 16+8(%rsp),%r12 - jb L$outer4x - xorq %rax,%rax - subq %r13,%rbp - adcq %r15,%r15 - orq %r15,%rdi - subq %rdi,%rax - leaq (%r14,%r9,1),%rbx - movq (%rcx),%r12 - leaq (%rcx),%rbp - movq %r9,%rcx - sarq $3+2,%rcx - movq 56+8(%rsp),%rdi - decq %r12 - xorq %r10,%r10 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp L$sqr4x_sub_entry - - -.globl _bn_power5 
-.private_extern _bn_power5 - -.p2align 5 -_bn_power5: - - movq %rsp,%rax - - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - andl $0x80108,%r11d - cmpl $0x80108,%r11d - je L$powerx5_enter - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$power5_prologue: - - shll $3,%r9d - leal (%r9,%r9,2),%r10d - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$pwr_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp L$pwr_sp_done - -.p2align 5 -L$pwr_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$pwr_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$pwr_page_walk - jmp L$pwr_page_walk_done - -L$pwr_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$pwr_page_walk -L$pwr_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) - -L$power5_body: -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 102,73,15,110,218 -.byte 102,72,15,110,226 - - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - -.byte 102,72,15,126,209 -.byte 102,72,15,126,226 - movq %rsi,%rdi - movq 40(%rsp),%rax - leaq 32(%rsp),%r8 - - call mul4x_internal - - movq 40(%rsp),%rsi - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$power5_epilogue: - .byte 0xf3,0xc3 - - - -.globl _bn_sqr8x_internal -.private_extern _bn_sqr8x_internal -.private_extern 
_bn_sqr8x_internal - -.p2align 5 -_bn_sqr8x_internal: -__bn_sqr8x_internal: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - leaq 32(%r10),%rbp - leaq (%rsi,%r9,1),%rsi - - movq %r9,%rcx - - - movq -32(%rsi,%rbp,1),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi,%rbp,1),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq -16(%rsi,%rbp,1),%rbx - movq %rax,%r15 - - mulq %r14 - movq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - movq %r10,-24(%rdi,%rbp,1) - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - adcq $0,%rdx - movq %r11,-16(%rdi,%rbp,1) - movq %rdx,%r10 - - - movq -8(%rsi,%rbp,1),%rbx - mulq %r15 - movq %rax,%r12 - movq %rbx,%rax - movq %rdx,%r13 - - leaq (%rbp),%rcx - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - jmp L$sqr4x_1st - -.p2align 5 -L$sqr4x_1st: - movq (%rsi,%rcx,1),%rbx - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %rdx,%r12 - adcq $0,%r12 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 8(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - movq %r11,(%rdi,%rcx,1) - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq 16(%rsi,%rcx,1),%rbx - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %r10,8(%rdi,%rcx,1) - movq %rdx,%r12 - adcq $0,%r12 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 24(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - movq %r11,16(%rdi,%rcx,1) - movq %rdx,%r13 - adcq $0,%r13 - leaq 32(%rcx),%rcx - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - - cmpq $0,%rcx - jne 
L$sqr4x_1st - - mulq %r15 - addq %rax,%r13 - leaq 16(%rbp),%rbp - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - jmp L$sqr4x_outer - -.p2align 5 -L$sqr4x_outer: - movq -32(%rsi,%rbp,1),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi,%rbp,1),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq -16(%rsi,%rbp,1),%rbx - movq %rax,%r15 - - mulq %r14 - movq -24(%rdi,%rbp,1),%r10 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - movq %r10,-24(%rdi,%rbp,1) - movq %rdx,%r11 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - adcq $0,%rdx - addq -16(%rdi,%rbp,1),%r11 - movq %rdx,%r10 - adcq $0,%r10 - movq %r11,-16(%rdi,%rbp,1) - - xorq %r12,%r12 - - movq -8(%rsi,%rbp,1),%rbx - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - adcq $0,%rdx - addq -8(%rdi,%rbp,1),%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - addq %r12,%r10 - movq %rdx,%r11 - adcq $0,%r11 - movq %r10,-8(%rdi,%rbp,1) - - leaq (%rbp),%rcx - jmp L$sqr4x_inner - -.p2align 5 -L$sqr4x_inner: - movq (%rsi,%rcx,1),%rbx - mulq %r15 - addq %rax,%r13 - movq %rbx,%rax - movq %rdx,%r12 - adcq $0,%r12 - addq (%rdi,%rcx,1),%r13 - adcq $0,%r12 - -.byte 0x67 - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq 8(%rsi,%rcx,1),%rbx - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - adcq $0,%r10 - - mulq %r15 - addq %rax,%r12 - movq %r11,(%rdi,%rcx,1) - movq %rbx,%rax - movq %rdx,%r13 - adcq $0,%r13 - addq 8(%rdi,%rcx,1),%r12 - leaq 16(%rcx),%rcx - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - adcq $0,%rdx - addq %r12,%r10 - movq %rdx,%r11 - adcq $0,%r11 - movq %r10,-8(%rdi,%rcx,1) - - cmpq $0,%rcx - jne L$sqr4x_inner - -.byte 0x67 - mulq %r15 - addq %rax,%r13 - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - - addq $16,%rbp - jnz L$sqr4x_outer - - - movq -32(%rsi),%r14 - leaq 48+8(%rsp,%r9,2),%rdi - movq -24(%rsi),%rax - leaq -32(%rdi,%rbp,1),%rdi - movq 
-16(%rsi),%rbx - movq %rax,%r15 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - - mulq %r14 - addq %rax,%r11 - movq %rbx,%rax - movq %r10,-24(%rdi) - movq %rdx,%r10 - adcq $0,%r10 - addq %r13,%r11 - movq -8(%rsi),%rbx - adcq $0,%r10 - - mulq %r15 - addq %rax,%r12 - movq %rbx,%rax - movq %r11,-16(%rdi) - movq %rdx,%r13 - adcq $0,%r13 - - mulq %r14 - addq %rax,%r10 - movq %rbx,%rax - movq %rdx,%r11 - adcq $0,%r11 - addq %r12,%r10 - adcq $0,%r11 - movq %r10,-8(%rdi) - - mulq %r15 - addq %rax,%r13 - movq -16(%rsi),%rax - adcq $0,%rdx - addq %r11,%r13 - adcq $0,%rdx - - movq %r13,(%rdi) - movq %rdx,%r12 - movq %rdx,8(%rdi) - - mulq %rbx - addq $16,%rbp - xorq %r14,%r14 - subq %r9,%rbp - xorq %r15,%r15 - - addq %r12,%rax - adcq $0,%rdx - movq %rax,8(%rdi) - movq %rdx,16(%rdi) - movq %r15,24(%rdi) - - movq -16(%rsi,%rbp,1),%rax - leaq 48+8(%rsp),%rdi - xorq %r10,%r10 - movq 8(%rdi),%r11 - - leaq (%r14,%r10,2),%r12 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq 16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 24(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi,%rbp,1),%rax - movq %r12,(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 32(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 40(%rdi),%r11 - adcq %rax,%rbx - movq 0(%rsi,%rbp,1),%rax - movq %rbx,16(%rdi) - adcq %rdx,%r8 - leaq 16(%rbp),%rbp - movq %r8,24(%rdi) - sbbq %r15,%r15 - leaq 64(%rdi),%rdi - jmp L$sqr4x_shift_n_add - -.p2align 5 -L$sqr4x_shift_n_add: - leaq (%r14,%r10,2),%r12 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq -16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq -8(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi,%rbp,1),%rax - movq %r12,-32(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,-24(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq 
(%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 0(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 8(%rdi),%r11 - adcq %rax,%rbx - movq 0(%rsi,%rbp,1),%rax - movq %rbx,-16(%rdi) - adcq %rdx,%r8 - - leaq (%r14,%r10,2),%r12 - movq %r8,-8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq 16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 24(%rdi),%r11 - adcq %rax,%r12 - movq 8(%rsi,%rbp,1),%rax - movq %r12,0(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,8(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - movq 32(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq 40(%rdi),%r11 - adcq %rax,%rbx - movq 16(%rsi,%rbp,1),%rax - movq %rbx,16(%rdi) - adcq %rdx,%r8 - movq %r8,24(%rdi) - sbbq %r15,%r15 - leaq 64(%rdi),%rdi - addq $32,%rbp - jnz L$sqr4x_shift_n_add - - leaq (%r14,%r10,2),%r12 -.byte 0x67 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r13 - shrq $63,%r11 - orq %r10,%r13 - movq -16(%rdi),%r10 - movq %r11,%r14 - mulq %rax - negq %r15 - movq -8(%rdi),%r11 - adcq %rax,%r12 - movq -8(%rsi),%rax - movq %r12,-32(%rdi) - adcq %rdx,%r13 - - leaq (%r14,%r10,2),%rbx - movq %r13,-24(%rdi) - sbbq %r15,%r15 - shrq $63,%r10 - leaq (%rcx,%r11,2),%r8 - shrq $63,%r11 - orq %r10,%r8 - mulq %rax - negq %r15 - adcq %rax,%rbx - adcq %rdx,%r8 - movq %rbx,-16(%rdi) - movq %r8,-8(%rdi) -.byte 102,72,15,126,213 -__bn_sqr8x_reduction: - xorq %rax,%rax - leaq (%r9,%rbp,1),%rcx - leaq 48+8(%rsp,%r9,2),%rdx - movq %rcx,0+8(%rsp) - leaq 48+8(%rsp,%r9,1),%rdi - movq %rdx,8+8(%rsp) - negq %r9 - jmp L$8x_reduction_loop - -.p2align 5 -L$8x_reduction_loop: - leaq (%rdi,%r9,1),%rdi -.byte 0x66 - movq 0(%rdi),%rbx - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%r12 - movq 40(%rdi),%r13 - movq 48(%rdi),%r14 - movq 56(%rdi),%r15 - movq %rax,(%rdx) - leaq 64(%rdi),%rdi - -.byte 0x67 - movq %rbx,%r8 - imulq 32+8(%rsp),%rbx - movq 
0(%rbp),%rax - movl $8,%ecx - jmp L$8x_reduce - -.p2align 5 -L$8x_reduce: - mulq %rbx - movq 8(%rbp),%rax - negq %r8 - movq %rdx,%r8 - adcq $0,%r8 - - mulq %rbx - addq %rax,%r9 - movq 16(%rbp),%rax - adcq $0,%rdx - addq %r9,%r8 - movq %rbx,48-8+8(%rsp,%rcx,8) - movq %rdx,%r9 - adcq $0,%r9 - - mulq %rbx - addq %rax,%r10 - movq 24(%rbp),%rax - adcq $0,%rdx - addq %r10,%r9 - movq 32+8(%rsp),%rsi - movq %rdx,%r10 - adcq $0,%r10 - - mulq %rbx - addq %rax,%r11 - movq 32(%rbp),%rax - adcq $0,%rdx - imulq %r8,%rsi - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - - mulq %rbx - addq %rax,%r12 - movq 40(%rbp),%rax - adcq $0,%rdx - addq %r12,%r11 - movq %rdx,%r12 - adcq $0,%r12 - - mulq %rbx - addq %rax,%r13 - movq 48(%rbp),%rax - adcq $0,%rdx - addq %r13,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %rbx - addq %rax,%r14 - movq 56(%rbp),%rax - adcq $0,%rdx - addq %r14,%r13 - movq %rdx,%r14 - adcq $0,%r14 - - mulq %rbx - movq %rsi,%rbx - addq %rax,%r15 - movq 0(%rbp),%rax - adcq $0,%rdx - addq %r15,%r14 - movq %rdx,%r15 - adcq $0,%r15 - - decl %ecx - jnz L$8x_reduce - - leaq 64(%rbp),%rbp - xorq %rax,%rax - movq 8+8(%rsp),%rdx - cmpq 0+8(%rsp),%rbp - jae L$8x_no_tail - -.byte 0x66 - addq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - sbbq %rsi,%rsi - - movq 48+56+8(%rsp),%rbx - movl $8,%ecx - movq 0(%rbp),%rax - jmp L$8x_tail - -.p2align 5 -L$8x_tail: - mulq %rbx - addq %rax,%r8 - movq 8(%rbp),%rax - movq %r8,(%rdi) - movq %rdx,%r8 - adcq $0,%r8 - - mulq %rbx - addq %rax,%r9 - movq 16(%rbp),%rax - adcq $0,%rdx - addq %r9,%r8 - leaq 8(%rdi),%rdi - movq %rdx,%r9 - adcq $0,%r9 - - mulq %rbx - addq %rax,%r10 - movq 24(%rbp),%rax - adcq $0,%rdx - addq %r10,%r9 - movq %rdx,%r10 - adcq $0,%r10 - - mulq %rbx - addq %rax,%r11 - movq 32(%rbp),%rax - adcq $0,%rdx - addq %r11,%r10 - movq %rdx,%r11 - adcq $0,%r11 - - mulq %rbx - addq %rax,%r12 - movq 40(%rbp),%rax - adcq 
$0,%rdx - addq %r12,%r11 - movq %rdx,%r12 - adcq $0,%r12 - - mulq %rbx - addq %rax,%r13 - movq 48(%rbp),%rax - adcq $0,%rdx - addq %r13,%r12 - movq %rdx,%r13 - adcq $0,%r13 - - mulq %rbx - addq %rax,%r14 - movq 56(%rbp),%rax - adcq $0,%rdx - addq %r14,%r13 - movq %rdx,%r14 - adcq $0,%r14 - - mulq %rbx - movq 48-16+8(%rsp,%rcx,8),%rbx - addq %rax,%r15 - adcq $0,%rdx - addq %r15,%r14 - movq 0(%rbp),%rax - movq %rdx,%r15 - adcq $0,%r15 - - decl %ecx - jnz L$8x_tail - - leaq 64(%rbp),%rbp - movq 8+8(%rsp),%rdx - cmpq 0+8(%rsp),%rbp - jae L$8x_tail_done - - movq 48+56+8(%rsp),%rbx - negq %rsi - movq 0(%rbp),%rax - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - sbbq %rsi,%rsi - - movl $8,%ecx - jmp L$8x_tail - -.p2align 5 -L$8x_tail_done: - xorq %rax,%rax - addq (%rdx),%r8 - adcq $0,%r9 - adcq $0,%r10 - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - adcq $0,%rax - - negq %rsi -L$8x_no_tail: - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - adcq $0,%rax - movq -8(%rbp),%rcx - xorq %rsi,%rsi - -.byte 102,72,15,126,213 - - movq %r8,0(%rdi) - movq %r9,8(%rdi) -.byte 102,73,15,126,217 - movq %r10,16(%rdi) - movq %r11,24(%rdi) - movq %r12,32(%rdi) - movq %r13,40(%rdi) - movq %r14,48(%rdi) - movq %r15,56(%rdi) - leaq 64(%rdi),%rdi - - cmpq %rdx,%rdi - jb L$8x_reduction_loop - .byte 0xf3,0xc3 - - - -.p2align 5 -__bn_post4x_internal: - - movq 0(%rbp),%r12 - leaq (%rdi,%r9,1),%rbx - movq %r9,%rcx -.byte 102,72,15,126,207 - negq %rax -.byte 102,72,15,126,206 - sarq $3+2,%rcx - decq %r12 - xorq %r10,%r10 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp L$sqr4x_sub_entry - -.p2align 4 -L$sqr4x_sub: - movq 0(%rbp),%r12 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 -L$sqr4x_sub_entry: - leaq 
32(%rbp),%rbp - notq %r12 - notq %r13 - notq %r14 - notq %r15 - andq %rax,%r12 - andq %rax,%r13 - andq %rax,%r14 - andq %rax,%r15 - - negq %r10 - adcq 0(%rbx),%r12 - adcq 8(%rbx),%r13 - adcq 16(%rbx),%r14 - adcq 24(%rbx),%r15 - movq %r12,0(%rdi) - leaq 32(%rbx),%rbx - movq %r13,8(%rdi) - sbbq %r10,%r10 - movq %r14,16(%rdi) - movq %r15,24(%rdi) - leaq 32(%rdi),%rdi - - incq %rcx - jnz L$sqr4x_sub - - movq %r9,%r10 - negq %r9 - .byte 0xf3,0xc3 - - -.globl _bn_from_montgomery -.private_extern _bn_from_montgomery - -.p2align 5 -_bn_from_montgomery: - - testl $7,%r9d - jz bn_from_mont8x - xorl %eax,%eax - .byte 0xf3,0xc3 - - - - -.p2align 5 -bn_from_mont8x: - -.byte 0x67 - movq %rsp,%rax - - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$from_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$from_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp L$from_sp_done - -.p2align 5 -L$from_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$from_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$from_page_walk - jmp L$from_page_walk_done - -L$from_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$from_page_walk -L$from_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) - -L$from_body: - movq %r9,%r11 - leaq 48(%rsp),%rax - pxor %xmm0,%xmm0 - jmp L$mul_by_1 - -.p2align 5 -L$mul_by_1: - movdqu (%rsi),%xmm1 - movdqu 16(%rsi),%xmm2 - movdqu 32(%rsi),%xmm3 - movdqa %xmm0,(%rax,%r9,1) - movdqu 48(%rsi),%xmm4 - movdqa %xmm0,16(%rax,%r9,1) -.byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 - movdqa %xmm1,(%rax) - movdqa 
%xmm0,32(%rax,%r9,1) - movdqa %xmm2,16(%rax) - movdqa %xmm0,48(%rax,%r9,1) - movdqa %xmm3,32(%rax) - movdqa %xmm4,48(%rax) - leaq 64(%rax),%rax - subq $64,%r11 - jnz L$mul_by_1 - -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 0x67 - movq %rcx,%rbp -.byte 102,73,15,110,218 - leaq _OPENSSL_ia32cap_P(%rip),%r11 - movl 8(%r11),%r11d - andl $0x80108,%r11d - cmpl $0x80108,%r11d - jne L$from_mont_nox - - leaq (%rax,%r9,1),%rdi - call __bn_sqrx8x_reduction - call __bn_postx4x_internal - - pxor %xmm0,%xmm0 - leaq 48(%rsp),%rax - jmp L$from_mont_zero - -.p2align 5 -L$from_mont_nox: - call __bn_sqr8x_reduction - call __bn_post4x_internal - - pxor %xmm0,%xmm0 - leaq 48(%rsp),%rax - jmp L$from_mont_zero - -.p2align 5 -L$from_mont_zero: - movq 40(%rsp),%rsi - - movdqa %xmm0,0(%rax) - movdqa %xmm0,16(%rax) - movdqa %xmm0,32(%rax) - movdqa %xmm0,48(%rax) - leaq 64(%rax),%rax - subq $32,%r9 - jnz L$from_mont_zero - - movq $1,%rax - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$from_epilogue: - .byte 0xf3,0xc3 - - - -.p2align 5 -bn_mulx4x_mont_gather5: - - movq %rsp,%rax - -L$mulx4x_enter: - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$mulx4x_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$mulx4xsp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp L$mulx4xsp_done - -L$mulx4xsp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$mulx4xsp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mulx4x_page_walk - jmp L$mulx4x_page_walk_done - -L$mulx4x_page_walk: - leaq -4096(%rsp),%rsp 
- movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$mulx4x_page_walk -L$mulx4x_page_walk_done: - - - - - - - - - - - - - - movq %r8,32(%rsp) - movq %rax,40(%rsp) - -L$mulx4x_body: - call mulx4x_internal - - movq 40(%rsp),%rsi - - movq $1,%rax - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$mulx4x_epilogue: - .byte 0xf3,0xc3 - - - - -.p2align 5 -mulx4x_internal: - - movq %r9,8(%rsp) - movq %r9,%r10 - negq %r9 - shlq $5,%r9 - negq %r10 - leaq 128(%rdx,%r9,1),%r13 - shrq $5+5,%r9 - movd 8(%rax),%xmm5 - subq $1,%r9 - leaq L$inc(%rip),%rax - movq %r13,16+8(%rsp) - movq %r9,24+8(%rsp) - movq %rdi,56+8(%rsp) - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 88-112(%rsp,%r10,1),%r10 - leaq 128(%rdx),%rdi - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 -.byte 0x67 - movdqa %xmm1,%xmm2 -.byte 0x67 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,112(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,128(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,144(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,160(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,176(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,192(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,208(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,224(%r10) - movdqa %xmm4,%xmm3 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,240(%r10) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,256(%r10) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,272(%r10) - movdqa %xmm4,%xmm2 - - paddd %xmm0,%xmm1 - pcmpeqd 
%xmm5,%xmm0 - movdqa %xmm3,288(%r10) - movdqa %xmm4,%xmm3 -.byte 0x67 - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,304(%r10) - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,320(%r10) - - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,336(%r10) - - pand 64(%rdi),%xmm0 - pand 80(%rdi),%xmm1 - pand 96(%rdi),%xmm2 - movdqa %xmm3,352(%r10) - pand 112(%rdi),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -128(%rdi),%xmm4 - movdqa -112(%rdi),%xmm5 - movdqa -96(%rdi),%xmm2 - pand 112(%r10),%xmm4 - movdqa -80(%rdi),%xmm3 - pand 128(%r10),%xmm5 - por %xmm4,%xmm0 - pand 144(%r10),%xmm2 - por %xmm5,%xmm1 - pand 160(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa -64(%rdi),%xmm4 - movdqa -48(%rdi),%xmm5 - movdqa -32(%rdi),%xmm2 - pand 176(%r10),%xmm4 - movdqa -16(%rdi),%xmm3 - pand 192(%r10),%xmm5 - por %xmm4,%xmm0 - pand 208(%r10),%xmm2 - por %xmm5,%xmm1 - pand 224(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - movdqa 0(%rdi),%xmm4 - movdqa 16(%rdi),%xmm5 - movdqa 32(%rdi),%xmm2 - pand 240(%r10),%xmm4 - movdqa 48(%rdi),%xmm3 - pand 256(%r10),%xmm5 - por %xmm4,%xmm0 - pand 272(%r10),%xmm2 - por %xmm5,%xmm1 - pand 288(%r10),%xmm3 - por %xmm2,%xmm0 - por %xmm3,%xmm1 - pxor %xmm1,%xmm0 - pshufd $0x4e,%xmm0,%xmm1 - por %xmm1,%xmm0 - leaq 256(%rdi),%rdi -.byte 102,72,15,126,194 - leaq 64+32+8(%rsp),%rbx - - movq %rdx,%r9 - mulxq 0(%rsi),%r8,%rax - mulxq 8(%rsi),%r11,%r12 - addq %rax,%r11 - mulxq 16(%rsi),%rax,%r13 - adcq %rax,%r12 - adcq $0,%r13 - mulxq 24(%rsi),%rax,%r14 - - movq %r8,%r15 - imulq 32+8(%rsp),%r8 - xorq %rbp,%rbp - movq %r8,%rdx - - movq %rdi,8+8(%rsp) - - leaq 32(%rsi),%rsi - adcxq %rax,%r13 - adcxq %rbp,%r14 - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - mulxq 16(%rcx),%rax,%r12 - movq 24+8(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r11,-24(%rbx) - adcxq %rax,%r12 - adoxq %rbp,%r15 - 
leaq 32(%rcx),%rcx - movq %r12,-16(%rbx) - jmp L$mulx4x_1st - -.p2align 5 -L$mulx4x_1st: - adcxq %rbp,%r15 - mulxq 0(%rsi),%r10,%rax - adcxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 -.byte 0x67,0x67 - movq %r8,%rdx - adcxq %rax,%r13 - adcxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - movq %r11,-32(%rbx) - adoxq %r15,%r13 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - leaq 32(%rcx),%rcx - movq %r13,-16(%rbx) - - decq %rdi - jnz L$mulx4x_1st - - movq 8(%rsp),%rax - adcq %rbp,%r15 - leaq (%rsi,%rax,1),%rsi - addq %r15,%r14 - movq 8+8(%rsp),%rdi - adcq %rbp,%rbp - movq %r14,-8(%rbx) - jmp L$mulx4x_outer - -.p2align 5 -L$mulx4x_outer: - leaq 16-256(%rbx),%r10 - pxor %xmm4,%xmm4 -.byte 0x67,0x67 - pxor %xmm5,%xmm5 - movdqa -128(%rdi),%xmm0 - movdqa -112(%rdi),%xmm1 - movdqa -96(%rdi),%xmm2 - pand 256(%r10),%xmm0 - movdqa -80(%rdi),%xmm3 - pand 272(%r10),%xmm1 - por %xmm0,%xmm4 - pand 288(%r10),%xmm2 - por %xmm1,%xmm5 - pand 304(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%rdi),%xmm0 - movdqa -48(%rdi),%xmm1 - movdqa -32(%rdi),%xmm2 - pand 320(%r10),%xmm0 - movdqa -16(%rdi),%xmm3 - pand 336(%r10),%xmm1 - por %xmm0,%xmm4 - pand 352(%r10),%xmm2 - por %xmm1,%xmm5 - pand 368(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%rdi),%xmm0 - movdqa 16(%rdi),%xmm1 - movdqa 32(%rdi),%xmm2 - pand 384(%r10),%xmm0 - movdqa 48(%rdi),%xmm3 - pand 400(%r10),%xmm1 - por %xmm0,%xmm4 - pand 416(%r10),%xmm2 - por %xmm1,%xmm5 - pand 432(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%rdi),%xmm0 - movdqa 80(%rdi),%xmm1 - movdqa 96(%rdi),%xmm2 - pand 448(%r10),%xmm0 - movdqa 112(%rdi),%xmm3 - pand 
464(%r10),%xmm1 - por %xmm0,%xmm4 - pand 480(%r10),%xmm2 - por %xmm1,%xmm5 - pand 496(%r10),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - leaq 256(%rdi),%rdi -.byte 102,72,15,126,194 - - movq %rbp,(%rbx) - leaq 32(%rbx,%rax,1),%rbx - mulxq 0(%rsi),%r8,%r11 - xorq %rbp,%rbp - movq %rdx,%r9 - mulxq 8(%rsi),%r14,%r12 - adoxq -32(%rbx),%r8 - adcxq %r14,%r11 - mulxq 16(%rsi),%r15,%r13 - adoxq -24(%rbx),%r11 - adcxq %r15,%r12 - mulxq 24(%rsi),%rdx,%r14 - adoxq -16(%rbx),%r12 - adcxq %rdx,%r13 - leaq (%rcx,%rax,1),%rcx - leaq 32(%rsi),%rsi - adoxq -8(%rbx),%r13 - adcxq %rbp,%r14 - adoxq %rbp,%r14 - - movq %r8,%r15 - imulq 32+8(%rsp),%r8 - - movq %r8,%rdx - xorq %rbp,%rbp - movq %rdi,8+8(%rsp) - - mulxq 0(%rcx),%rax,%r10 - adcxq %rax,%r15 - adoxq %r11,%r10 - mulxq 8(%rcx),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - mulxq 16(%rcx),%rax,%r12 - adcxq %rax,%r11 - adoxq %r13,%r12 - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - movq 24+8(%rsp),%rdi - movq %r10,-32(%rbx) - adcxq %rax,%r12 - movq %r11,-24(%rbx) - adoxq %rbp,%r15 - movq %r12,-16(%rbx) - leaq 32(%rcx),%rcx - jmp L$mulx4x_inner - -.p2align 5 -L$mulx4x_inner: - mulxq 0(%rsi),%r10,%rax - adcxq %rbp,%r15 - adoxq %r14,%r10 - mulxq 8(%rsi),%r11,%r14 - adcxq 0(%rbx),%r10 - adoxq %rax,%r11 - mulxq 16(%rsi),%r12,%rax - adcxq 8(%rbx),%r11 - adoxq %r14,%r12 - mulxq 24(%rsi),%r13,%r14 - movq %r8,%rdx - adcxq 16(%rbx),%r12 - adoxq %rax,%r13 - adcxq 24(%rbx),%r13 - adoxq %rbp,%r14 - leaq 32(%rsi),%rsi - leaq 32(%rbx),%rbx - adcxq %rbp,%r14 - - adoxq %r15,%r10 - mulxq 0(%rcx),%rax,%r15 - adcxq %rax,%r10 - adoxq %r15,%r11 - mulxq 8(%rcx),%rax,%r15 - adcxq %rax,%r11 - adoxq %r15,%r12 - mulxq 16(%rcx),%rax,%r15 - movq %r10,-40(%rbx) - adcxq %rax,%r12 - adoxq %r15,%r13 - movq %r11,-32(%rbx) - mulxq 24(%rcx),%rax,%r15 - movq %r9,%rdx - leaq 32(%rcx),%rcx - movq %r12,-24(%rbx) - adcxq %rax,%r13 - adoxq %rbp,%r15 - movq %r13,-16(%rbx) - - decq %rdi - jnz L$mulx4x_inner - - 
movq 0+8(%rsp),%rax - adcq %rbp,%r15 - subq 0(%rbx),%rdi - movq 8+8(%rsp),%rdi - movq 16+8(%rsp),%r10 - adcq %r15,%r14 - leaq (%rsi,%rax,1),%rsi - adcq %rbp,%rbp - movq %r14,-8(%rbx) - - cmpq %r10,%rdi - jb L$mulx4x_outer - - movq -8(%rcx),%r10 - movq %rbp,%r8 - movq (%rcx,%rax,1),%r12 - leaq (%rcx,%rax,1),%rbp - movq %rax,%rcx - leaq (%rbx,%rax,1),%rdi - xorl %eax,%eax - xorq %r15,%r15 - subq %r14,%r10 - adcq %r15,%r15 - orq %r15,%r8 - sarq $3+2,%rcx - subq %r8,%rax - movq 56+8(%rsp),%rdx - decq %r12 - movq 8(%rbp),%r13 - xorq %r8,%r8 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp L$sqrx4x_sub_entry - - - -.p2align 5 -bn_powerx5: - - movq %rsp,%rax - -L$powerx5_enter: - pushq %rbx - - pushq %rbp - - pushq %r12 - - pushq %r13 - - pushq %r14 - - pushq %r15 - -L$powerx5_prologue: - - shll $3,%r9d - leaq (%r9,%r9,2),%r10 - negq %r9 - movq (%r8),%r8 - - - - - - - - - leaq -320(%rsp,%r9,2),%r11 - movq %rsp,%rbp - subq %rdi,%r11 - andq $4095,%r11 - cmpq %r11,%r10 - jb L$pwrx_sp_alt - subq %r11,%rbp - leaq -320(%rbp,%r9,2),%rbp - jmp L$pwrx_sp_done - -.p2align 5 -L$pwrx_sp_alt: - leaq 4096-320(,%r9,2),%r10 - leaq -320(%rbp,%r9,2),%rbp - subq %r10,%r11 - movq $0,%r10 - cmovcq %r10,%r11 - subq %r11,%rbp -L$pwrx_sp_done: - andq $-64,%rbp - movq %rsp,%r11 - subq %rbp,%r11 - andq $-4096,%r11 - leaq (%r11,%rbp,1),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$pwrx_page_walk - jmp L$pwrx_page_walk_done - -L$pwrx_page_walk: - leaq -4096(%rsp),%rsp - movq (%rsp),%r10 - cmpq %rbp,%rsp - ja L$pwrx_page_walk -L$pwrx_page_walk_done: - - movq %r9,%r10 - negq %r9 - - - - - - - - - - - - - pxor %xmm0,%xmm0 -.byte 102,72,15,110,207 -.byte 102,72,15,110,209 -.byte 102,73,15,110,218 -.byte 102,72,15,110,226 - movq %r8,32(%rsp) - movq %rax,40(%rsp) - -L$powerx5_body: - - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call 
__bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - - movq %r10,%r9 - movq %rsi,%rdi -.byte 102,72,15,126,209 -.byte 102,72,15,126,226 - movq 40(%rsp),%rax - - call mulx4x_internal - - movq 40(%rsp),%rsi - - movq $1,%rax - - movq -48(%rsi),%r15 - - movq -40(%rsi),%r14 - - movq -32(%rsi),%r13 - - movq -24(%rsi),%r12 - - movq -16(%rsi),%rbp - - movq -8(%rsi),%rbx - - leaq (%rsi),%rsp - -L$powerx5_epilogue: - .byte 0xf3,0xc3 - - - -.globl _bn_sqrx8x_internal -.private_extern _bn_sqrx8x_internal -.private_extern _bn_sqrx8x_internal - -.p2align 5 -_bn_sqrx8x_internal: -__bn_sqrx8x_internal: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - leaq 48+8(%rsp),%rdi - leaq (%rsi,%r9,1),%rbp - movq %r9,0+8(%rsp) - movq %rbp,8+8(%rsp) - jmp L$sqr8x_zero_start - -.p2align 5 -.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 -L$sqrx8x_zero: -.byte 0x3e - movdqa %xmm0,0(%rdi) - movdqa %xmm0,16(%rdi) - movdqa %xmm0,32(%rdi) - movdqa %xmm0,48(%rdi) -L$sqr8x_zero_start: - movdqa %xmm0,64(%rdi) - movdqa %xmm0,80(%rdi) - movdqa %xmm0,96(%rdi) - movdqa %xmm0,112(%rdi) - leaq 128(%rdi),%rdi - subq $64,%r9 - jnz L$sqrx8x_zero - - movq 0(%rsi),%rdx - - xorq %r10,%r10 - xorq %r11,%r11 - xorq %r12,%r12 - xorq %r13,%r13 - xorq %r14,%r14 - xorq %r15,%r15 - leaq 48+8(%rsp),%rdi - xorq %rbp,%rbp - jmp L$sqrx8x_outer_loop - -.p2align 5 -L$sqrx8x_outer_loop: - mulxq 8(%rsi),%r8,%rax - adcxq %r9,%r8 - adoxq %rax,%r10 - mulxq 16(%rsi),%r9,%rax - adcxq %r10,%r9 - adoxq %rax,%r11 -.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 - adcxq %r11,%r10 - adoxq %rax,%r12 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 - adcxq %r12,%r11 - adoxq %rax,%r13 - mulxq 40(%rsi),%r12,%rax - adcxq %r13,%r12 - adoxq %rax,%r14 - mulxq 48(%rsi),%r13,%rax - adcxq %r14,%r13 - adoxq %r15,%rax - mulxq 56(%rsi),%r14,%r15 - movq 8(%rsi),%rdx - adcxq %rax,%r14 - adoxq %rbp,%r15 - adcq 64(%rdi),%r15 - movq %r8,8(%rdi) - movq %r9,16(%rdi) - sbbq 
%rcx,%rcx - xorq %rbp,%rbp - - - mulxq 16(%rsi),%r8,%rbx - mulxq 24(%rsi),%r9,%rax - adcxq %r10,%r8 - adoxq %rbx,%r9 - mulxq 32(%rsi),%r10,%rbx - adcxq %r11,%r9 - adoxq %rax,%r10 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 - adcxq %r12,%r10 - adoxq %rbx,%r11 -.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 - adcxq %r13,%r11 - adoxq %r14,%r12 -.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 - movq 16(%rsi),%rdx - adcxq %rax,%r12 - adoxq %rbx,%r13 - adcxq %r15,%r13 - adoxq %rbp,%r14 - adcxq %rbp,%r14 - - movq %r8,24(%rdi) - movq %r9,32(%rdi) - - mulxq 24(%rsi),%r8,%rbx - mulxq 32(%rsi),%r9,%rax - adcxq %r10,%r8 - adoxq %rbx,%r9 - mulxq 40(%rsi),%r10,%rbx - adcxq %r11,%r9 - adoxq %rax,%r10 -.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 - adcxq %r12,%r10 - adoxq %r13,%r11 -.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 -.byte 0x3e - movq 24(%rsi),%rdx - adcxq %rbx,%r11 - adoxq %rax,%r12 - adcxq %r14,%r12 - movq %r8,40(%rdi) - movq %r9,48(%rdi) - mulxq 32(%rsi),%r8,%rax - adoxq %rbp,%r13 - adcxq %rbp,%r13 - - mulxq 40(%rsi),%r9,%rbx - adcxq %r10,%r8 - adoxq %rax,%r9 - mulxq 48(%rsi),%r10,%rax - adcxq %r11,%r9 - adoxq %r12,%r10 - mulxq 56(%rsi),%r11,%r12 - movq 32(%rsi),%rdx - movq 40(%rsi),%r14 - adcxq %rbx,%r10 - adoxq %rax,%r11 - movq 48(%rsi),%r15 - adcxq %r13,%r11 - adoxq %rbp,%r12 - adcxq %rbp,%r12 - - movq %r8,56(%rdi) - movq %r9,64(%rdi) - - mulxq %r14,%r9,%rax - movq 56(%rsi),%r8 - adcxq %r10,%r9 - mulxq %r15,%r10,%rbx - adoxq %rax,%r10 - adcxq %r11,%r10 - mulxq %r8,%r11,%rax - movq %r14,%rdx - adoxq %rbx,%r11 - adcxq %r12,%r11 - - adcxq %rbp,%rax - - mulxq %r15,%r14,%rbx - mulxq %r8,%r12,%r13 - movq %r15,%rdx - leaq 64(%rsi),%rsi - adcxq %r14,%r11 - adoxq %rbx,%r12 - adcxq %rax,%r12 - adoxq %rbp,%r13 - -.byte 0x67,0x67 - mulxq %r8,%r8,%r14 - adcxq %r8,%r13 - adcxq %rbp,%r14 - - cmpq 8+8(%rsp),%rsi - je L$sqrx8x_outer_break - - negq %rcx - movq $-8,%rcx - movq %rbp,%r15 - movq 64(%rdi),%r8 - adcxq 72(%rdi),%r9 - adcxq 80(%rdi),%r10 - adcxq 
88(%rdi),%r11 - adcq 96(%rdi),%r12 - adcq 104(%rdi),%r13 - adcq 112(%rdi),%r14 - adcq 120(%rdi),%r15 - leaq (%rsi),%rbp - leaq 128(%rdi),%rdi - sbbq %rax,%rax - - movq -64(%rsi),%rdx - movq %rax,16+8(%rsp) - movq %rdi,24+8(%rsp) - - - xorl %eax,%eax - jmp L$sqrx8x_loop - -.p2align 5 -L$sqrx8x_loop: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rax,%rbx - adoxq %r9,%r8 - - mulxq 8(%rbp),%rax,%r9 - adcxq %rax,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rax,%r10 - adcxq %rax,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcxq %rax,%r11 - adoxq %r13,%r12 - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - movq %rbx,(%rdi,%rcx,8) - movl $0,%ebx - adcxq %rax,%r13 - adoxq %r15,%r14 - -.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 - movq 8(%rsi,%rcx,8),%rdx - adcxq %rax,%r14 - adoxq %rbx,%r15 - adcxq %rbx,%r15 - -.byte 0x67 - incq %rcx - jnz L$sqrx8x_loop - - leaq 64(%rbp),%rbp - movq $-8,%rcx - cmpq 8+8(%rsp),%rbp - je L$sqrx8x_break - - subq 16+8(%rsp),%rbx -.byte 0x66 - movq -64(%rsi),%rdx - adcxq 0(%rdi),%r8 - adcxq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi -.byte 0x67 - sbbq %rax,%rax - xorl %ebx,%ebx - movq %rax,16+8(%rsp) - jmp L$sqrx8x_loop - -.p2align 5 -L$sqrx8x_break: - xorq %rbp,%rbp - subq 16+8(%rsp),%rbx - adcxq %rbp,%r8 - movq 24+8(%rsp),%rcx - adcxq %rbp,%r9 - movq 0(%rsi),%rdx - adcq $0,%r10 - movq %r8,0(%rdi) - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - cmpq %rcx,%rdi - je L$sqrx8x_outer_loop - - movq %r9,8(%rdi) - movq 8(%rcx),%r9 - movq %r10,16(%rdi) - movq 16(%rcx),%r10 - movq %r11,24(%rdi) - movq 24(%rcx),%r11 - movq %r12,32(%rdi) - movq 32(%rcx),%r12 - movq %r13,40(%rdi) - movq 40(%rcx),%r13 - movq %r14,48(%rdi) - movq 48(%rcx),%r14 - movq %r15,56(%rdi) - movq 
56(%rcx),%r15 - movq %rcx,%rdi - jmp L$sqrx8x_outer_loop - -.p2align 5 -L$sqrx8x_outer_break: - movq %r9,72(%rdi) -.byte 102,72,15,126,217 - movq %r10,80(%rdi) - movq %r11,88(%rdi) - movq %r12,96(%rdi) - movq %r13,104(%rdi) - movq %r14,112(%rdi) - leaq 48+8(%rsp),%rdi - movq (%rsi,%rcx,1),%rdx - - movq 8(%rdi),%r11 - xorq %r10,%r10 - movq 0+8(%rsp),%r9 - adoxq %r11,%r11 - movq 16(%rdi),%r12 - movq 24(%rdi),%r13 - - -.p2align 5 -L$sqrx4x_shift_n_add: - mulxq %rdx,%rax,%rbx - adoxq %r12,%r12 - adcxq %r10,%rax -.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 -.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 - adoxq %r13,%r13 - adcxq %r11,%rbx - movq 40(%rdi),%r11 - movq %rax,0(%rdi) - movq %rbx,8(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r10,%r10 - adcxq %r12,%rax - movq 16(%rsi,%rcx,1),%rdx - movq 48(%rdi),%r12 - adoxq %r11,%r11 - adcxq %r13,%rbx - movq 56(%rdi),%r13 - movq %rax,16(%rdi) - movq %rbx,24(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r12,%r12 - adcxq %r10,%rax - movq 24(%rsi,%rcx,1),%rdx - leaq 32(%rcx),%rcx - movq 64(%rdi),%r10 - adoxq %r13,%r13 - adcxq %r11,%rbx - movq 72(%rdi),%r11 - movq %rax,32(%rdi) - movq %rbx,40(%rdi) - - mulxq %rdx,%rax,%rbx - adoxq %r10,%r10 - adcxq %r12,%rax - jrcxz L$sqrx4x_shift_n_add_break -.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 - adoxq %r11,%r11 - adcxq %r13,%rbx - movq 80(%rdi),%r12 - movq 88(%rdi),%r13 - movq %rax,48(%rdi) - movq %rbx,56(%rdi) - leaq 64(%rdi),%rdi - nop - jmp L$sqrx4x_shift_n_add - -.p2align 5 -L$sqrx4x_shift_n_add_break: - adcxq %r13,%rbx - movq %rax,48(%rdi) - movq %rbx,56(%rdi) - leaq 64(%rdi),%rdi -.byte 102,72,15,126,213 -__bn_sqrx8x_reduction: - xorl %eax,%eax - movq 32+8(%rsp),%rbx - movq 48+8(%rsp),%rdx - leaq -64(%rbp,%r9,1),%rcx - - movq %rcx,0+8(%rsp) - movq %rdi,8+8(%rsp) - - leaq 48+8(%rsp),%rdi - jmp L$sqrx8x_reduction_loop - -.p2align 5 -L$sqrx8x_reduction_loop: - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%r12 - movq %rdx,%r8 - imulq %rbx,%rdx - movq 40(%rdi),%r13 - 
movq 48(%rdi),%r14 - movq 56(%rdi),%r15 - movq %rax,24+8(%rsp) - - leaq 64(%rdi),%rdi - xorq %rsi,%rsi - movq $-8,%rcx - jmp L$sqrx8x_reduce - -.p2align 5 -L$sqrx8x_reduce: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rbx,%rax - adoxq %r9,%r8 - - mulxq 8(%rbp),%rbx,%r9 - adcxq %rbx,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rbx,%r10 - adcxq %rbx,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rbx,%r11 - adcxq %rbx,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 - movq %rdx,%rax - movq %r8,%rdx - adcxq %rbx,%r11 - adoxq %r13,%r12 - - mulxq 32+8(%rsp),%rbx,%rdx - movq %rax,%rdx - movq %rax,64+48+8(%rsp,%rcx,8) - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - adcxq %rax,%r13 - adoxq %r15,%r14 - - mulxq 56(%rbp),%rax,%r15 - movq %rbx,%rdx - adcxq %rax,%r14 - adoxq %rsi,%r15 - adcxq %rsi,%r15 - -.byte 0x67,0x67,0x67 - incq %rcx - jnz L$sqrx8x_reduce - - movq %rsi,%rax - cmpq 0+8(%rsp),%rbp - jae L$sqrx8x_no_tail - - movq 48+8(%rsp),%rdx - addq 0(%rdi),%r8 - leaq 64(%rbp),%rbp - movq $-8,%rcx - adcxq 8(%rdi),%r9 - adcxq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi - sbbq %rax,%rax - - xorq %rsi,%rsi - movq %rax,16+8(%rsp) - jmp L$sqrx8x_tail - -.p2align 5 -L$sqrx8x_tail: - movq %r8,%rbx - mulxq 0(%rbp),%rax,%r8 - adcxq %rax,%rbx - adoxq %r9,%r8 - - mulxq 8(%rbp),%rax,%r9 - adcxq %rax,%r8 - adoxq %r10,%r9 - - mulxq 16(%rbp),%rax,%r10 - adcxq %rax,%r9 - adoxq %r11,%r10 - - mulxq 24(%rbp),%rax,%r11 - adcxq %rax,%r10 - adoxq %r12,%r11 - -.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcxq %rax,%r11 - adoxq %r13,%r12 - - mulxq 40(%rbp),%rax,%r13 - adcxq %rax,%r12 - adoxq %r14,%r13 - - mulxq 48(%rbp),%rax,%r14 - adcxq %rax,%r13 - adoxq %r15,%r14 - - mulxq 56(%rbp),%rax,%r15 - movq 72+48+8(%rsp,%rcx,8),%rdx - adcxq %rax,%r14 - adoxq %rsi,%r15 - movq %rbx,(%rdi,%rcx,8) - movq %r8,%rbx - adcxq %rsi,%r15 - - 
incq %rcx - jnz L$sqrx8x_tail - - cmpq 0+8(%rsp),%rbp - jae L$sqrx8x_tail_done - - subq 16+8(%rsp),%rsi - movq 48+8(%rsp),%rdx - leaq 64(%rbp),%rbp - adcq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - leaq 64(%rdi),%rdi - sbbq %rax,%rax - subq $8,%rcx - - xorq %rsi,%rsi - movq %rax,16+8(%rsp) - jmp L$sqrx8x_tail - -.p2align 5 -L$sqrx8x_tail_done: - xorq %rax,%rax - addq 24+8(%rsp),%r8 - adcq $0,%r9 - adcq $0,%r10 - adcq $0,%r11 - adcq $0,%r12 - adcq $0,%r13 - adcq $0,%r14 - adcq $0,%r15 - adcq $0,%rax - - subq 16+8(%rsp),%rsi -L$sqrx8x_no_tail: - adcq 0(%rdi),%r8 -.byte 102,72,15,126,217 - adcq 8(%rdi),%r9 - movq 56(%rbp),%rsi -.byte 102,72,15,126,213 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - adcq 32(%rdi),%r12 - adcq 40(%rdi),%r13 - adcq 48(%rdi),%r14 - adcq 56(%rdi),%r15 - adcq $0,%rax - - movq 32+8(%rsp),%rbx - movq 64(%rdi,%rcx,1),%rdx - - movq %r8,0(%rdi) - leaq 64(%rdi),%r8 - movq %r9,8(%rdi) - movq %r10,16(%rdi) - movq %r11,24(%rdi) - movq %r12,32(%rdi) - movq %r13,40(%rdi) - movq %r14,48(%rdi) - movq %r15,56(%rdi) - - leaq 64(%rdi,%rcx,1),%rdi - cmpq 8+8(%rsp),%r8 - jb L$sqrx8x_reduction_loop - .byte 0xf3,0xc3 - - -.p2align 5 - -__bn_postx4x_internal: - - movq 0(%rbp),%r12 - movq %rcx,%r10 - movq %rcx,%r9 - negq %rax - sarq $3+2,%rcx - -.byte 102,72,15,126,202 -.byte 102,72,15,126,206 - decq %r12 - movq 8(%rbp),%r13 - xorq %r8,%r8 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 - jmp L$sqrx4x_sub_entry - -.p2align 4 -L$sqrx4x_sub: - movq 0(%rbp),%r12 - movq 8(%rbp),%r13 - movq 16(%rbp),%r14 - movq 24(%rbp),%r15 -L$sqrx4x_sub_entry: - andnq %rax,%r12,%r12 - leaq 32(%rbp),%rbp - andnq %rax,%r13,%r13 - andnq %rax,%r14,%r14 - andnq %rax,%r15,%r15 - - negq %r8 - adcq 0(%rdi),%r12 - adcq 8(%rdi),%r13 - adcq 16(%rdi),%r14 - adcq 24(%rdi),%r15 - movq %r12,0(%rdx) - leaq 32(%rdi),%rdi - movq %r13,8(%rdx) - sbbq %r8,%r8 - movq %r14,16(%rdx) - movq 
%r15,24(%rdx) - leaq 32(%rdx),%rdx - - incq %rcx - jnz L$sqrx4x_sub - - negq %r9 - - .byte 0xf3,0xc3 - - -.globl _bn_scatter5 -.private_extern _bn_scatter5 - -.p2align 4 -_bn_scatter5: - - cmpl $0,%esi - jz L$scatter_epilogue - leaq (%rdx,%rcx,8),%rdx -L$scatter: - movq (%rdi),%rax - leaq 8(%rdi),%rdi - movq %rax,(%rdx) - leaq 256(%rdx),%rdx - subl $1,%esi - jnz L$scatter -L$scatter_epilogue: - .byte 0xf3,0xc3 - - - -.globl _bn_gather5 -.private_extern _bn_gather5 - -.p2align 5 -_bn_gather5: - -L$SEH_begin_bn_gather5: - -.byte 0x4c,0x8d,0x14,0x24 - -.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 - leaq L$inc(%rip),%rax - andq $-16,%rsp - - movd %ecx,%xmm5 - movdqa 0(%rax),%xmm0 - movdqa 16(%rax),%xmm1 - leaq 128(%rdx),%r11 - leaq 128(%rsp),%rax - - pshufd $0,%xmm5,%xmm5 - movdqa %xmm1,%xmm4 - movdqa %xmm1,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,-128(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,-112(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,-96(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,-80(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,-64(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,-48(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,-32(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,-16(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - movdqa %xmm0,0(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,16(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,32(%rax) - movdqa %xmm4,%xmm2 - paddd %xmm0,%xmm1 - pcmpeqd %xmm5,%xmm0 - movdqa %xmm3,48(%rax) - movdqa %xmm4,%xmm3 - - paddd %xmm1,%xmm2 - pcmpeqd %xmm5,%xmm1 - 
movdqa %xmm0,64(%rax) - movdqa %xmm4,%xmm0 - - paddd %xmm2,%xmm3 - pcmpeqd %xmm5,%xmm2 - movdqa %xmm1,80(%rax) - movdqa %xmm4,%xmm1 - - paddd %xmm3,%xmm0 - pcmpeqd %xmm5,%xmm3 - movdqa %xmm2,96(%rax) - movdqa %xmm4,%xmm2 - movdqa %xmm3,112(%rax) - jmp L$gather - -.p2align 5 -L$gather: - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - movdqa -128(%r11),%xmm0 - movdqa -112(%r11),%xmm1 - movdqa -96(%r11),%xmm2 - pand -128(%rax),%xmm0 - movdqa -80(%r11),%xmm3 - pand -112(%rax),%xmm1 - por %xmm0,%xmm4 - pand -96(%rax),%xmm2 - por %xmm1,%xmm5 - pand -80(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa -64(%r11),%xmm0 - movdqa -48(%r11),%xmm1 - movdqa -32(%r11),%xmm2 - pand -64(%rax),%xmm0 - movdqa -16(%r11),%xmm3 - pand -48(%rax),%xmm1 - por %xmm0,%xmm4 - pand -32(%rax),%xmm2 - por %xmm1,%xmm5 - pand -16(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 0(%r11),%xmm0 - movdqa 16(%r11),%xmm1 - movdqa 32(%r11),%xmm2 - pand 0(%rax),%xmm0 - movdqa 48(%r11),%xmm3 - pand 16(%rax),%xmm1 - por %xmm0,%xmm4 - pand 32(%rax),%xmm2 - por %xmm1,%xmm5 - pand 48(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - movdqa 64(%r11),%xmm0 - movdqa 80(%r11),%xmm1 - movdqa 96(%r11),%xmm2 - pand 64(%rax),%xmm0 - movdqa 112(%r11),%xmm3 - pand 80(%rax),%xmm1 - por %xmm0,%xmm4 - pand 96(%rax),%xmm2 - por %xmm1,%xmm5 - pand 112(%rax),%xmm3 - por %xmm2,%xmm4 - por %xmm3,%xmm5 - por %xmm5,%xmm4 - leaq 256(%r11),%r11 - pshufd $0x4e,%xmm4,%xmm0 - por %xmm4,%xmm0 - movq %xmm0,(%rdi) - leaq 8(%rdi),%rdi - subl $1,%esi - jnz L$gather - - leaq (%r10),%rsp - - .byte 0xf3,0xc3 -L$SEH_end_bn_gather5: - - -.p2align 6 -L$inc: -.long 0,0, 1,1 -.long 2,2, 2,2 -.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -#endif diff --git 
a/packager/third_party/boringssl/mac-x86_64/crypto/test/trampoline-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/test/trampoline-x86_64.S deleted file mode 100644 index 863e6b0452..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/test/trampoline-x86_64.S +++ /dev/null @@ -1,513 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - - - - - - - - -.globl _abi_test_trampoline -.private_extern _abi_test_trampoline -.p2align 4 -_abi_test_trampoline: -L$abi_test_trampoline_seh_begin: - - - - - - - - - - - subq $120,%rsp - -L$abi_test_trampoline_seh_prolog_alloc: - movq %r8,48(%rsp) - movq %rbx,64(%rsp) - -L$abi_test_trampoline_seh_prolog_rbx: - movq %rbp,72(%rsp) - -L$abi_test_trampoline_seh_prolog_rbp: - movq %r12,80(%rsp) - -L$abi_test_trampoline_seh_prolog_r12: - movq %r13,88(%rsp) - -L$abi_test_trampoline_seh_prolog_r13: - movq %r14,96(%rsp) - -L$abi_test_trampoline_seh_prolog_r14: - movq %r15,104(%rsp) - -L$abi_test_trampoline_seh_prolog_r15: -L$abi_test_trampoline_seh_prolog_end: - movq 0(%rsi),%rbx - movq 8(%rsi),%rbp - movq 16(%rsi),%r12 - movq 24(%rsi),%r13 - movq 32(%rsi),%r14 - movq 40(%rsi),%r15 - - movq %rdi,32(%rsp) - movq %rsi,40(%rsp) - - - - - movq %rdx,%r10 - movq %rcx,%r11 - decq %r11 - js L$args_done - movq (%r10),%rdi - addq $8,%r10 - decq %r11 - js L$args_done - movq (%r10),%rsi - addq $8,%r10 - decq %r11 - js L$args_done - movq (%r10),%rdx - addq $8,%r10 - decq %r11 - js L$args_done - movq (%r10),%rcx - addq $8,%r10 - decq %r11 - js L$args_done - movq (%r10),%r8 - addq $8,%r10 - decq %r11 - js L$args_done - movq (%r10),%r9 - addq $8,%r10 - leaq 0(%rsp),%rax -L$args_loop: - decq %r11 - js L$args_done - - - 
- - - - movq %r11,56(%rsp) - movq (%r10),%r11 - movq %r11,(%rax) - movq 56(%rsp),%r11 - - addq $8,%r10 - addq $8,%rax - jmp L$args_loop - -L$args_done: - movq 32(%rsp),%rax - movq 48(%rsp),%r10 - testq %r10,%r10 - jz L$no_unwind - - - pushfq - orq $0x100,0(%rsp) - popfq - - - - nop -.globl _abi_test_unwind_start -.private_extern _abi_test_unwind_start -_abi_test_unwind_start: - - call *%rax -.globl _abi_test_unwind_return -.private_extern _abi_test_unwind_return -_abi_test_unwind_return: - - - - - pushfq - andq $-0x101,0(%rsp) - popfq -.globl _abi_test_unwind_stop -.private_extern _abi_test_unwind_stop -_abi_test_unwind_stop: - - jmp L$call_done - -L$no_unwind: - call *%rax - -L$call_done: - - movq 40(%rsp),%rsi - movq %rbx,0(%rsi) - movq %rbp,8(%rsi) - movq %r12,16(%rsi) - movq %r13,24(%rsi) - movq %r14,32(%rsi) - movq %r15,40(%rsi) - movq 64(%rsp),%rbx - - movq 72(%rsp),%rbp - - movq 80(%rsp),%r12 - - movq 88(%rsp),%r13 - - movq 96(%rsp),%r14 - - movq 104(%rsp),%r15 - - addq $120,%rsp - - - - .byte 0xf3,0xc3 - -L$abi_test_trampoline_seh_end: - - -.globl _abi_test_clobber_rax -.private_extern _abi_test_clobber_rax -.p2align 4 -_abi_test_clobber_rax: - xorq %rax,%rax - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rbx -.private_extern _abi_test_clobber_rbx -.p2align 4 -_abi_test_clobber_rbx: - xorq %rbx,%rbx - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rcx -.private_extern _abi_test_clobber_rcx -.p2align 4 -_abi_test_clobber_rcx: - xorq %rcx,%rcx - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rdx -.private_extern _abi_test_clobber_rdx -.p2align 4 -_abi_test_clobber_rdx: - xorq %rdx,%rdx - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rdi -.private_extern _abi_test_clobber_rdi -.p2align 4 -_abi_test_clobber_rdi: - xorq %rdi,%rdi - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rsi -.private_extern _abi_test_clobber_rsi -.p2align 4 -_abi_test_clobber_rsi: - xorq %rsi,%rsi - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_rbp -.private_extern _abi_test_clobber_rbp 
-.p2align 4 -_abi_test_clobber_rbp: - xorq %rbp,%rbp - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r8 -.private_extern _abi_test_clobber_r8 -.p2align 4 -_abi_test_clobber_r8: - xorq %r8,%r8 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r9 -.private_extern _abi_test_clobber_r9 -.p2align 4 -_abi_test_clobber_r9: - xorq %r9,%r9 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r10 -.private_extern _abi_test_clobber_r10 -.p2align 4 -_abi_test_clobber_r10: - xorq %r10,%r10 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r11 -.private_extern _abi_test_clobber_r11 -.p2align 4 -_abi_test_clobber_r11: - xorq %r11,%r11 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r12 -.private_extern _abi_test_clobber_r12 -.p2align 4 -_abi_test_clobber_r12: - xorq %r12,%r12 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r13 -.private_extern _abi_test_clobber_r13 -.p2align 4 -_abi_test_clobber_r13: - xorq %r13,%r13 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r14 -.private_extern _abi_test_clobber_r14 -.p2align 4 -_abi_test_clobber_r14: - xorq %r14,%r14 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_r15 -.private_extern _abi_test_clobber_r15 -.p2align 4 -_abi_test_clobber_r15: - xorq %r15,%r15 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm0 -.private_extern _abi_test_clobber_xmm0 -.p2align 4 -_abi_test_clobber_xmm0: - pxor %xmm0,%xmm0 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm1 -.private_extern _abi_test_clobber_xmm1 -.p2align 4 -_abi_test_clobber_xmm1: - pxor %xmm1,%xmm1 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm2 -.private_extern _abi_test_clobber_xmm2 -.p2align 4 -_abi_test_clobber_xmm2: - pxor %xmm2,%xmm2 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm3 -.private_extern _abi_test_clobber_xmm3 -.p2align 4 -_abi_test_clobber_xmm3: - pxor %xmm3,%xmm3 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm4 -.private_extern _abi_test_clobber_xmm4 -.p2align 4 -_abi_test_clobber_xmm4: - pxor %xmm4,%xmm4 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm5 
-.private_extern _abi_test_clobber_xmm5 -.p2align 4 -_abi_test_clobber_xmm5: - pxor %xmm5,%xmm5 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm6 -.private_extern _abi_test_clobber_xmm6 -.p2align 4 -_abi_test_clobber_xmm6: - pxor %xmm6,%xmm6 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm7 -.private_extern _abi_test_clobber_xmm7 -.p2align 4 -_abi_test_clobber_xmm7: - pxor %xmm7,%xmm7 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm8 -.private_extern _abi_test_clobber_xmm8 -.p2align 4 -_abi_test_clobber_xmm8: - pxor %xmm8,%xmm8 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm9 -.private_extern _abi_test_clobber_xmm9 -.p2align 4 -_abi_test_clobber_xmm9: - pxor %xmm9,%xmm9 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm10 -.private_extern _abi_test_clobber_xmm10 -.p2align 4 -_abi_test_clobber_xmm10: - pxor %xmm10,%xmm10 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm11 -.private_extern _abi_test_clobber_xmm11 -.p2align 4 -_abi_test_clobber_xmm11: - pxor %xmm11,%xmm11 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm12 -.private_extern _abi_test_clobber_xmm12 -.p2align 4 -_abi_test_clobber_xmm12: - pxor %xmm12,%xmm12 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm13 -.private_extern _abi_test_clobber_xmm13 -.p2align 4 -_abi_test_clobber_xmm13: - pxor %xmm13,%xmm13 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm14 -.private_extern _abi_test_clobber_xmm14 -.p2align 4 -_abi_test_clobber_xmm14: - pxor %xmm14,%xmm14 - .byte 0xf3,0xc3 - - -.globl _abi_test_clobber_xmm15 -.private_extern _abi_test_clobber_xmm15 -.p2align 4 -_abi_test_clobber_xmm15: - pxor %xmm15,%xmm15 - .byte 0xf3,0xc3 - - - - - -.globl _abi_test_bad_unwind_wrong_register -.private_extern _abi_test_bad_unwind_wrong_register -.p2align 4 -_abi_test_bad_unwind_wrong_register: - -L$abi_test_bad_unwind_wrong_register_seh_begin: - pushq %r12 - -L$abi_test_bad_unwind_wrong_register_seh_push_r13: - - - - nop - popq %r12 - - .byte 0xf3,0xc3 -L$abi_test_bad_unwind_wrong_register_seh_end: 
- - - - - - - -.globl _abi_test_bad_unwind_temporary -.private_extern _abi_test_bad_unwind_temporary -.p2align 4 -_abi_test_bad_unwind_temporary: - -L$abi_test_bad_unwind_temporary_seh_begin: - pushq %r12 - -L$abi_test_bad_unwind_temporary_seh_push_r12: - - movq %r12,%rax - incq %rax - movq %rax,(%rsp) - - - - movq %r12,(%rsp) - - - popq %r12 - - .byte 0xf3,0xc3 -L$abi_test_bad_unwind_temporary_seh_end: - - - - - - - -.globl _abi_test_get_and_clear_direction_flag -.private_extern _abi_test_get_and_clear_direction_flag -_abi_test_get_and_clear_direction_flag: - pushfq - popq %rax - andq $0x400,%rax - shrq $10,%rax - cld - .byte 0xf3,0xc3 - - - - - -.globl _abi_test_set_direction_flag -.private_extern _abi_test_set_direction_flag -_abi_test_set_direction_flag: - std - .byte 0xf3,0xc3 - -#endif diff --git a/packager/third_party/boringssl/mac-x86_64/crypto/third_party/sike/asm/fp-x86_64.S b/packager/third_party/boringssl/mac-x86_64/crypto/third_party/sike/asm/fp-x86_64.S deleted file mode 100644 index f1e7ea4f63..0000000000 --- a/packager/third_party/boringssl/mac-x86_64/crypto/third_party/sike/asm/fp-x86_64.S +++ /dev/null @@ -1,1869 +0,0 @@ -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. 
- -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) -#define OPENSSL_NO_ASM -#endif -#endif - -#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM) -#if defined(BORINGSSL_PREFIX) -#include -#endif -.text - - -L$p434x2: -.quad 0xFFFFFFFFFFFFFFFE -.quad 0xFFFFFFFFFFFFFFFF -.quad 0xFB82ECF5C5FFFFFF -.quad 0xF78CB8F062B15D47 -.quad 0xD9F8BFAD038A40AC -.quad 0x0004683E4E2EE688 - - -L$p434p1: -.quad 0xFDC1767AE3000000 -.quad 0x7BC65C783158AEA3 -.quad 0x6CFC5FD681C52056 -.quad 0x0002341F27177344 - - -.private_extern _OPENSSL_ia32cap_P -.globl _sike_fpadd -.private_extern _sike_fpadd - -_sike_fpadd: - - pushq %r12 - - - pushq %r13 - - - pushq %r14 - - - - xorq %rax,%rax - - movq 0(%rdi),%r8 - addq 0(%rsi),%r8 - movq 8(%rdi),%r9 - adcq 8(%rsi),%r9 - movq 16(%rdi),%r10 - adcq 16(%rsi),%r10 - movq 24(%rdi),%r11 - adcq 24(%rsi),%r11 - movq 32(%rdi),%r12 - adcq 32(%rsi),%r12 - movq 40(%rdi),%r13 - adcq 40(%rsi),%r13 - movq 48(%rdi),%r14 - adcq 48(%rsi),%r14 - - movq L$p434x2(%rip),%rcx - subq %rcx,%r8 - movq 8+L$p434x2(%rip),%rcx - sbbq %rcx,%r9 - sbbq %rcx,%r10 - movq 16+L$p434x2(%rip),%rcx - sbbq %rcx,%r11 - movq 24+L$p434x2(%rip),%rcx - sbbq %rcx,%r12 - movq 32+L$p434x2(%rip),%rcx - sbbq %rcx,%r13 - movq 40+L$p434x2(%rip),%rcx - sbbq %rcx,%r14 - - sbbq $0,%rax - - movq L$p434x2(%rip),%rdi - andq %rax,%rdi - movq 8+L$p434x2(%rip),%rsi - andq %rax,%rsi - movq 16+L$p434x2(%rip),%rcx - andq %rax,%rcx - - addq %rdi,%r8 - movq %r8,0(%rdx) - adcq %rsi,%r9 - movq %r9,8(%rdx) - adcq %rsi,%r10 - movq %r10,16(%rdx) - adcq %rcx,%r11 - movq %r11,24(%rdx) - - setc %cl - movq 24+L$p434x2(%rip),%r8 - andq %rax,%r8 - movq 32+L$p434x2(%rip),%r9 - andq %rax,%r9 - movq 40+L$p434x2(%rip),%r10 - andq %rax,%r10 - btq $0,%rcx - - adcq %r8,%r12 - movq %r12,32(%rdx) - adcq %r9,%r13 - movq %r13,40(%rdx) - adcq %r10,%r14 - movq %r14,48(%rdx) - - popq %r14 - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - -.globl _sike_cswap_asm -.private_extern _sike_cswap_asm 
- -_sike_cswap_asm: - - - movq %rdx,%xmm3 - - - - - - pshufd $68,%xmm3,%xmm3 - - movdqu 0(%rdi),%xmm0 - movdqu 0(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,0(%rdi) - movdqu %xmm1,0(%rsi) - - movdqu 16(%rdi),%xmm0 - movdqu 16(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,16(%rdi) - movdqu %xmm1,16(%rsi) - - movdqu 32(%rdi),%xmm0 - movdqu 32(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,32(%rdi) - movdqu %xmm1,32(%rsi) - - movdqu 48(%rdi),%xmm0 - movdqu 48(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,48(%rdi) - movdqu %xmm1,48(%rsi) - - movdqu 64(%rdi),%xmm0 - movdqu 64(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,64(%rdi) - movdqu %xmm1,64(%rsi) - - movdqu 80(%rdi),%xmm0 - movdqu 80(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,80(%rdi) - movdqu %xmm1,80(%rsi) - - movdqu 96(%rdi),%xmm0 - movdqu 96(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,96(%rdi) - movdqu %xmm1,96(%rsi) - - movdqu 112(%rdi),%xmm0 - movdqu 112(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,112(%rdi) - movdqu %xmm1,112(%rsi) - - movdqu 128(%rdi),%xmm0 - movdqu 128(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,128(%rdi) - movdqu %xmm1,128(%rsi) - - movdqu 144(%rdi),%xmm0 - movdqu 144(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu 
%xmm0,144(%rdi) - movdqu %xmm1,144(%rsi) - - movdqu 160(%rdi),%xmm0 - movdqu 160(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,160(%rdi) - movdqu %xmm1,160(%rsi) - - movdqu 176(%rdi),%xmm0 - movdqu 176(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,176(%rdi) - movdqu %xmm1,176(%rsi) - - movdqu 192(%rdi),%xmm0 - movdqu 192(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,192(%rdi) - movdqu %xmm1,192(%rsi) - - movdqu 208(%rdi),%xmm0 - movdqu 208(%rsi),%xmm1 - movdqa %xmm1,%xmm2 - pxor %xmm0,%xmm2 - pand %xmm3,%xmm2 - pxor %xmm2,%xmm0 - pxor %xmm2,%xmm1 - movdqu %xmm0,208(%rdi) - movdqu %xmm1,208(%rsi) - - .byte 0xf3,0xc3 -.globl _sike_fpsub -.private_extern _sike_fpsub - -_sike_fpsub: - - pushq %r12 - - - pushq %r13 - - - pushq %r14 - - - - xorq %rax,%rax - - movq 0(%rdi),%r8 - subq 0(%rsi),%r8 - movq 8(%rdi),%r9 - sbbq 8(%rsi),%r9 - movq 16(%rdi),%r10 - sbbq 16(%rsi),%r10 - movq 24(%rdi),%r11 - sbbq 24(%rsi),%r11 - movq 32(%rdi),%r12 - sbbq 32(%rsi),%r12 - movq 40(%rdi),%r13 - sbbq 40(%rsi),%r13 - movq 48(%rdi),%r14 - sbbq 48(%rsi),%r14 - - sbbq $0x0,%rax - - movq L$p434x2(%rip),%rdi - andq %rax,%rdi - movq 8+L$p434x2(%rip),%rsi - andq %rax,%rsi - movq 16+L$p434x2(%rip),%rcx - andq %rax,%rcx - - addq %rdi,%r8 - movq %r8,0(%rdx) - adcq %rsi,%r9 - movq %r9,8(%rdx) - adcq %rsi,%r10 - movq %r10,16(%rdx) - adcq %rcx,%r11 - movq %r11,24(%rdx) - - setc %cl - movq 24+L$p434x2(%rip),%r8 - andq %rax,%r8 - movq 32+L$p434x2(%rip),%r9 - andq %rax,%r9 - movq 40+L$p434x2(%rip),%r10 - andq %rax,%r10 - btq $0x0,%rcx - - adcq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - movq %r12,32(%rdx) - movq %r13,40(%rdx) - movq %r14,48(%rdx) - - popq %r14 - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - -.globl _sike_mpadd_asm -.private_extern _sike_mpadd_asm - 
-_sike_mpadd_asm: - - movq 0(%rdi),%r8; - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%rcx - addq 0(%rsi),%r8 - adcq 8(%rsi),%r9 - adcq 16(%rsi),%r10 - adcq 24(%rsi),%r11 - adcq 32(%rsi),%rcx - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %rcx,32(%rdx) - - movq 40(%rdi),%r8 - movq 48(%rdi),%r9 - adcq 40(%rsi),%r8 - adcq 48(%rsi),%r9 - movq %r8,40(%rdx) - movq %r9,48(%rdx) - .byte 0xf3,0xc3 - -.globl _sike_mpsubx2_asm -.private_extern _sike_mpsubx2_asm - -_sike_mpsubx2_asm: - - xorq %rax,%rax - - movq 0(%rdi),%r8 - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - movq 32(%rdi),%rcx - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - sbbq 24(%rsi),%r11 - sbbq 32(%rsi),%rcx - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %rcx,32(%rdx) - - movq 40(%rdi),%r8 - movq 48(%rdi),%r9 - movq 56(%rdi),%r10 - movq 64(%rdi),%r11 - movq 72(%rdi),%rcx - sbbq 40(%rsi),%r8 - sbbq 48(%rsi),%r9 - sbbq 56(%rsi),%r10 - sbbq 64(%rsi),%r11 - sbbq 72(%rsi),%rcx - movq %r8,40(%rdx) - movq %r9,48(%rdx) - movq %r10,56(%rdx) - movq %r11,64(%rdx) - movq %rcx,72(%rdx) - - movq 80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - sbbq 80(%rsi),%r8 - sbbq 88(%rsi),%r9 - sbbq 96(%rsi),%r10 - sbbq 104(%rsi),%r11 - sbbq $0x0,%rax - movq %r8,80(%rdx) - movq %r9,88(%rdx) - movq %r10,96(%rdx) - movq %r11,104(%rdx) - .byte 0xf3,0xc3 - -.globl _sike_mpdblsubx2_asm -.private_extern _sike_mpdblsubx2_asm - -_sike_mpdblsubx2_asm: - - pushq %r12 - - - pushq %r13 - - - - xorq %rax,%rax - - - movq 0(%rdx),%r8 - movq 8(%rdx),%r9 - movq 16(%rdx),%r10 - movq 24(%rdx),%r11 - movq 32(%rdx),%r12 - movq 40(%rdx),%r13 - movq 48(%rdx),%rcx - subq 0(%rdi),%r8 - sbbq 8(%rdi),%r9 - sbbq 16(%rdi),%r10 - sbbq 24(%rdi),%r11 - sbbq 32(%rdi),%r12 - sbbq 40(%rdi),%r13 - sbbq 48(%rdi),%rcx - adcq $0x0,%rax - - - subq 0(%rsi),%r8 - sbbq 8(%rsi),%r9 - sbbq 16(%rsi),%r10 - sbbq 
24(%rsi),%r11 - sbbq 32(%rsi),%r12 - sbbq 40(%rsi),%r13 - sbbq 48(%rsi),%rcx - adcq $0x0,%rax - - - movq %r8,0(%rdx) - movq %r9,8(%rdx) - movq %r10,16(%rdx) - movq %r11,24(%rdx) - movq %r12,32(%rdx) - movq %r13,40(%rdx) - movq %rcx,48(%rdx) - - - movq 56(%rdx),%r8 - movq 64(%rdx),%r9 - movq 72(%rdx),%r10 - movq 80(%rdx),%r11 - movq 88(%rdx),%r12 - movq 96(%rdx),%r13 - movq 104(%rdx),%rcx - - subq %rax,%r8 - sbbq 56(%rdi),%r8 - sbbq 64(%rdi),%r9 - sbbq 72(%rdi),%r10 - sbbq 80(%rdi),%r11 - sbbq 88(%rdi),%r12 - sbbq 96(%rdi),%r13 - sbbq 104(%rdi),%rcx - - - subq 56(%rsi),%r8 - sbbq 64(%rsi),%r9 - sbbq 72(%rsi),%r10 - sbbq 80(%rsi),%r11 - sbbq 88(%rsi),%r12 - sbbq 96(%rsi),%r13 - sbbq 104(%rsi),%rcx - - - movq %r8,56(%rdx) - movq %r9,64(%rdx) - movq %r10,72(%rdx) - movq %r11,80(%rdx) - movq %r12,88(%rdx) - movq %r13,96(%rdx) - movq %rcx,104(%rdx) - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - - -L$rdc_bdw: - - - - - - - - - xorq %rax,%rax - movq 0+0(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r8,%r9 - mulxq 8+L$p434p1(%rip),%r12,%r10 - mulxq 16+L$p434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+L$p434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 0+8(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+L$p434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 24(%rdi),%r8 - adcq 32(%rdi),%r9 - adcq 40(%rdi),%r10 - adcq 48(%rdi),%r11 - adcq 56(%rdi),%r12 - adcq 64(%rdi),%r13 - adcq 72(%rdi),%rcx - movq %r8,24(%rdi) - movq %r9,32(%rdi) - movq %r10,40(%rdi) - movq %r11,48(%rdi) - movq %r12,56(%rdi) - movq %r13,64(%rdi) - movq %rcx,72(%rdi) - movq 80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - adcq $0x0,%r8 - adcq $0x0,%r9 - adcq $0x0,%r10 - adcq $0x0,%r11 - 
movq %r8,80(%rdi) - movq %r9,88(%rdi) - movq %r10,96(%rdi) - movq %r11,104(%rdi) - - xorq %rax,%rax - movq 16+0(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r8,%r9 - mulxq 8+L$p434p1(%rip),%r12,%r10 - mulxq 16+L$p434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+L$p434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 16+8(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+L$p434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 40(%rdi),%r8 - adcq 48(%rdi),%r9 - adcq 56(%rdi),%r10 - adcq 64(%rdi),%r11 - adcq 72(%rdi),%r12 - adcq 80(%rdi),%r13 - adcq 88(%rdi),%rcx - movq %r8,40(%rdi) - movq %r9,48(%rdi) - movq %r10,56(%rdi) - movq %r11,64(%rdi) - movq %r12,72(%rdi) - movq %r13,80(%rdi) - movq %rcx,88(%rdi) - movq 96(%rdi),%r8 - movq 104(%rdi),%r9 - adcq $0x0,%r8 - adcq $0x0,%r9 - movq %r8,96(%rdi) - movq %r9,104(%rdi) - - xorq %rax,%rax - movq 32+0(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r8,%r9 - mulxq 8+L$p434p1(%rip),%r12,%r10 - mulxq 16+L$p434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+L$p434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - xorq %rax,%rax - movq 32+8(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r13,%rcx - adcxq %r13,%r9 - adcxq %rcx,%r10 - - mulxq 8+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r11 - adoxq %rcx,%r10 - - mulxq 16+L$p434p1(%rip),%rcx,%r13 - adcxq %r13,%r12 - adoxq %rcx,%r11 - - mulxq 24+L$p434p1(%rip),%rcx,%r13 - adcxq %rax,%r13 - adoxq %rcx,%r12 - adoxq %rax,%r13 - - xorq %rcx,%rcx - addq 56(%rdi),%r8 - adcq 64(%rdi),%r9 - adcq 72(%rdi),%r10 - adcq 80(%rdi),%r11 - adcq 88(%rdi),%r12 - adcq 96(%rdi),%r13 - adcq 104(%rdi),%rcx - movq %r8,0(%rsi) - movq %r9,8(%rsi) - movq %r10,72(%rdi) - movq %r11,80(%rdi) - movq %r12,88(%rdi) - movq 
%r13,96(%rdi) - movq %rcx,104(%rdi) - - xorq %rax,%rax - movq 48(%rdi),%rdx - mulxq 0+L$p434p1(%rip),%r8,%r9 - mulxq 8+L$p434p1(%rip),%r12,%r10 - mulxq 16+L$p434p1(%rip),%r13,%r11 - - adoxq %r12,%r9 - adoxq %r13,%r10 - - mulxq 24+L$p434p1(%rip),%r13,%r12 - adoxq %r13,%r11 - adoxq %rax,%r12 - - addq 72(%rdi),%r8 - adcq 80(%rdi),%r9 - adcq 88(%rdi),%r10 - adcq 96(%rdi),%r11 - adcq 104(%rdi),%r12 - movq %r8,16(%rsi) - movq %r9,24(%rsi) - movq %r10,32(%rsi) - movq %r11,40(%rsi) - movq %r12,48(%rsi) - - - popq %r15 - - - popq %r14 - - - popq %r13 - - - popq %r12 - - - .byte 0xf3,0xc3 - -.globl _sike_fprdc -.private_extern _sike_fprdc - -_sike_fprdc: - - pushq %r12 - - - pushq %r13 - - - pushq %r14 - - - pushq %r15 - - - - - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$rdc_bdw - - - - - movq 0+0(%rdi),%r14 - movq 0+L$p434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+L$p434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 0+8(%rdi),%rcx - movq 0+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+L$p434p1(%rip),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 24+L$p434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 24(%rdi),%r8 - adcq 32(%rdi),%r9 - adcq 40(%rdi),%r10 - adcq 48(%rdi),%r11 - adcq 56(%rdi),%r12 - adcq 64(%rdi),%r13 - adcq 72(%rdi),%rcx - movq %r8,24(%rdi) - movq %r9,32(%rdi) - movq %r10,40(%rdi) - movq %r11,48(%rdi) - movq %r12,56(%rdi) - movq %r13,64(%rdi) - movq %rcx,72(%rdi) - movq 
80(%rdi),%r8 - movq 88(%rdi),%r9 - movq 96(%rdi),%r10 - movq 104(%rdi),%r11 - adcq $0x0,%r8 - adcq $0x0,%r9 - adcq $0x0,%r10 - adcq $0x0,%r11 - movq %r8,80(%rdi) - movq %r9,88(%rdi) - movq %r10,96(%rdi) - movq %r11,104(%rdi) - - - movq 16+0(%rdi),%r14 - movq 0+L$p434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+L$p434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 16+8(%rdi),%rcx - movq 0+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+L$p434p1(%rip),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 24+L$p434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 40(%rdi),%r8 - adcq 48(%rdi),%r9 - adcq 56(%rdi),%r10 - adcq 64(%rdi),%r11 - adcq 72(%rdi),%r12 - adcq 80(%rdi),%r13 - adcq 88(%rdi),%rcx - movq %r8,40(%rdi) - movq %r9,48(%rdi) - movq %r10,56(%rdi) - movq %r11,64(%rdi) - movq %r12,72(%rdi) - movq %r13,80(%rdi) - movq %rcx,88(%rdi) - movq 96(%rdi),%r8 - movq 104(%rdi),%r9 - adcq $0x0,%r8 - adcq $0x0,%r9 - movq %r8,96(%rdi) - movq %r9,104(%rdi) - - - movq 32+0(%rdi),%r14 - movq 0+L$p434p1(%rip),%rax - mulq %r14 - xorq %r10,%r10 - movq %rax,%r8 - movq %rdx,%r9 - - - movq 8+L$p434p1(%rip),%rax - mulq %r14 - xorq %r11,%r11 - addq %rax,%r9 - adcq %rdx,%r10 - - - movq 32+8(%rdi),%rcx - movq 0+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - - xorq %r12,%r12 - movq 16+L$p434p1(%rip),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r11 - adcq $0x0,%r12 - - - movq 8+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r10 - adcq 
%rdx,%r11 - adcq $0x0,%r12 - - - movq 24+L$p434p1(%rip),%rax - mulq %r14 - xorq %r13,%r13 - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 16+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r11 - adcq %rdx,%r12 - adcq $0x0,%r13 - - - movq 24+L$p434p1(%rip),%rax - mulq %rcx - addq %rax,%r12 - adcq %rdx,%r13 - - - xorq %rcx,%rcx - addq 56(%rdi),%r8 - adcq 64(%rdi),%r9 - adcq 72(%rdi),%r10 - adcq 80(%rdi),%r11 - adcq 88(%rdi),%r12 - adcq 96(%rdi),%r13 - adcq 104(%rdi),%rcx - movq %r8,0(%rsi) - movq %r9,8(%rsi) - movq %r10,72(%rdi) - movq %r11,80(%rdi) - movq %r12,88(%rdi) - movq %r13,96(%rdi) - movq %rcx,104(%rdi) - - movq 48(%rdi),%r13 - - xorq %r10,%r10 - movq 0+L$p434p1(%rip),%rax - mulq %r13 - movq %rax,%r8 - movq %rdx,%r9 - - xorq %r11,%r11 - movq 8+L$p434p1(%rip),%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - - xorq %r12,%r12 - movq 16+L$p434p1(%rip),%rax - mulq %r13 - addq %rax,%r10 - adcq %rdx,%r11 - - movq 24+L$p434p1(%rip),%rax - mulq %r13 - addq %rax,%r11 - adcq %rdx,%r12 - - addq 72(%rdi),%r8 - adcq 80(%rdi),%r9 - adcq 88(%rdi),%r10 - adcq 96(%rdi),%r11 - adcq 104(%rdi),%r12 - movq %r8,16(%rsi) - movq %r9,24(%rsi) - movq %r10,32(%rsi) - movq %r11,40(%rsi) - movq %r12,48(%rsi) - - - popq %r15 - - popq %r14 - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - -L$mul_bdw: - - - - - - - - - - movq %rdx,%rcx - xorq %rax,%rax - - - movq 0(%rdi),%r8 - movq 8(%rdi),%r9 - movq 16(%rdi),%r10 - movq 24(%rdi),%r11 - - pushq %rbx - - - pushq %rbp - - - subq $96,%rsp - - - addq 32(%rdi),%r8 - adcq 40(%rdi),%r9 - adcq 48(%rdi),%r10 - adcq $0x0,%r11 - sbbq $0x0,%rax - movq %r8,0(%rsp) - movq %r9,8(%rsp) - movq %r10,16(%rsp) - movq %r11,24(%rsp) - - - xorq %rbx,%rbx - movq 0(%rsi),%r12 - movq 8(%rsi),%r13 - movq 16(%rsi),%r14 - movq 24(%rsi),%r15 - addq 32(%rsi),%r12 - adcq 40(%rsi),%r13 - adcq 48(%rsi),%r14 - adcq $0x0,%r15 - sbbq $0x0,%rbx - movq %r12,32(%rsp) - movq %r13,40(%rsp) - movq %r14,48(%rsp) - movq %r15,56(%rsp) - - - andq %rax,%r12 - andq %rax,%r13 
- andq %rax,%r14 - andq %rax,%r15 - - - andq %rbx,%r8 - andq %rbx,%r9 - andq %rbx,%r10 - andq %rbx,%r11 - - - addq %r12,%r8 - adcq %r13,%r9 - adcq %r14,%r10 - adcq %r15,%r11 - movq %r8,64(%rsp) - movq %r9,72(%rsp) - movq %r10,80(%rsp) - movq %r11,88(%rsp) - - - movq 0+0(%rsp),%rdx - mulxq 32+0(%rsp),%r9,%r8 - movq %r9,0+0(%rsp) - mulxq 32+8(%rsp),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 32+16(%rsp),%r11,%r10 - adoxq %r11,%r9 - mulxq 32+24(%rsp),%r12,%r11 - adoxq %r12,%r10 - - movq 0+8(%rsp),%rdx - mulxq 32+0(%rsp),%r12,%r13 - adoxq %rax,%r11 - xorq %rax,%rax - mulxq 32+8(%rsp),%r15,%r14 - adoxq %r8,%r12 - movq %r12,0+8(%rsp) - adcxq %r15,%r13 - mulxq 32+16(%rsp),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r9,%r13 - mulxq 32+24(%rsp),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r10,%r14 - - movq 0+16(%rsp),%rdx - mulxq 32+0(%rsp),%r8,%r9 - adoxq %r11,%r15 - adoxq %rax,%rbx - xorq %rax,%rax - mulxq 32+8(%rsp),%r11,%r10 - adoxq %r13,%r8 - movq %r8,0+16(%rsp) - adcxq %r11,%r9 - mulxq 32+16(%rsp),%r12,%r11 - adcxq %r12,%r10 - adoxq %r14,%r9 - mulxq 32+24(%rsp),%rbp,%r12 - adcxq %rbp,%r11 - adcxq %rax,%r12 - - adoxq %r15,%r10 - adoxq %rbx,%r11 - adoxq %rax,%r12 - - movq 0+24(%rsp),%rdx - mulxq 32+0(%rsp),%r8,%r13 - xorq %rax,%rax - mulxq 32+8(%rsp),%r15,%r14 - adcxq %r15,%r13 - adoxq %r8,%r9 - mulxq 32+16(%rsp),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r13,%r10 - mulxq 32+24(%rsp),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r14,%r11 - adoxq %r15,%r12 - adoxq %rax,%rbx - movq %r9,0+24(%rsp) - movq %r10,0+32(%rsp) - movq %r11,0+40(%rsp) - movq %r12,0+48(%rsp) - movq %rbx,0+56(%rsp) - - - - movq 0+0(%rdi),%rdx - mulxq 0+0(%rsi),%r9,%r8 - movq %r9,0+0(%rcx) - mulxq 0+8(%rsi),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 0+16(%rsi),%r11,%r10 - adoxq %r11,%r9 - mulxq 0+24(%rsi),%r12,%r11 - adoxq %r12,%r10 - - movq 0+8(%rdi),%rdx - mulxq 0+0(%rsi),%r12,%r13 - adoxq %rax,%r11 - xorq %rax,%rax - mulxq 0+8(%rsi),%r15,%r14 - adoxq %r8,%r12 - movq 
%r12,0+8(%rcx) - adcxq %r15,%r13 - mulxq 0+16(%rsi),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r9,%r13 - mulxq 0+24(%rsi),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r10,%r14 - - movq 0+16(%rdi),%rdx - mulxq 0+0(%rsi),%r8,%r9 - adoxq %r11,%r15 - adoxq %rax,%rbx - xorq %rax,%rax - mulxq 0+8(%rsi),%r11,%r10 - adoxq %r13,%r8 - movq %r8,0+16(%rcx) - adcxq %r11,%r9 - mulxq 0+16(%rsi),%r12,%r11 - adcxq %r12,%r10 - adoxq %r14,%r9 - mulxq 0+24(%rsi),%rbp,%r12 - adcxq %rbp,%r11 - adcxq %rax,%r12 - - adoxq %r15,%r10 - adoxq %rbx,%r11 - adoxq %rax,%r12 - - movq 0+24(%rdi),%rdx - mulxq 0+0(%rsi),%r8,%r13 - xorq %rax,%rax - mulxq 0+8(%rsi),%r15,%r14 - adcxq %r15,%r13 - adoxq %r8,%r9 - mulxq 0+16(%rsi),%rbx,%r15 - adcxq %rbx,%r14 - adoxq %r13,%r10 - mulxq 0+24(%rsi),%rbp,%rbx - adcxq %rbp,%r15 - adcxq %rax,%rbx - adoxq %r14,%r11 - adoxq %r15,%r12 - adoxq %rax,%rbx - movq %r9,0+24(%rcx) - movq %r10,0+32(%rcx) - movq %r11,0+40(%rcx) - movq %r12,0+48(%rcx) - movq %rbx,0+56(%rcx) - - - - movq 32+0(%rdi),%rdx - mulxq 32+0(%rsi),%r9,%r8 - movq %r9,64+0(%rcx) - mulxq 32+8(%rsi),%r10,%r9 - xorq %rax,%rax - adoxq %r10,%r8 - mulxq 32+16(%rsi),%r11,%r10 - adoxq %r11,%r9 - - movq 32+8(%rdi),%rdx - mulxq 32+0(%rsi),%r12,%r11 - adoxq %rax,%r10 - xorq %rax,%rax - - mulxq 32+8(%rsi),%r14,%r13 - adoxq %r8,%r12 - movq %r12,64+8(%rcx) - adcxq %r14,%r11 - - mulxq 32+16(%rsi),%r8,%r14 - adoxq %r9,%r11 - adcxq %r8,%r13 - adcxq %rax,%r14 - adoxq %r10,%r13 - - movq 32+16(%rdi),%rdx - mulxq 32+0(%rsi),%r8,%r9 - adoxq %rax,%r14 - xorq %rax,%rax - - mulxq 32+8(%rsi),%r10,%r12 - adoxq %r11,%r8 - movq %r8,64+16(%rcx) - adcxq %r13,%r9 - - mulxq 32+16(%rsi),%r11,%r8 - adcxq %r14,%r12 - adcxq %rax,%r8 - adoxq %r10,%r9 - adoxq %r12,%r11 - adoxq %rax,%r8 - movq %r9,64+24(%rcx) - movq %r11,64+32(%rcx) - movq %r8,64+40(%rcx) - - - - - movq 64(%rsp),%r8 - movq 72(%rsp),%r9 - movq 80(%rsp),%r10 - movq 88(%rsp),%r11 - - movq 32(%rsp),%rax - addq %rax,%r8 - movq 40(%rsp),%rax - adcq %rax,%r9 - movq 48(%rsp),%rax 
- adcq %rax,%r10 - movq 56(%rsp),%rax - adcq %rax,%r11 - - - movq 0(%rsp),%r12 - movq 8(%rsp),%r13 - movq 16(%rsp),%r14 - movq 24(%rsp),%r15 - subq 0(%rcx),%r12 - sbbq 8(%rcx),%r13 - sbbq 16(%rcx),%r14 - sbbq 24(%rcx),%r15 - sbbq 32(%rcx),%r8 - sbbq 40(%rcx),%r9 - sbbq 48(%rcx),%r10 - sbbq 56(%rcx),%r11 - - - subq 64(%rcx),%r12 - sbbq 72(%rcx),%r13 - sbbq 80(%rcx),%r14 - sbbq 88(%rcx),%r15 - sbbq 96(%rcx),%r8 - sbbq 104(%rcx),%r9 - sbbq $0x0,%r10 - sbbq $0x0,%r11 - - addq 32(%rcx),%r12 - movq %r12,32(%rcx) - adcq 40(%rcx),%r13 - movq %r13,40(%rcx) - adcq 48(%rcx),%r14 - movq %r14,48(%rcx) - adcq 56(%rcx),%r15 - movq %r15,56(%rcx) - adcq 64(%rcx),%r8 - movq %r8,64(%rcx) - adcq 72(%rcx),%r9 - movq %r9,72(%rcx) - adcq 80(%rcx),%r10 - movq %r10,80(%rcx) - adcq 88(%rcx),%r11 - movq %r11,88(%rcx) - movq 96(%rcx),%r12 - adcq $0x0,%r12 - movq %r12,96(%rcx) - movq 104(%rcx),%r13 - adcq $0x0,%r13 - movq %r13,104(%rcx) - - addq $96,%rsp - - popq %rbp - - - popq %rbx - - - - - popq %r15 - - - popq %r14 - - - popq %r13 - - - popq %r12 - - - .byte 0xf3,0xc3 - - -.globl _sike_mpmul -.private_extern _sike_mpmul - -_sike_mpmul: - - pushq %r12 - - - pushq %r13 - - - pushq %r14 - - - pushq %r15 - - - - - - leaq _OPENSSL_ia32cap_P(%rip),%rcx - movq 8(%rcx),%rcx - andl $0x80100,%ecx - cmpl $0x80100,%ecx - je L$mul_bdw - - - - movq %rdx,%rcx - - subq $112,%rsp - - - - xorq %rax,%rax - movq 32(%rdi),%r8 - movq 40(%rdi),%r9 - movq 48(%rdi),%r10 - xorq %r11,%r11 - addq 0(%rdi),%r8 - adcq 8(%rdi),%r9 - adcq 16(%rdi),%r10 - adcq 24(%rdi),%r11 - - sbbq $0,%rax - movq %rax,64(%rsp) - - movq %r8,0(%rcx) - movq %r9,8(%rcx) - movq %r10,16(%rcx) - movq %r11,24(%rcx) - - - xorq %rdx,%rdx - movq 32(%rsi),%r12 - movq 40(%rsi),%r13 - movq 48(%rsi),%r14 - xorq %r15,%r15 - addq 0(%rsi),%r12 - adcq 8(%rsi),%r13 - adcq 16(%rsi),%r14 - adcq 24(%rsi),%r15 - sbbq $0x0,%rdx - - movq %rdx,72(%rsp) - - - movq (%rcx),%rax - mulq %r12 - movq %rax,(%rsp) - movq %rdx,%r8 - - xorq %r9,%r9 - movq (%rcx),%rax - mulq 
%r13 - addq %rax,%r8 - adcq %rdx,%r9 - - xorq %r10,%r10 - movq 8(%rcx),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,8(%rsp) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq (%rcx),%rax - mulq %r14 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 16(%rcx),%rax - mulq %r12 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 8(%rcx),%rax - mulq %r13 - addq %rax,%r9 - movq %r9,16(%rsp) - adcq %rdx,%r10 - adcq $0x0,%r8 - - xorq %r9,%r9 - movq (%rcx),%rax - mulq %r15 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 24(%rcx),%rax - mulq %r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 8(%rcx),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 16(%rcx),%rax - mulq %r13 - addq %rax,%r10 - movq %r10,24(%rsp) - adcq %rdx,%r8 - adcq $0x0,%r9 - - xorq %r10,%r10 - movq 8(%rcx),%rax - mulq %r15 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 24(%rcx),%rax - mulq %r13 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 16(%rcx),%rax - mulq %r14 - addq %rax,%r8 - movq %r8,32(%rsp) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r11,%r11 - movq 16(%rcx),%rax - mulq %r15 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r11 - - movq 24(%rcx),%rax - mulq %r14 - addq %rax,%r9 - movq %r9,40(%rsp) - adcq %rdx,%r10 - adcq $0x0,%r11 - - movq 24(%rcx),%rax - mulq %r15 - addq %rax,%r10 - movq %r10,48(%rsp) - adcq %rdx,%r11 - movq %r11,56(%rsp) - - - movq 64(%rsp),%rax - andq %rax,%r12 - andq %rax,%r13 - andq %rax,%r14 - andq %rax,%r15 - - - movq 72(%rsp),%rax - movq 0(%rcx),%r8 - andq %rax,%r8 - movq 8(%rcx),%r9 - andq %rax,%r9 - movq 16(%rcx),%r10 - andq %rax,%r10 - movq 24(%rcx),%r11 - andq %rax,%r11 - - - addq %r8,%r12 - adcq %r9,%r13 - adcq %r10,%r14 - adcq %r11,%r15 - - - movq 32(%rsp),%rax - addq %rax,%r12 - movq 40(%rsp),%rax - adcq %rax,%r13 - movq 48(%rsp),%rax - adcq %rax,%r14 - movq 56(%rsp),%rax - adcq %rax,%r15 - movq %r12,80(%rsp) - movq %r13,88(%rsp) - movq %r14,96(%rsp) - movq %r15,104(%rsp) - - - 
movq (%rdi),%r11 - movq (%rsi),%rax - mulq %r11 - xorq %r9,%r9 - movq %rax,(%rcx) - movq %rdx,%r8 - - movq 16(%rdi),%r14 - movq 8(%rsi),%rax - mulq %r11 - xorq %r10,%r10 - addq %rax,%r8 - adcq %rdx,%r9 - - movq 8(%rdi),%r12 - movq (%rsi),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,8(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 16(%rsi),%rax - mulq %r11 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq (%rsi),%r13 - movq %r14,%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 8(%rsi),%rax - mulq %r12 - addq %rax,%r9 - movq %r9,16(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - xorq %r9,%r9 - movq 24(%rsi),%rax - mulq %r11 - movq 24(%rdi),%r15 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq %r15,%rax - mulq %r13 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 16(%rsi),%rax - mulq %r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r9 - - movq 8(%rsi),%rax - mulq %r14 - addq %rax,%r10 - movq %r10,24(%rcx) - adcq %rdx,%r8 - adcq $0x0,%r9 - - xorq %r10,%r10 - movq 24(%rsi),%rax - mulq %r12 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 8(%rsi),%rax - mulq %r15 - addq %rax,%r8 - adcq %rdx,%r9 - adcq $0x0,%r10 - - movq 16(%rsi),%rax - mulq %r14 - addq %rax,%r8 - movq %r8,32(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 24(%rsi),%rax - mulq %r14 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 16(%rsi),%rax - mulq %r15 - addq %rax,%r9 - movq %r9,40(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 24(%rsi),%rax - mulq %r15 - addq %rax,%r10 - movq %r10,48(%rcx) - adcq %rdx,%r8 - movq %r8,56(%rcx) - - - - movq 32(%rdi),%r11 - movq 32(%rsi),%rax - mulq %r11 - xorq %r9,%r9 - movq %rax,64(%rcx) - movq %rdx,%r8 - - movq 48(%rdi),%r14 - movq 40(%rsi),%rax - mulq %r11 - xorq %r10,%r10 - addq %rax,%r8 - adcq %rdx,%r9 - - movq 40(%rdi),%r12 - movq 32(%rsi),%rax - mulq %r12 - addq %rax,%r8 - movq %r8,72(%rcx) - adcq %rdx,%r9 - adcq $0x0,%r10 - - xorq %r8,%r8 - movq 48(%rsi),%rax - mulq 
%r11 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 32(%rsi),%r13 - movq %r14,%rax - mulq %r13 - addq %rax,%r9 - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 40(%rsi),%rax - mulq %r12 - addq %rax,%r9 - movq %r9,80(%rcx) - adcq %rdx,%r10 - adcq $0x0,%r8 - - movq 48(%rsi),%rax - mulq %r12 - xorq %r12,%r12 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r12 - - movq 40(%rsi),%rax - mulq %r14 - addq %rax,%r10 - adcq %rdx,%r8 - adcq $0x0,%r12 - movq %r10,88(%rcx) - - movq 48(%rsi),%rax - mulq %r14 - addq %rax,%r8 - adcq $0x0,%r12 - movq %r8,96(%rcx) - - addq %r12,%rdx - - - movq 0(%rsp),%r8 - subq 0(%rcx),%r8 - movq 8(%rsp),%r9 - sbbq 8(%rcx),%r9 - movq 16(%rsp),%r10 - sbbq 16(%rcx),%r10 - movq 24(%rsp),%r11 - sbbq 24(%rcx),%r11 - movq 80(%rsp),%r12 - sbbq 32(%rcx),%r12 - movq 88(%rsp),%r13 - sbbq 40(%rcx),%r13 - movq 96(%rsp),%r14 - sbbq 48(%rcx),%r14 - movq 104(%rsp),%r15 - sbbq 56(%rcx),%r15 - - - movq 64(%rcx),%rax - subq %rax,%r8 - movq 72(%rcx),%rax - sbbq %rax,%r9 - movq 80(%rcx),%rax - sbbq %rax,%r10 - movq 88(%rcx),%rax - sbbq %rax,%r11 - movq 96(%rcx),%rax - sbbq %rax,%r12 - sbbq %rdx,%r13 - sbbq $0x0,%r14 - sbbq $0x0,%r15 - - - addq 32(%rcx),%r8 - movq %r8,32(%rcx) - adcq 40(%rcx),%r9 - movq %r9,40(%rcx) - adcq 48(%rcx),%r10 - movq %r10,48(%rcx) - adcq 56(%rcx),%r11 - movq %r11,56(%rcx) - adcq 64(%rcx),%r12 - movq %r12,64(%rcx) - adcq 72(%rcx),%r13 - movq %r13,72(%rcx) - adcq 80(%rcx),%r14 - movq %r14,80(%rcx) - adcq 88(%rcx),%r15 - movq %r15,88(%rcx) - movq 96(%rcx),%r12 - adcq $0x0,%r12 - movq %r12,96(%rcx) - adcq $0x0,%rdx - movq %rdx,104(%rcx) - - addq $112,%rsp - - - - popq %r15 - - popq %r14 - - popq %r13 - - popq %r12 - - .byte 0xf3,0xc3 - -#endif diff --git a/packager/third_party/boringssl/roll_boringssl.py b/packager/third_party/boringssl/roll_boringssl.py deleted file mode 100755 index f1f009c2f7..0000000000 --- a/packager/third_party/boringssl/roll_boringssl.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015 The 
Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Rolls third_party/boringssl/src in DEPS and updates generated build files.""" - -import os -import os.path -import shutil -import subprocess -import sys - - -SCRIPT_PATH = os.path.abspath(__file__) -SRC_PATH = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.dirname(SCRIPT_PATH)))) -DEPS_PATH = os.path.join(SRC_PATH, 'DEPS') -BORINGSSL_PATH = os.path.join(SRC_PATH, 'packager', 'third_party', 'boringssl') -BORINGSSL_SRC_PATH = os.path.join(BORINGSSL_PATH, 'src') - -if not os.path.isfile(DEPS_PATH) or not os.path.isdir(BORINGSSL_SRC_PATH): - raise Exception('Could not find packager checkout') - -# Pull OS_ARCH_COMBOS out of the BoringSSL script. -sys.path.append(os.path.join(BORINGSSL_SRC_PATH, 'util')) -import generate_build_files - -GENERATED_FILES = [ - 'BUILD.generated.gni', - 'BUILD.generated_tests.gni', - 'boringssl.gypi', - 'err_data.c', -] - - -def IsPristine(repo): - """Returns True if a git checkout is pristine.""" - cmd = ['git', 'diff', '--ignore-submodules'] - return not (subprocess.check_output(cmd, cwd=repo).strip() or - subprocess.check_output(cmd + ['--cached'], cwd=repo).strip()) - - -def RevParse(repo, rev): - """Resolves a string to a git commit.""" - return subprocess.check_output(['git', 'rev-parse', rev], cwd=repo).strip() - - -def UpdateDEPS(deps, from_hash, to_hash): - """Updates all references of |from_hash| to |to_hash| in |deps|.""" - with open(deps, 'rb') as f: - contents = f.read() - if from_hash not in contents: - raise Exception('%s not in DEPS' % from_hash) - contents = contents.replace(from_hash, to_hash) - with open(deps, 'wb') as f: - f.write(contents) - - -def main(): - if len(sys.argv) > 2: - sys.stderr.write('Usage: %s [COMMIT]' % sys.argv[0]) - return 1 - - if not IsPristine(SRC_PATH): - print >>sys.stderr, 'Packager checkout not pristine.' 
- return 0 - if not IsPristine(BORINGSSL_SRC_PATH): - print >>sys.stderr, 'BoringSSL checkout not pristine.' - return 0 - - if len(sys.argv) > 1: - commit = RevParse(BORINGSSL_SRC_PATH, sys.argv[1]) - else: - subprocess.check_call(['git', 'fetch', 'origin'], cwd=BORINGSSL_SRC_PATH) - commit = RevParse(BORINGSSL_SRC_PATH, 'origin/master') - - head = RevParse(BORINGSSL_SRC_PATH, 'HEAD') - if head == commit: - print 'BoringSSL already up to date.' - return 0 - - print 'Rolling BoringSSL from %s to %s...' % (head, commit) - - UpdateDEPS(DEPS_PATH, head, commit) - - # Checkout third_party/boringssl/src to generate new files. - subprocess.check_call(['git', 'checkout', commit], cwd=BORINGSSL_SRC_PATH) - - # Clear the old generated files. - for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS: - path = os.path.join(BORINGSSL_PATH, osname + '-' + arch) - if os.path.exists(path): - shutil.rmtree(path) - for file in GENERATED_FILES: - path = os.path.join(BORINGSSL_PATH, file) - if os.path.exists(path): - os.unlink(path) - - # Generate new ones. - subprocess.check_call(['python', - os.path.join(BORINGSSL_SRC_PATH, 'util', - 'generate_build_files.py'), - 'gn', 'gyp'], - cwd=BORINGSSL_PATH) - - # Commit everything. 
- subprocess.check_call(['git', 'add', DEPS_PATH], cwd=SRC_PATH) - for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS: - path = os.path.join(BORINGSSL_PATH, osname + '-' + arch) - subprocess.check_call(['git', 'add', path], cwd=SRC_PATH) - for file in GENERATED_FILES: - path = os.path.join(BORINGSSL_PATH, file) - subprocess.check_call(['git', 'add', path], cwd=SRC_PATH) - - message = """Roll src/third_party/boringssl/src %s..%s - -https://boringssl.googlesource.com/boringssl/+log/%s..%s - -BUG=none -""" % (head[:9], commit[:9], head, commit) - subprocess.check_call(['git', 'commit', '-m', message], cwd=SRC_PATH) - - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/packager/third_party/boringssl/win-x86/crypto/chacha/chacha-x86.asm b/packager/third_party/boringssl/win-x86/crypto/chacha/chacha-x86.asm deleted file mode 100644 index 7b59adf1db..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/chacha/chacha-x86.asm +++ /dev/null @@ -1,983 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _ChaCha20_ctr32 -align 16 -_ChaCha20_ctr32: -L$_ChaCha20_ctr32_begin: - push ebp - push ebx - push esi - push edi - xor eax,eax - cmp eax,DWORD [28+esp] - je NEAR L$000no_data - call L$pic_point -L$pic_point: - pop eax - lea ebp,[_OPENSSL_ia32cap_P] - test DWORD [ebp],16777216 - jz NEAR L$001x86 - test DWORD [4+ebp],512 - jz NEAR L$001x86 - jmp NEAR L$ssse3_shortcut -L$001x86: - mov esi,DWORD [32+esp] - mov edi,DWORD [36+esp] - sub esp,132 - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [80+esp],eax - mov DWORD [84+esp],ebx - mov DWORD [88+esp],ecx - mov DWORD [92+esp],edx - mov eax,DWORD [16+esi] - mov ebx,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov edx,DWORD [28+esi] - mov DWORD [96+esp],eax - mov DWORD [100+esp],ebx - mov DWORD [104+esp],ecx - mov DWORD [108+esp],edx - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - sub eax,1 - mov DWORD [112+esp],eax - mov DWORD [116+esp],ebx - mov DWORD [120+esp],ecx - mov DWORD [124+esp],edx - jmp NEAR L$002entry -align 16 -L$003outer_loop: - mov DWORD [156+esp],ebx - mov DWORD [152+esp],eax - mov DWORD [160+esp],ecx -L$002entry: - mov eax,1634760805 - mov DWORD [4+esp],857760878 - mov DWORD [8+esp],2036477234 - mov DWORD [12+esp],1797285236 - mov ebx,DWORD [84+esp] - mov ebp,DWORD [88+esp] - mov ecx,DWORD [104+esp] - mov esi,DWORD [108+esp] - mov edx,DWORD [116+esp] - mov edi,DWORD [120+esp] - mov DWORD [20+esp],ebx - mov DWORD [24+esp],ebp - mov DWORD [40+esp],ecx - mov DWORD [44+esp],esi - mov DWORD [52+esp],edx - mov DWORD [56+esp],edi - mov ebx,DWORD [92+esp] - mov edi,DWORD [124+esp] - mov edx,DWORD [112+esp] - mov ebp,DWORD [80+esp] - mov ecx,DWORD [96+esp] - mov esi,DWORD [100+esp] - add edx,1 - mov DWORD [28+esp],ebx - mov DWORD 
[60+esp],edi - mov DWORD [112+esp],edx - mov ebx,10 - jmp NEAR L$004loop -align 16 -L$004loop: - add eax,ebp - mov DWORD [128+esp],ebx - mov ebx,ebp - xor edx,eax - rol edx,16 - add ecx,edx - xor ebx,ecx - mov edi,DWORD [52+esp] - rol ebx,12 - mov ebp,DWORD [20+esp] - add eax,ebx - xor edx,eax - mov DWORD [esp],eax - rol edx,8 - mov eax,DWORD [4+esp] - add ecx,edx - mov DWORD [48+esp],edx - xor ebx,ecx - add eax,ebp - rol ebx,7 - xor edi,eax - mov DWORD [32+esp],ecx - rol edi,16 - mov DWORD [16+esp],ebx - add esi,edi - mov ecx,DWORD [40+esp] - xor ebp,esi - mov edx,DWORD [56+esp] - rol ebp,12 - mov ebx,DWORD [24+esp] - add eax,ebp - xor edi,eax - mov DWORD [4+esp],eax - rol edi,8 - mov eax,DWORD [8+esp] - add esi,edi - mov DWORD [52+esp],edi - xor ebp,esi - add eax,ebx - rol ebp,7 - xor edx,eax - mov DWORD [36+esp],esi - rol edx,16 - mov DWORD [20+esp],ebp - add ecx,edx - mov esi,DWORD [44+esp] - xor ebx,ecx - mov edi,DWORD [60+esp] - rol ebx,12 - mov ebp,DWORD [28+esp] - add eax,ebx - xor edx,eax - mov DWORD [8+esp],eax - rol edx,8 - mov eax,DWORD [12+esp] - add ecx,edx - mov DWORD [56+esp],edx - xor ebx,ecx - add eax,ebp - rol ebx,7 - xor edi,eax - rol edi,16 - mov DWORD [24+esp],ebx - add esi,edi - xor ebp,esi - rol ebp,12 - mov ebx,DWORD [20+esp] - add eax,ebp - xor edi,eax - mov DWORD [12+esp],eax - rol edi,8 - mov eax,DWORD [esp] - add esi,edi - mov edx,edi - xor ebp,esi - add eax,ebx - rol ebp,7 - xor edx,eax - rol edx,16 - mov DWORD [28+esp],ebp - add ecx,edx - xor ebx,ecx - mov edi,DWORD [48+esp] - rol ebx,12 - mov ebp,DWORD [24+esp] - add eax,ebx - xor edx,eax - mov DWORD [esp],eax - rol edx,8 - mov eax,DWORD [4+esp] - add ecx,edx - mov DWORD [60+esp],edx - xor ebx,ecx - add eax,ebp - rol ebx,7 - xor edi,eax - mov DWORD [40+esp],ecx - rol edi,16 - mov DWORD [20+esp],ebx - add esi,edi - mov ecx,DWORD [32+esp] - xor ebp,esi - mov edx,DWORD [52+esp] - rol ebp,12 - mov ebx,DWORD [28+esp] - add eax,ebp - xor edi,eax - mov DWORD [4+esp],eax - rol edi,8 - mov 
eax,DWORD [8+esp] - add esi,edi - mov DWORD [48+esp],edi - xor ebp,esi - add eax,ebx - rol ebp,7 - xor edx,eax - mov DWORD [44+esp],esi - rol edx,16 - mov DWORD [24+esp],ebp - add ecx,edx - mov esi,DWORD [36+esp] - xor ebx,ecx - mov edi,DWORD [56+esp] - rol ebx,12 - mov ebp,DWORD [16+esp] - add eax,ebx - xor edx,eax - mov DWORD [8+esp],eax - rol edx,8 - mov eax,DWORD [12+esp] - add ecx,edx - mov DWORD [52+esp],edx - xor ebx,ecx - add eax,ebp - rol ebx,7 - xor edi,eax - rol edi,16 - mov DWORD [28+esp],ebx - add esi,edi - xor ebp,esi - mov edx,DWORD [48+esp] - rol ebp,12 - mov ebx,DWORD [128+esp] - add eax,ebp - xor edi,eax - mov DWORD [12+esp],eax - rol edi,8 - mov eax,DWORD [esp] - add esi,edi - mov DWORD [56+esp],edi - xor ebp,esi - rol ebp,7 - dec ebx - jnz NEAR L$004loop - mov ebx,DWORD [160+esp] - add eax,1634760805 - add ebp,DWORD [80+esp] - add ecx,DWORD [96+esp] - add esi,DWORD [100+esp] - cmp ebx,64 - jb NEAR L$005tail - mov ebx,DWORD [156+esp] - add edx,DWORD [112+esp] - add edi,DWORD [120+esp] - xor eax,DWORD [ebx] - xor ebp,DWORD [16+ebx] - mov DWORD [esp],eax - mov eax,DWORD [152+esp] - xor ecx,DWORD [32+ebx] - xor esi,DWORD [36+ebx] - xor edx,DWORD [48+ebx] - xor edi,DWORD [56+ebx] - mov DWORD [16+eax],ebp - mov DWORD [32+eax],ecx - mov DWORD [36+eax],esi - mov DWORD [48+eax],edx - mov DWORD [56+eax],edi - mov ebp,DWORD [4+esp] - mov ecx,DWORD [8+esp] - mov esi,DWORD [12+esp] - mov edx,DWORD [20+esp] - mov edi,DWORD [24+esp] - add ebp,857760878 - add ecx,2036477234 - add esi,1797285236 - add edx,DWORD [84+esp] - add edi,DWORD [88+esp] - xor ebp,DWORD [4+ebx] - xor ecx,DWORD [8+ebx] - xor esi,DWORD [12+ebx] - xor edx,DWORD [20+ebx] - xor edi,DWORD [24+ebx] - mov DWORD [4+eax],ebp - mov DWORD [8+eax],ecx - mov DWORD [12+eax],esi - mov DWORD [20+eax],edx - mov DWORD [24+eax],edi - mov ebp,DWORD [28+esp] - mov ecx,DWORD [40+esp] - mov esi,DWORD [44+esp] - mov edx,DWORD [52+esp] - mov edi,DWORD [60+esp] - add ebp,DWORD [92+esp] - add ecx,DWORD [104+esp] - 
add esi,DWORD [108+esp] - add edx,DWORD [116+esp] - add edi,DWORD [124+esp] - xor ebp,DWORD [28+ebx] - xor ecx,DWORD [40+ebx] - xor esi,DWORD [44+ebx] - xor edx,DWORD [52+ebx] - xor edi,DWORD [60+ebx] - lea ebx,[64+ebx] - mov DWORD [28+eax],ebp - mov ebp,DWORD [esp] - mov DWORD [40+eax],ecx - mov ecx,DWORD [160+esp] - mov DWORD [44+eax],esi - mov DWORD [52+eax],edx - mov DWORD [60+eax],edi - mov DWORD [eax],ebp - lea eax,[64+eax] - sub ecx,64 - jnz NEAR L$003outer_loop - jmp NEAR L$006done -L$005tail: - add edx,DWORD [112+esp] - add edi,DWORD [120+esp] - mov DWORD [esp],eax - mov DWORD [16+esp],ebp - mov DWORD [32+esp],ecx - mov DWORD [36+esp],esi - mov DWORD [48+esp],edx - mov DWORD [56+esp],edi - mov ebp,DWORD [4+esp] - mov ecx,DWORD [8+esp] - mov esi,DWORD [12+esp] - mov edx,DWORD [20+esp] - mov edi,DWORD [24+esp] - add ebp,857760878 - add ecx,2036477234 - add esi,1797285236 - add edx,DWORD [84+esp] - add edi,DWORD [88+esp] - mov DWORD [4+esp],ebp - mov DWORD [8+esp],ecx - mov DWORD [12+esp],esi - mov DWORD [20+esp],edx - mov DWORD [24+esp],edi - mov ebp,DWORD [28+esp] - mov ecx,DWORD [40+esp] - mov esi,DWORD [44+esp] - mov edx,DWORD [52+esp] - mov edi,DWORD [60+esp] - add ebp,DWORD [92+esp] - add ecx,DWORD [104+esp] - add esi,DWORD [108+esp] - add edx,DWORD [116+esp] - add edi,DWORD [124+esp] - mov DWORD [28+esp],ebp - mov ebp,DWORD [156+esp] - mov DWORD [40+esp],ecx - mov ecx,DWORD [152+esp] - mov DWORD [44+esp],esi - xor esi,esi - mov DWORD [52+esp],edx - mov DWORD [60+esp],edi - xor eax,eax - xor edx,edx -L$007tail_loop: - mov al,BYTE [ebp*1+esi] - mov dl,BYTE [esi*1+esp] - lea esi,[1+esi] - xor al,dl - mov BYTE [esi*1+ecx-1],al - dec ebx - jnz NEAR L$007tail_loop -L$006done: - add esp,132 -L$000no_data: - pop edi - pop esi - pop ebx - pop ebp - ret -global _ChaCha20_ssse3 -align 16 -_ChaCha20_ssse3: -L$_ChaCha20_ssse3_begin: - push ebp - push ebx - push esi - push edi -L$ssse3_shortcut: - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov ecx,DWORD 
[28+esp] - mov edx,DWORD [32+esp] - mov ebx,DWORD [36+esp] - mov ebp,esp - sub esp,524 - and esp,-64 - mov DWORD [512+esp],ebp - lea eax,[(L$ssse3_data-L$pic_point)+eax] - movdqu xmm3,[ebx] - cmp ecx,256 - jb NEAR L$0081x - mov DWORD [516+esp],edx - mov DWORD [520+esp],ebx - sub ecx,256 - lea ebp,[384+esp] - movdqu xmm7,[edx] - pshufd xmm0,xmm3,0 - pshufd xmm1,xmm3,85 - pshufd xmm2,xmm3,170 - pshufd xmm3,xmm3,255 - paddd xmm0,[48+eax] - pshufd xmm4,xmm7,0 - pshufd xmm5,xmm7,85 - psubd xmm0,[64+eax] - pshufd xmm6,xmm7,170 - pshufd xmm7,xmm7,255 - movdqa [64+ebp],xmm0 - movdqa [80+ebp],xmm1 - movdqa [96+ebp],xmm2 - movdqa [112+ebp],xmm3 - movdqu xmm3,[16+edx] - movdqa [ebp-64],xmm4 - movdqa [ebp-48],xmm5 - movdqa [ebp-32],xmm6 - movdqa [ebp-16],xmm7 - movdqa xmm7,[32+eax] - lea ebx,[128+esp] - pshufd xmm0,xmm3,0 - pshufd xmm1,xmm3,85 - pshufd xmm2,xmm3,170 - pshufd xmm3,xmm3,255 - pshufd xmm4,xmm7,0 - pshufd xmm5,xmm7,85 - pshufd xmm6,xmm7,170 - pshufd xmm7,xmm7,255 - movdqa [ebp],xmm0 - movdqa [16+ebp],xmm1 - movdqa [32+ebp],xmm2 - movdqa [48+ebp],xmm3 - movdqa [ebp-128],xmm4 - movdqa [ebp-112],xmm5 - movdqa [ebp-96],xmm6 - movdqa [ebp-80],xmm7 - lea esi,[128+esi] - lea edi,[128+edi] - jmp NEAR L$009outer_loop -align 16 -L$009outer_loop: - movdqa xmm1,[ebp-112] - movdqa xmm2,[ebp-96] - movdqa xmm3,[ebp-80] - movdqa xmm5,[ebp-48] - movdqa xmm6,[ebp-32] - movdqa xmm7,[ebp-16] - movdqa [ebx-112],xmm1 - movdqa [ebx-96],xmm2 - movdqa [ebx-80],xmm3 - movdqa [ebx-48],xmm5 - movdqa [ebx-32],xmm6 - movdqa [ebx-16],xmm7 - movdqa xmm2,[32+ebp] - movdqa xmm3,[48+ebp] - movdqa xmm4,[64+ebp] - movdqa xmm5,[80+ebp] - movdqa xmm6,[96+ebp] - movdqa xmm7,[112+ebp] - paddd xmm4,[64+eax] - movdqa [32+ebx],xmm2 - movdqa [48+ebx],xmm3 - movdqa [64+ebx],xmm4 - movdqa [80+ebx],xmm5 - movdqa [96+ebx],xmm6 - movdqa [112+ebx],xmm7 - movdqa [64+ebp],xmm4 - movdqa xmm0,[ebp-128] - movdqa xmm6,xmm4 - movdqa xmm3,[ebp-64] - movdqa xmm4,[ebp] - movdqa xmm5,[16+ebp] - mov edx,10 - nop -align 16 
-L$010loop: - paddd xmm0,xmm3 - movdqa xmm2,xmm3 - pxor xmm6,xmm0 - pshufb xmm6,[eax] - paddd xmm4,xmm6 - pxor xmm2,xmm4 - movdqa xmm3,[ebx-48] - movdqa xmm1,xmm2 - pslld xmm2,12 - psrld xmm1,20 - por xmm2,xmm1 - movdqa xmm1,[ebx-112] - paddd xmm0,xmm2 - movdqa xmm7,[80+ebx] - pxor xmm6,xmm0 - movdqa [ebx-128],xmm0 - pshufb xmm6,[16+eax] - paddd xmm4,xmm6 - movdqa [64+ebx],xmm6 - pxor xmm2,xmm4 - paddd xmm1,xmm3 - movdqa xmm0,xmm2 - pslld xmm2,7 - psrld xmm0,25 - pxor xmm7,xmm1 - por xmm2,xmm0 - movdqa [ebx],xmm4 - pshufb xmm7,[eax] - movdqa [ebx-64],xmm2 - paddd xmm5,xmm7 - movdqa xmm4,[32+ebx] - pxor xmm3,xmm5 - movdqa xmm2,[ebx-32] - movdqa xmm0,xmm3 - pslld xmm3,12 - psrld xmm0,20 - por xmm3,xmm0 - movdqa xmm0,[ebx-96] - paddd xmm1,xmm3 - movdqa xmm6,[96+ebx] - pxor xmm7,xmm1 - movdqa [ebx-112],xmm1 - pshufb xmm7,[16+eax] - paddd xmm5,xmm7 - movdqa [80+ebx],xmm7 - pxor xmm3,xmm5 - paddd xmm0,xmm2 - movdqa xmm1,xmm3 - pslld xmm3,7 - psrld xmm1,25 - pxor xmm6,xmm0 - por xmm3,xmm1 - movdqa [16+ebx],xmm5 - pshufb xmm6,[eax] - movdqa [ebx-48],xmm3 - paddd xmm4,xmm6 - movdqa xmm5,[48+ebx] - pxor xmm2,xmm4 - movdqa xmm3,[ebx-16] - movdqa xmm1,xmm2 - pslld xmm2,12 - psrld xmm1,20 - por xmm2,xmm1 - movdqa xmm1,[ebx-80] - paddd xmm0,xmm2 - movdqa xmm7,[112+ebx] - pxor xmm6,xmm0 - movdqa [ebx-96],xmm0 - pshufb xmm6,[16+eax] - paddd xmm4,xmm6 - movdqa [96+ebx],xmm6 - pxor xmm2,xmm4 - paddd xmm1,xmm3 - movdqa xmm0,xmm2 - pslld xmm2,7 - psrld xmm0,25 - pxor xmm7,xmm1 - por xmm2,xmm0 - pshufb xmm7,[eax] - movdqa [ebx-32],xmm2 - paddd xmm5,xmm7 - pxor xmm3,xmm5 - movdqa xmm2,[ebx-48] - movdqa xmm0,xmm3 - pslld xmm3,12 - psrld xmm0,20 - por xmm3,xmm0 - movdqa xmm0,[ebx-128] - paddd xmm1,xmm3 - pxor xmm7,xmm1 - movdqa [ebx-80],xmm1 - pshufb xmm7,[16+eax] - paddd xmm5,xmm7 - movdqa xmm6,xmm7 - pxor xmm3,xmm5 - paddd xmm0,xmm2 - movdqa xmm1,xmm3 - pslld xmm3,7 - psrld xmm1,25 - pxor xmm6,xmm0 - por xmm3,xmm1 - pshufb xmm6,[eax] - movdqa [ebx-16],xmm3 - paddd xmm4,xmm6 - pxor 
xmm2,xmm4 - movdqa xmm3,[ebx-32] - movdqa xmm1,xmm2 - pslld xmm2,12 - psrld xmm1,20 - por xmm2,xmm1 - movdqa xmm1,[ebx-112] - paddd xmm0,xmm2 - movdqa xmm7,[64+ebx] - pxor xmm6,xmm0 - movdqa [ebx-128],xmm0 - pshufb xmm6,[16+eax] - paddd xmm4,xmm6 - movdqa [112+ebx],xmm6 - pxor xmm2,xmm4 - paddd xmm1,xmm3 - movdqa xmm0,xmm2 - pslld xmm2,7 - psrld xmm0,25 - pxor xmm7,xmm1 - por xmm2,xmm0 - movdqa [32+ebx],xmm4 - pshufb xmm7,[eax] - movdqa [ebx-48],xmm2 - paddd xmm5,xmm7 - movdqa xmm4,[ebx] - pxor xmm3,xmm5 - movdqa xmm2,[ebx-16] - movdqa xmm0,xmm3 - pslld xmm3,12 - psrld xmm0,20 - por xmm3,xmm0 - movdqa xmm0,[ebx-96] - paddd xmm1,xmm3 - movdqa xmm6,[80+ebx] - pxor xmm7,xmm1 - movdqa [ebx-112],xmm1 - pshufb xmm7,[16+eax] - paddd xmm5,xmm7 - movdqa [64+ebx],xmm7 - pxor xmm3,xmm5 - paddd xmm0,xmm2 - movdqa xmm1,xmm3 - pslld xmm3,7 - psrld xmm1,25 - pxor xmm6,xmm0 - por xmm3,xmm1 - movdqa [48+ebx],xmm5 - pshufb xmm6,[eax] - movdqa [ebx-32],xmm3 - paddd xmm4,xmm6 - movdqa xmm5,[16+ebx] - pxor xmm2,xmm4 - movdqa xmm3,[ebx-64] - movdqa xmm1,xmm2 - pslld xmm2,12 - psrld xmm1,20 - por xmm2,xmm1 - movdqa xmm1,[ebx-80] - paddd xmm0,xmm2 - movdqa xmm7,[96+ebx] - pxor xmm6,xmm0 - movdqa [ebx-96],xmm0 - pshufb xmm6,[16+eax] - paddd xmm4,xmm6 - movdqa [80+ebx],xmm6 - pxor xmm2,xmm4 - paddd xmm1,xmm3 - movdqa xmm0,xmm2 - pslld xmm2,7 - psrld xmm0,25 - pxor xmm7,xmm1 - por xmm2,xmm0 - pshufb xmm7,[eax] - movdqa [ebx-16],xmm2 - paddd xmm5,xmm7 - pxor xmm3,xmm5 - movdqa xmm0,xmm3 - pslld xmm3,12 - psrld xmm0,20 - por xmm3,xmm0 - movdqa xmm0,[ebx-128] - paddd xmm1,xmm3 - movdqa xmm6,[64+ebx] - pxor xmm7,xmm1 - movdqa [ebx-80],xmm1 - pshufb xmm7,[16+eax] - paddd xmm5,xmm7 - movdqa [96+ebx],xmm7 - pxor xmm3,xmm5 - movdqa xmm1,xmm3 - pslld xmm3,7 - psrld xmm1,25 - por xmm3,xmm1 - dec edx - jnz NEAR L$010loop - movdqa [ebx-64],xmm3 - movdqa [ebx],xmm4 - movdqa [16+ebx],xmm5 - movdqa [64+ebx],xmm6 - movdqa [96+ebx],xmm7 - movdqa xmm1,[ebx-112] - movdqa xmm2,[ebx-96] - movdqa xmm3,[ebx-80] - 
paddd xmm0,[ebp-128] - paddd xmm1,[ebp-112] - paddd xmm2,[ebp-96] - paddd xmm3,[ebp-80] - movdqa xmm6,xmm0 - punpckldq xmm0,xmm1 - movdqa xmm7,xmm2 - punpckldq xmm2,xmm3 - punpckhdq xmm6,xmm1 - punpckhdq xmm7,xmm3 - movdqa xmm1,xmm0 - punpcklqdq xmm0,xmm2 - movdqa xmm3,xmm6 - punpcklqdq xmm6,xmm7 - punpckhqdq xmm1,xmm2 - punpckhqdq xmm3,xmm7 - movdqu xmm4,[esi-128] - movdqu xmm5,[esi-64] - movdqu xmm2,[esi] - movdqu xmm7,[64+esi] - lea esi,[16+esi] - pxor xmm4,xmm0 - movdqa xmm0,[ebx-64] - pxor xmm5,xmm1 - movdqa xmm1,[ebx-48] - pxor xmm6,xmm2 - movdqa xmm2,[ebx-32] - pxor xmm7,xmm3 - movdqa xmm3,[ebx-16] - movdqu [edi-128],xmm4 - movdqu [edi-64],xmm5 - movdqu [edi],xmm6 - movdqu [64+edi],xmm7 - lea edi,[16+edi] - paddd xmm0,[ebp-64] - paddd xmm1,[ebp-48] - paddd xmm2,[ebp-32] - paddd xmm3,[ebp-16] - movdqa xmm6,xmm0 - punpckldq xmm0,xmm1 - movdqa xmm7,xmm2 - punpckldq xmm2,xmm3 - punpckhdq xmm6,xmm1 - punpckhdq xmm7,xmm3 - movdqa xmm1,xmm0 - punpcklqdq xmm0,xmm2 - movdqa xmm3,xmm6 - punpcklqdq xmm6,xmm7 - punpckhqdq xmm1,xmm2 - punpckhqdq xmm3,xmm7 - movdqu xmm4,[esi-128] - movdqu xmm5,[esi-64] - movdqu xmm2,[esi] - movdqu xmm7,[64+esi] - lea esi,[16+esi] - pxor xmm4,xmm0 - movdqa xmm0,[ebx] - pxor xmm5,xmm1 - movdqa xmm1,[16+ebx] - pxor xmm6,xmm2 - movdqa xmm2,[32+ebx] - pxor xmm7,xmm3 - movdqa xmm3,[48+ebx] - movdqu [edi-128],xmm4 - movdqu [edi-64],xmm5 - movdqu [edi],xmm6 - movdqu [64+edi],xmm7 - lea edi,[16+edi] - paddd xmm0,[ebp] - paddd xmm1,[16+ebp] - paddd xmm2,[32+ebp] - paddd xmm3,[48+ebp] - movdqa xmm6,xmm0 - punpckldq xmm0,xmm1 - movdqa xmm7,xmm2 - punpckldq xmm2,xmm3 - punpckhdq xmm6,xmm1 - punpckhdq xmm7,xmm3 - movdqa xmm1,xmm0 - punpcklqdq xmm0,xmm2 - movdqa xmm3,xmm6 - punpcklqdq xmm6,xmm7 - punpckhqdq xmm1,xmm2 - punpckhqdq xmm3,xmm7 - movdqu xmm4,[esi-128] - movdqu xmm5,[esi-64] - movdqu xmm2,[esi] - movdqu xmm7,[64+esi] - lea esi,[16+esi] - pxor xmm4,xmm0 - movdqa xmm0,[64+ebx] - pxor xmm5,xmm1 - movdqa xmm1,[80+ebx] - pxor xmm6,xmm2 - movdqa 
xmm2,[96+ebx] - pxor xmm7,xmm3 - movdqa xmm3,[112+ebx] - movdqu [edi-128],xmm4 - movdqu [edi-64],xmm5 - movdqu [edi],xmm6 - movdqu [64+edi],xmm7 - lea edi,[16+edi] - paddd xmm0,[64+ebp] - paddd xmm1,[80+ebp] - paddd xmm2,[96+ebp] - paddd xmm3,[112+ebp] - movdqa xmm6,xmm0 - punpckldq xmm0,xmm1 - movdqa xmm7,xmm2 - punpckldq xmm2,xmm3 - punpckhdq xmm6,xmm1 - punpckhdq xmm7,xmm3 - movdqa xmm1,xmm0 - punpcklqdq xmm0,xmm2 - movdqa xmm3,xmm6 - punpcklqdq xmm6,xmm7 - punpckhqdq xmm1,xmm2 - punpckhqdq xmm3,xmm7 - movdqu xmm4,[esi-128] - movdqu xmm5,[esi-64] - movdqu xmm2,[esi] - movdqu xmm7,[64+esi] - lea esi,[208+esi] - pxor xmm4,xmm0 - pxor xmm5,xmm1 - pxor xmm6,xmm2 - pxor xmm7,xmm3 - movdqu [edi-128],xmm4 - movdqu [edi-64],xmm5 - movdqu [edi],xmm6 - movdqu [64+edi],xmm7 - lea edi,[208+edi] - sub ecx,256 - jnc NEAR L$009outer_loop - add ecx,256 - jz NEAR L$011done - mov ebx,DWORD [520+esp] - lea esi,[esi-128] - mov edx,DWORD [516+esp] - lea edi,[edi-128] - movd xmm2,DWORD [64+ebp] - movdqu xmm3,[ebx] - paddd xmm2,[96+eax] - pand xmm3,[112+eax] - por xmm3,xmm2 -L$0081x: - movdqa xmm0,[32+eax] - movdqu xmm1,[edx] - movdqu xmm2,[16+edx] - movdqa xmm6,[eax] - movdqa xmm7,[16+eax] - mov DWORD [48+esp],ebp - movdqa [esp],xmm0 - movdqa [16+esp],xmm1 - movdqa [32+esp],xmm2 - movdqa [48+esp],xmm3 - mov edx,10 - jmp NEAR L$012loop1x -align 16 -L$013outer1x: - movdqa xmm3,[80+eax] - movdqa xmm0,[esp] - movdqa xmm1,[16+esp] - movdqa xmm2,[32+esp] - paddd xmm3,[48+esp] - mov edx,10 - movdqa [48+esp],xmm3 - jmp NEAR L$012loop1x -align 16 -L$012loop1x: - paddd xmm0,xmm1 - pxor xmm3,xmm0 -db 102,15,56,0,222 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,20 - pslld xmm4,12 - por xmm1,xmm4 - paddd xmm0,xmm1 - pxor xmm3,xmm0 -db 102,15,56,0,223 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,25 - pslld xmm4,7 - por xmm1,xmm4 - pshufd xmm2,xmm2,78 - pshufd xmm1,xmm1,57 - pshufd xmm3,xmm3,147 - nop - paddd xmm0,xmm1 - pxor xmm3,xmm0 -db 102,15,56,0,222 
- paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,20 - pslld xmm4,12 - por xmm1,xmm4 - paddd xmm0,xmm1 - pxor xmm3,xmm0 -db 102,15,56,0,223 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,25 - pslld xmm4,7 - por xmm1,xmm4 - pshufd xmm2,xmm2,78 - pshufd xmm1,xmm1,147 - pshufd xmm3,xmm3,57 - dec edx - jnz NEAR L$012loop1x - paddd xmm0,[esp] - paddd xmm1,[16+esp] - paddd xmm2,[32+esp] - paddd xmm3,[48+esp] - cmp ecx,64 - jb NEAR L$014tail - movdqu xmm4,[esi] - movdqu xmm5,[16+esi] - pxor xmm0,xmm4 - movdqu xmm4,[32+esi] - pxor xmm1,xmm5 - movdqu xmm5,[48+esi] - pxor xmm2,xmm4 - pxor xmm3,xmm5 - lea esi,[64+esi] - movdqu [edi],xmm0 - movdqu [16+edi],xmm1 - movdqu [32+edi],xmm2 - movdqu [48+edi],xmm3 - lea edi,[64+edi] - sub ecx,64 - jnz NEAR L$013outer1x - jmp NEAR L$011done -L$014tail: - movdqa [esp],xmm0 - movdqa [16+esp],xmm1 - movdqa [32+esp],xmm2 - movdqa [48+esp],xmm3 - xor eax,eax - xor edx,edx - xor ebp,ebp -L$015tail_loop: - mov al,BYTE [ebp*1+esp] - mov dl,BYTE [ebp*1+esi] - lea ebp,[1+ebp] - xor al,dl - mov BYTE [ebp*1+edi-1],al - dec ecx - jnz NEAR L$015tail_loop -L$011done: - mov esp,DWORD [512+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -align 64 -L$ssse3_data: -db 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 -db 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 -dd 1634760805,857760878,2036477234,1797285236 -dd 0,1,2,3 -dd 4,4,4,4 -dd 1,0,0,0 -dd 4,0,0,0 -dd 0,-1,-1,-1 -align 64 -db 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 -db 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 -db 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 -db 114,103,62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aes-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aes-586.asm deleted file mode 100644 index c3a47d88f2..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aes-586.asm +++ /dev/null @@ -1,3225 +0,0 @@ -; This 
file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -align 16 -__x86_AES_encrypt_compact: - mov DWORD [20+esp],edi - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi - mov edi,DWORD [ebp-128] - mov esi,DWORD [ebp-96] - mov edi,DWORD [ebp-64] - mov esi,DWORD [ebp-32] - mov edi,DWORD [ebp] - mov esi,DWORD [32+ebp] - mov edi,DWORD [64+ebp] - mov esi,DWORD [96+ebp] -align 16 -L$000loop: - mov esi,eax - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,ecx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,edx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - shr ebx,16 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,ch - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,eax - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - shr ecx,24 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,dh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,eax - 
shr edi,16 - and edx,255 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - and edx,255 - movzx edx,BYTE [edx*1+ebp-128] - movzx eax,ah - movzx eax,BYTE [eax*1+ebp-128] - shl eax,8 - xor edx,eax - mov eax,DWORD [4+esp] - and ebx,255 - movzx ebx,BYTE [ebx*1+ebp-128] - shl ebx,16 - xor edx,ebx - mov ebx,DWORD [8+esp] - movzx ecx,BYTE [ecx*1+ebp-128] - shl ecx,24 - xor edx,ecx - mov ecx,esi - mov ebp,2155905152 - and ebp,ecx - lea edi,[ecx*1+ecx] - mov esi,ebp - shr ebp,7 - and edi,4278124286 - sub esi,ebp - mov ebp,ecx - and esi,454761243 - ror ebp,16 - xor esi,edi - mov edi,ecx - xor ecx,esi - ror edi,24 - xor esi,ebp - rol ecx,24 - xor esi,edi - mov ebp,2155905152 - xor ecx,esi - and ebp,edx - lea edi,[edx*1+edx] - mov esi,ebp - shr ebp,7 - and edi,4278124286 - sub esi,ebp - mov ebp,edx - and esi,454761243 - ror ebp,16 - xor esi,edi - mov edi,edx - xor edx,esi - ror edi,24 - xor esi,ebp - rol edx,24 - xor esi,edi - mov ebp,2155905152 - xor edx,esi - and ebp,eax - lea edi,[eax*1+eax] - mov esi,ebp - shr ebp,7 - and edi,4278124286 - sub esi,ebp - mov ebp,eax - and esi,454761243 - ror ebp,16 - xor esi,edi - mov edi,eax - xor eax,esi - ror edi,24 - xor esi,ebp - rol eax,24 - xor esi,edi - mov ebp,2155905152 - xor eax,esi - and ebp,ebx - lea edi,[ebx*1+ebx] - mov esi,ebp - shr ebp,7 - and edi,4278124286 - sub esi,ebp - mov ebp,ebx - and esi,454761243 - ror ebp,16 - xor esi,edi - mov edi,ebx - xor ebx,esi - ror edi,24 - xor esi,ebp - rol ebx,24 - xor esi,edi - xor ebx,esi - mov edi,DWORD [20+esp] - mov ebp,DWORD [28+esp] - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - cmp edi,DWORD [24+esp] - mov DWORD [20+esp],edi - jb NEAR L$000loop - mov esi,eax - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,ecx - shr edi,16 - and edi,255 - 
movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,edx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - shr ebx,16 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,ch - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,eax - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - shr ecx,24 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,dh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,eax - shr edi,16 - and edx,255 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov edi,DWORD [20+esp] - and edx,255 - movzx edx,BYTE [edx*1+ebp-128] - movzx eax,ah - movzx eax,BYTE [eax*1+ebp-128] - shl eax,8 - xor edx,eax - mov eax,DWORD [4+esp] - and ebx,255 - movzx ebx,BYTE [ebx*1+ebp-128] - shl ebx,16 - xor edx,ebx - mov ebx,DWORD [8+esp] - movzx ecx,BYTE [ecx*1+ebp-128] - shl ecx,24 - xor edx,ecx - mov ecx,esi - xor eax,DWORD [16+edi] - xor ebx,DWORD [20+edi] - xor ecx,DWORD [24+edi] - xor edx,DWORD [28+edi] - ret -align 16 -__sse_AES_encrypt_compact: - pxor mm0,[edi] - pxor mm4,[8+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi - mov eax,454761243 - mov DWORD [8+esp],eax - mov DWORD [12+esp],eax - mov eax,DWORD [ebp-128] - mov ebx,DWORD [ebp-96] - mov ecx,DWORD [ebp-64] - mov edx,DWORD [ebp-32] - mov eax,DWORD [ebp] - mov ebx,DWORD [32+ebp] - mov ecx,DWORD [64+ebp] - mov edx,DWORD [96+ebp] -align 16 -L$001loop: - pshufw mm1,mm0,8 - pshufw mm5,mm4,13 - movd eax,mm1 - movd ebx,mm5 - mov DWORD [20+esp],edi - movzx esi,al - movzx edx,ah - pshufw mm2,mm0,13 - movzx ecx,BYTE [esi*1+ebp-128] - movzx edi,bl - movzx edx,BYTE [edx*1+ebp-128] 
- shr eax,16 - shl edx,8 - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,16 - pshufw mm6,mm4,8 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,ah - shl esi,24 - shr ebx,16 - or edx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,8 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,al - shl esi,24 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bl - movd eax,mm2 - movd mm0,ecx - movzx ecx,BYTE [edi*1+ebp-128] - movzx edi,ah - shl ecx,16 - movd ebx,mm6 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,24 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bl - shl esi,8 - shr ebx,16 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,al - shr eax,16 - movd mm1,ecx - movzx ecx,BYTE [edi*1+ebp-128] - movzx edi,ah - shl ecx,16 - and eax,255 - or ecx,esi - punpckldq mm0,mm1 - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,24 - and ebx,255 - movzx eax,BYTE [eax*1+ebp-128] - or ecx,esi - shl eax,16 - movzx esi,BYTE [edi*1+ebp-128] - or edx,eax - shl esi,8 - movzx ebx,BYTE [ebx*1+ebp-128] - or ecx,esi - or edx,ebx - mov edi,DWORD [20+esp] - movd mm4,ecx - movd mm5,edx - punpckldq mm4,mm5 - add edi,16 - cmp edi,DWORD [24+esp] - ja NEAR L$002out - movq mm2,[8+esp] - pxor mm3,mm3 - pxor mm7,mm7 - movq mm1,mm0 - movq mm5,mm4 - pcmpgtb mm3,mm0 - pcmpgtb mm7,mm4 - pand mm3,mm2 - pand mm7,mm2 - pshufw mm2,mm0,177 - pshufw mm6,mm4,177 - paddb mm0,mm0 - paddb mm4,mm4 - pxor mm0,mm3 - pxor mm4,mm7 - pshufw mm3,mm2,177 - pshufw mm7,mm6,177 - pxor mm1,mm0 - pxor mm5,mm4 - pxor mm0,mm2 - pxor mm4,mm6 - movq mm2,mm3 - movq mm6,mm7 - pslld mm3,8 - pslld mm7,8 - psrld mm2,24 - psrld mm6,24 - pxor mm0,mm3 - pxor mm4,mm7 - pxor mm0,mm2 - pxor mm4,mm6 - movq mm3,mm1 - movq mm7,mm5 - movq mm2,[edi] - movq mm6,[8+edi] - psrld mm1,8 - psrld mm5,8 - mov eax,DWORD [ebp-128] - pslld mm3,24 - pslld mm7,24 - mov ebx,DWORD [ebp-64] - pxor mm0,mm1 - pxor mm4,mm5 - mov ecx,DWORD [ebp] - pxor mm0,mm3 - pxor 
mm4,mm7 - mov edx,DWORD [64+ebp] - pxor mm0,mm2 - pxor mm4,mm6 - jmp NEAR L$001loop -align 16 -L$002out: - pxor mm0,[edi] - pxor mm4,[8+edi] - ret -align 16 -__x86_AES_encrypt: - mov DWORD [20+esp],edi - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi -align 16 -L$003loop: - mov esi,eax - and esi,255 - mov esi,DWORD [esi*8+ebp] - movzx edi,bh - xor esi,DWORD [3+edi*8+ebp] - mov edi,ecx - shr edi,16 - and edi,255 - xor esi,DWORD [2+edi*8+ebp] - mov edi,edx - shr edi,24 - xor esi,DWORD [1+edi*8+ebp] - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - shr ebx,16 - mov esi,DWORD [esi*8+ebp] - movzx edi,ch - xor esi,DWORD [3+edi*8+ebp] - mov edi,edx - shr edi,16 - and edi,255 - xor esi,DWORD [2+edi*8+ebp] - mov edi,eax - shr edi,24 - xor esi,DWORD [1+edi*8+ebp] - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - shr ecx,24 - mov esi,DWORD [esi*8+ebp] - movzx edi,dh - xor esi,DWORD [3+edi*8+ebp] - mov edi,eax - shr edi,16 - and edx,255 - and edi,255 - xor esi,DWORD [2+edi*8+ebp] - movzx edi,bh - xor esi,DWORD [1+edi*8+ebp] - mov edi,DWORD [20+esp] - mov edx,DWORD [edx*8+ebp] - movzx eax,ah - xor edx,DWORD [3+eax*8+ebp] - mov eax,DWORD [4+esp] - and ebx,255 - xor edx,DWORD [2+ebx*8+ebp] - mov ebx,DWORD [8+esp] - xor edx,DWORD [1+ecx*8+ebp] - mov ecx,esi - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - cmp edi,DWORD [24+esp] - mov DWORD [20+esp],edi - jb NEAR L$003loop - mov esi,eax - and esi,255 - mov esi,DWORD [2+esi*8+ebp] - and esi,255 - movzx edi,bh - mov edi,DWORD [edi*8+ebp] - and edi,65280 - xor esi,edi - mov edi,ecx - shr edi,16 - and edi,255 - mov edi,DWORD [edi*8+ebp] - and edi,16711680 - xor esi,edi - mov edi,edx - shr edi,24 - mov edi,DWORD [2+edi*8+ebp] - and edi,4278190080 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - shr ebx,16 - 
mov esi,DWORD [2+esi*8+ebp] - and esi,255 - movzx edi,ch - mov edi,DWORD [edi*8+ebp] - and edi,65280 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - mov edi,DWORD [edi*8+ebp] - and edi,16711680 - xor esi,edi - mov edi,eax - shr edi,24 - mov edi,DWORD [2+edi*8+ebp] - and edi,4278190080 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - shr ecx,24 - mov esi,DWORD [2+esi*8+ebp] - and esi,255 - movzx edi,dh - mov edi,DWORD [edi*8+ebp] - and edi,65280 - xor esi,edi - mov edi,eax - shr edi,16 - and edx,255 - and edi,255 - mov edi,DWORD [edi*8+ebp] - and edi,16711680 - xor esi,edi - movzx edi,bh - mov edi,DWORD [2+edi*8+ebp] - and edi,4278190080 - xor esi,edi - mov edi,DWORD [20+esp] - and edx,255 - mov edx,DWORD [2+edx*8+ebp] - and edx,255 - movzx eax,ah - mov eax,DWORD [eax*8+ebp] - and eax,65280 - xor edx,eax - mov eax,DWORD [4+esp] - and ebx,255 - mov ebx,DWORD [ebx*8+ebp] - and ebx,16711680 - xor edx,ebx - mov ebx,DWORD [8+esp] - mov ecx,DWORD [2+ecx*8+ebp] - and ecx,4278190080 - xor edx,ecx - mov ecx,esi - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - ret -align 64 -L$AES_Te: -dd 2774754246,2774754246 -dd 2222750968,2222750968 -dd 2574743534,2574743534 -dd 2373680118,2373680118 -dd 234025727,234025727 -dd 3177933782,3177933782 -dd 2976870366,2976870366 -dd 1422247313,1422247313 -dd 1345335392,1345335392 -dd 50397442,50397442 -dd 2842126286,2842126286 -dd 2099981142,2099981142 -dd 436141799,436141799 -dd 1658312629,1658312629 -dd 3870010189,3870010189 -dd 2591454956,2591454956 -dd 1170918031,1170918031 -dd 2642575903,2642575903 -dd 1086966153,1086966153 -dd 2273148410,2273148410 -dd 368769775,368769775 -dd 3948501426,3948501426 -dd 3376891790,3376891790 -dd 200339707,200339707 -dd 3970805057,3970805057 -dd 1742001331,1742001331 -dd 4255294047,4255294047 -dd 3937382213,3937382213 -dd 3214711843,3214711843 -dd 4154762323,4154762323 -dd 2524082916,2524082916 -dd 1539358875,1539358875 
-dd 3266819957,3266819957 -dd 486407649,486407649 -dd 2928907069,2928907069 -dd 1780885068,1780885068 -dd 1513502316,1513502316 -dd 1094664062,1094664062 -dd 49805301,49805301 -dd 1338821763,1338821763 -dd 1546925160,1546925160 -dd 4104496465,4104496465 -dd 887481809,887481809 -dd 150073849,150073849 -dd 2473685474,2473685474 -dd 1943591083,1943591083 -dd 1395732834,1395732834 -dd 1058346282,1058346282 -dd 201589768,201589768 -dd 1388824469,1388824469 -dd 1696801606,1696801606 -dd 1589887901,1589887901 -dd 672667696,672667696 -dd 2711000631,2711000631 -dd 251987210,251987210 -dd 3046808111,3046808111 -dd 151455502,151455502 -dd 907153956,907153956 -dd 2608889883,2608889883 -dd 1038279391,1038279391 -dd 652995533,652995533 -dd 1764173646,1764173646 -dd 3451040383,3451040383 -dd 2675275242,2675275242 -dd 453576978,453576978 -dd 2659418909,2659418909 -dd 1949051992,1949051992 -dd 773462580,773462580 -dd 756751158,756751158 -dd 2993581788,2993581788 -dd 3998898868,3998898868 -dd 4221608027,4221608027 -dd 4132590244,4132590244 -dd 1295727478,1295727478 -dd 1641469623,1641469623 -dd 3467883389,3467883389 -dd 2066295122,2066295122 -dd 1055122397,1055122397 -dd 1898917726,1898917726 -dd 2542044179,2542044179 -dd 4115878822,4115878822 -dd 1758581177,1758581177 -dd 0,0 -dd 753790401,753790401 -dd 1612718144,1612718144 -dd 536673507,536673507 -dd 3367088505,3367088505 -dd 3982187446,3982187446 -dd 3194645204,3194645204 -dd 1187761037,1187761037 -dd 3653156455,3653156455 -dd 1262041458,1262041458 -dd 3729410708,3729410708 -dd 3561770136,3561770136 -dd 3898103984,3898103984 -dd 1255133061,1255133061 -dd 1808847035,1808847035 -dd 720367557,720367557 -dd 3853167183,3853167183 -dd 385612781,385612781 -dd 3309519750,3309519750 -dd 3612167578,3612167578 -dd 1429418854,1429418854 -dd 2491778321,2491778321 -dd 3477423498,3477423498 -dd 284817897,284817897 -dd 100794884,100794884 -dd 2172616702,2172616702 -dd 4031795360,4031795360 -dd 1144798328,1144798328 -dd 3131023141,3131023141 -dd 
3819481163,3819481163 -dd 4082192802,4082192802 -dd 4272137053,4272137053 -dd 3225436288,3225436288 -dd 2324664069,2324664069 -dd 2912064063,2912064063 -dd 3164445985,3164445985 -dd 1211644016,1211644016 -dd 83228145,83228145 -dd 3753688163,3753688163 -dd 3249976951,3249976951 -dd 1977277103,1977277103 -dd 1663115586,1663115586 -dd 806359072,806359072 -dd 452984805,452984805 -dd 250868733,250868733 -dd 1842533055,1842533055 -dd 1288555905,1288555905 -dd 336333848,336333848 -dd 890442534,890442534 -dd 804056259,804056259 -dd 3781124030,3781124030 -dd 2727843637,2727843637 -dd 3427026056,3427026056 -dd 957814574,957814574 -dd 1472513171,1472513171 -dd 4071073621,4071073621 -dd 2189328124,2189328124 -dd 1195195770,1195195770 -dd 2892260552,2892260552 -dd 3881655738,3881655738 -dd 723065138,723065138 -dd 2507371494,2507371494 -dd 2690670784,2690670784 -dd 2558624025,2558624025 -dd 3511635870,3511635870 -dd 2145180835,2145180835 -dd 1713513028,1713513028 -dd 2116692564,2116692564 -dd 2878378043,2878378043 -dd 2206763019,2206763019 -dd 3393603212,3393603212 -dd 703524551,703524551 -dd 3552098411,3552098411 -dd 1007948840,1007948840 -dd 2044649127,2044649127 -dd 3797835452,3797835452 -dd 487262998,487262998 -dd 1994120109,1994120109 -dd 1004593371,1004593371 -dd 1446130276,1446130276 -dd 1312438900,1312438900 -dd 503974420,503974420 -dd 3679013266,3679013266 -dd 168166924,168166924 -dd 1814307912,1814307912 -dd 3831258296,3831258296 -dd 1573044895,1573044895 -dd 1859376061,1859376061 -dd 4021070915,4021070915 -dd 2791465668,2791465668 -dd 2828112185,2828112185 -dd 2761266481,2761266481 -dd 937747667,937747667 -dd 2339994098,2339994098 -dd 854058965,854058965 -dd 1137232011,1137232011 -dd 1496790894,1496790894 -dd 3077402074,3077402074 -dd 2358086913,2358086913 -dd 1691735473,1691735473 -dd 3528347292,3528347292 -dd 3769215305,3769215305 -dd 3027004632,3027004632 -dd 4199962284,4199962284 -dd 133494003,133494003 -dd 636152527,636152527 -dd 2942657994,2942657994 -dd 
2390391540,2390391540 -dd 3920539207,3920539207 -dd 403179536,403179536 -dd 3585784431,3585784431 -dd 2289596656,2289596656 -dd 1864705354,1864705354 -dd 1915629148,1915629148 -dd 605822008,605822008 -dd 4054230615,4054230615 -dd 3350508659,3350508659 -dd 1371981463,1371981463 -dd 602466507,602466507 -dd 2094914977,2094914977 -dd 2624877800,2624877800 -dd 555687742,555687742 -dd 3712699286,3712699286 -dd 3703422305,3703422305 -dd 2257292045,2257292045 -dd 2240449039,2240449039 -dd 2423288032,2423288032 -dd 1111375484,1111375484 -dd 3300242801,3300242801 -dd 2858837708,2858837708 -dd 3628615824,3628615824 -dd 84083462,84083462 -dd 32962295,32962295 -dd 302911004,302911004 -dd 2741068226,2741068226 -dd 1597322602,1597322602 -dd 4183250862,4183250862 -dd 3501832553,3501832553 -dd 2441512471,2441512471 -dd 1489093017,1489093017 -dd 656219450,656219450 -dd 3114180135,3114180135 -dd 954327513,954327513 -dd 335083755,335083755 -dd 3013122091,3013122091 -dd 856756514,856756514 -dd 3144247762,3144247762 -dd 1893325225,1893325225 -dd 2307821063,2307821063 -dd 2811532339,2811532339 -dd 3063651117,3063651117 -dd 572399164,572399164 -dd 2458355477,2458355477 -dd 552200649,552200649 -dd 1238290055,1238290055 -dd 4283782570,4283782570 -dd 2015897680,2015897680 -dd 2061492133,2061492133 -dd 2408352771,2408352771 -dd 4171342169,4171342169 -dd 2156497161,2156497161 -dd 386731290,386731290 -dd 3669999461,3669999461 -dd 837215959,837215959 -dd 3326231172,3326231172 -dd 3093850320,3093850320 -dd 3275833730,3275833730 -dd 2962856233,2962856233 -dd 1999449434,1999449434 -dd 286199582,286199582 -dd 3417354363,3417354363 -dd 4233385128,4233385128 -dd 3602627437,3602627437 -dd 974525996,974525996 -db 99,124,119,123,242,107,111,197 -db 48,1,103,43,254,215,171,118 -db 202,130,201,125,250,89,71,240 -db 173,212,162,175,156,164,114,192 -db 183,253,147,38,54,63,247,204 -db 52,165,229,241,113,216,49,21 -db 4,199,35,195,24,150,5,154 -db 7,18,128,226,235,39,178,117 -db 9,131,44,26,27,110,90,160 -db 
82,59,214,179,41,227,47,132 -db 83,209,0,237,32,252,177,91 -db 106,203,190,57,74,76,88,207 -db 208,239,170,251,67,77,51,133 -db 69,249,2,127,80,60,159,168 -db 81,163,64,143,146,157,56,245 -db 188,182,218,33,16,255,243,210 -db 205,12,19,236,95,151,68,23 -db 196,167,126,61,100,93,25,115 -db 96,129,79,220,34,42,144,136 -db 70,238,184,20,222,94,11,219 -db 224,50,58,10,73,6,36,92 -db 194,211,172,98,145,149,228,121 -db 231,200,55,109,141,213,78,169 -db 108,86,244,234,101,122,174,8 -db 186,120,37,46,28,166,180,198 -db 232,221,116,31,75,189,139,138 -db 112,62,181,102,72,3,246,14 -db 97,53,87,185,134,193,29,158 -db 225,248,152,17,105,217,142,148 -db 155,30,135,233,206,85,40,223 -db 140,161,137,13,191,230,66,104 -db 65,153,45,15,176,84,187,22 -db 99,124,119,123,242,107,111,197 -db 48,1,103,43,254,215,171,118 -db 202,130,201,125,250,89,71,240 -db 173,212,162,175,156,164,114,192 -db 183,253,147,38,54,63,247,204 -db 52,165,229,241,113,216,49,21 -db 4,199,35,195,24,150,5,154 -db 7,18,128,226,235,39,178,117 -db 9,131,44,26,27,110,90,160 -db 82,59,214,179,41,227,47,132 -db 83,209,0,237,32,252,177,91 -db 106,203,190,57,74,76,88,207 -db 208,239,170,251,67,77,51,133 -db 69,249,2,127,80,60,159,168 -db 81,163,64,143,146,157,56,245 -db 188,182,218,33,16,255,243,210 -db 205,12,19,236,95,151,68,23 -db 196,167,126,61,100,93,25,115 -db 96,129,79,220,34,42,144,136 -db 70,238,184,20,222,94,11,219 -db 224,50,58,10,73,6,36,92 -db 194,211,172,98,145,149,228,121 -db 231,200,55,109,141,213,78,169 -db 108,86,244,234,101,122,174,8 -db 186,120,37,46,28,166,180,198 -db 232,221,116,31,75,189,139,138 -db 112,62,181,102,72,3,246,14 -db 97,53,87,185,134,193,29,158 -db 225,248,152,17,105,217,142,148 -db 155,30,135,233,206,85,40,223 -db 140,161,137,13,191,230,66,104 -db 65,153,45,15,176,84,187,22 -db 99,124,119,123,242,107,111,197 -db 48,1,103,43,254,215,171,118 -db 202,130,201,125,250,89,71,240 -db 173,212,162,175,156,164,114,192 -db 183,253,147,38,54,63,247,204 -db 52,165,229,241,113,216,49,21 -db 
4,199,35,195,24,150,5,154 -db 7,18,128,226,235,39,178,117 -db 9,131,44,26,27,110,90,160 -db 82,59,214,179,41,227,47,132 -db 83,209,0,237,32,252,177,91 -db 106,203,190,57,74,76,88,207 -db 208,239,170,251,67,77,51,133 -db 69,249,2,127,80,60,159,168 -db 81,163,64,143,146,157,56,245 -db 188,182,218,33,16,255,243,210 -db 205,12,19,236,95,151,68,23 -db 196,167,126,61,100,93,25,115 -db 96,129,79,220,34,42,144,136 -db 70,238,184,20,222,94,11,219 -db 224,50,58,10,73,6,36,92 -db 194,211,172,98,145,149,228,121 -db 231,200,55,109,141,213,78,169 -db 108,86,244,234,101,122,174,8 -db 186,120,37,46,28,166,180,198 -db 232,221,116,31,75,189,139,138 -db 112,62,181,102,72,3,246,14 -db 97,53,87,185,134,193,29,158 -db 225,248,152,17,105,217,142,148 -db 155,30,135,233,206,85,40,223 -db 140,161,137,13,191,230,66,104 -db 65,153,45,15,176,84,187,22 -db 99,124,119,123,242,107,111,197 -db 48,1,103,43,254,215,171,118 -db 202,130,201,125,250,89,71,240 -db 173,212,162,175,156,164,114,192 -db 183,253,147,38,54,63,247,204 -db 52,165,229,241,113,216,49,21 -db 4,199,35,195,24,150,5,154 -db 7,18,128,226,235,39,178,117 -db 9,131,44,26,27,110,90,160 -db 82,59,214,179,41,227,47,132 -db 83,209,0,237,32,252,177,91 -db 106,203,190,57,74,76,88,207 -db 208,239,170,251,67,77,51,133 -db 69,249,2,127,80,60,159,168 -db 81,163,64,143,146,157,56,245 -db 188,182,218,33,16,255,243,210 -db 205,12,19,236,95,151,68,23 -db 196,167,126,61,100,93,25,115 -db 96,129,79,220,34,42,144,136 -db 70,238,184,20,222,94,11,219 -db 224,50,58,10,73,6,36,92 -db 194,211,172,98,145,149,228,121 -db 231,200,55,109,141,213,78,169 -db 108,86,244,234,101,122,174,8 -db 186,120,37,46,28,166,180,198 -db 232,221,116,31,75,189,139,138 -db 112,62,181,102,72,3,246,14 -db 97,53,87,185,134,193,29,158 -db 225,248,152,17,105,217,142,148 -db 155,30,135,233,206,85,40,223 -db 140,161,137,13,191,230,66,104 -db 65,153,45,15,176,84,187,22 -dd 1,2,4,8 -dd 16,32,64,128 -dd 27,54,0,0 -dd 0,0,0,0 -global _aes_nohw_encrypt -align 16 -_aes_nohw_encrypt: 
-L$_aes_nohw_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [28+esp] - mov eax,esp - sub esp,36 - and esp,-64 - lea ebx,[edi-127] - sub ebx,esp - neg ebx - and ebx,960 - sub esp,ebx - add esp,4 - mov DWORD [28+esp],eax - call L$004pic_point -L$004pic_point: - pop ebp - lea eax,[_OPENSSL_ia32cap_P] - lea ebp,[(L$AES_Te-L$004pic_point)+ebp] - lea ebx,[764+esp] - sub ebx,ebp - and ebx,768 - lea ebp,[2176+ebx*1+ebp] - bt DWORD [eax],25 - jnc NEAR L$005x86 - movq mm0,[esi] - movq mm4,[8+esi] - call __sse_AES_encrypt_compact - mov esp,DWORD [28+esp] - mov esi,DWORD [24+esp] - movq [esi],mm0 - movq [8+esi],mm4 - emms - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -L$005x86: - mov DWORD [24+esp],ebp - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - call __x86_AES_encrypt_compact - mov esp,DWORD [28+esp] - mov esi,DWORD [24+esp] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -__x86_AES_decrypt_compact: - mov DWORD [20+esp],edi - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi - mov edi,DWORD [ebp-128] - mov esi,DWORD [ebp-96] - mov edi,DWORD [ebp-64] - mov esi,DWORD [ebp-32] - mov edi,DWORD [ebp] - mov esi,DWORD [32+ebp] - mov edi,DWORD [64+ebp] - mov esi,DWORD [96+ebp] -align 16 -L$006loop: - mov esi,eax - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,dh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,ecx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,ebx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,ah - movzx edi,BYTE 
[edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,ecx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,eax - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,edx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - and edx,255 - movzx edx,BYTE [edx*1+ebp-128] - movzx ecx,ch - movzx ecx,BYTE [ecx*1+ebp-128] - shl ecx,8 - xor edx,ecx - mov ecx,esi - shr ebx,16 - and ebx,255 - movzx ebx,BYTE [ebx*1+ebp-128] - shl ebx,16 - xor edx,ebx - shr eax,24 - movzx eax,BYTE [eax*1+ebp-128] - shl eax,24 - xor edx,eax - mov edi,2155905152 - and edi,ecx - mov esi,edi - shr edi,7 - lea eax,[ecx*1+ecx] - sub esi,edi - and eax,4278124286 - and esi,454761243 - xor eax,esi - mov edi,2155905152 - and edi,eax - mov esi,edi - shr edi,7 - lea ebx,[eax*1+eax] - sub esi,edi - and ebx,4278124286 - and esi,454761243 - xor eax,ecx - xor ebx,esi - mov edi,2155905152 - and edi,ebx - mov esi,edi - shr edi,7 - lea ebp,[ebx*1+ebx] - sub esi,edi - and ebp,4278124286 - and esi,454761243 - xor ebx,ecx - rol ecx,8 - xor ebp,esi - xor ecx,eax - xor eax,ebp - xor ecx,ebx - xor ebx,ebp - rol eax,24 - xor ecx,ebp - rol ebx,16 - xor ecx,eax - rol ebp,8 - xor ecx,ebx - mov eax,DWORD [4+esp] - xor ecx,ebp - mov DWORD [12+esp],ecx - mov edi,2155905152 - and edi,edx - mov esi,edi - shr edi,7 - lea ebx,[edx*1+edx] - sub esi,edi - and ebx,4278124286 - and esi,454761243 - xor ebx,esi - mov edi,2155905152 - and edi,ebx - mov esi,edi - shr edi,7 - lea ecx,[ebx*1+ebx] - sub esi,edi - and ecx,4278124286 - and esi,454761243 - xor ebx,edx - xor ecx,esi - mov edi,2155905152 - and edi,ecx - mov esi,edi - shr edi,7 - lea ebp,[ecx*1+ecx] - sub esi,edi - and 
ebp,4278124286 - and esi,454761243 - xor ecx,edx - rol edx,8 - xor ebp,esi - xor edx,ebx - xor ebx,ebp - xor edx,ecx - xor ecx,ebp - rol ebx,24 - xor edx,ebp - rol ecx,16 - xor edx,ebx - rol ebp,8 - xor edx,ecx - mov ebx,DWORD [8+esp] - xor edx,ebp - mov DWORD [16+esp],edx - mov edi,2155905152 - and edi,eax - mov esi,edi - shr edi,7 - lea ecx,[eax*1+eax] - sub esi,edi - and ecx,4278124286 - and esi,454761243 - xor ecx,esi - mov edi,2155905152 - and edi,ecx - mov esi,edi - shr edi,7 - lea edx,[ecx*1+ecx] - sub esi,edi - and edx,4278124286 - and esi,454761243 - xor ecx,eax - xor edx,esi - mov edi,2155905152 - and edi,edx - mov esi,edi - shr edi,7 - lea ebp,[edx*1+edx] - sub esi,edi - and ebp,4278124286 - and esi,454761243 - xor edx,eax - rol eax,8 - xor ebp,esi - xor eax,ecx - xor ecx,ebp - xor eax,edx - xor edx,ebp - rol ecx,24 - xor eax,ebp - rol edx,16 - xor eax,ecx - rol ebp,8 - xor eax,edx - xor eax,ebp - mov edi,2155905152 - and edi,ebx - mov esi,edi - shr edi,7 - lea ecx,[ebx*1+ebx] - sub esi,edi - and ecx,4278124286 - and esi,454761243 - xor ecx,esi - mov edi,2155905152 - and edi,ecx - mov esi,edi - shr edi,7 - lea edx,[ecx*1+ecx] - sub esi,edi - and edx,4278124286 - and esi,454761243 - xor ecx,ebx - xor edx,esi - mov edi,2155905152 - and edi,edx - mov esi,edi - shr edi,7 - lea ebp,[edx*1+edx] - sub esi,edi - and ebp,4278124286 - and esi,454761243 - xor edx,ebx - rol ebx,8 - xor ebp,esi - xor ebx,ecx - xor ecx,ebp - xor ebx,edx - xor edx,ebp - rol ecx,24 - xor ebx,ebp - rol edx,16 - xor ebx,ecx - rol ebp,8 - xor ebx,edx - mov ecx,DWORD [12+esp] - xor ebx,ebp - mov edx,DWORD [16+esp] - mov edi,DWORD [20+esp] - mov ebp,DWORD [28+esp] - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - cmp edi,DWORD [24+esp] - mov DWORD [20+esp],edi - jb NEAR L$006loop - mov esi,eax - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,dh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,ecx - shr 
edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,ebx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,ah - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,ecx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - movzx esi,BYTE [esi*1+ebp-128] - movzx edi,bh - movzx edi,BYTE [edi*1+ebp-128] - shl edi,8 - xor esi,edi - mov edi,eax - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,16 - xor esi,edi - mov edi,edx - shr edi,24 - movzx edi,BYTE [edi*1+ebp-128] - shl edi,24 - xor esi,edi - mov edi,DWORD [20+esp] - and edx,255 - movzx edx,BYTE [edx*1+ebp-128] - movzx ecx,ch - movzx ecx,BYTE [ecx*1+ebp-128] - shl ecx,8 - xor edx,ecx - mov ecx,esi - shr ebx,16 - and ebx,255 - movzx ebx,BYTE [ebx*1+ebp-128] - shl ebx,16 - xor edx,ebx - mov ebx,DWORD [8+esp] - shr eax,24 - movzx eax,BYTE [eax*1+ebp-128] - shl eax,24 - xor edx,eax - mov eax,DWORD [4+esp] - xor eax,DWORD [16+edi] - xor ebx,DWORD [20+edi] - xor ecx,DWORD [24+edi] - xor edx,DWORD [28+edi] - ret -align 16 -__sse_AES_decrypt_compact: - pxor mm0,[edi] - pxor mm4,[8+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi - mov eax,454761243 - mov DWORD [8+esp],eax - mov DWORD [12+esp],eax - mov eax,DWORD [ebp-128] - mov ebx,DWORD [ebp-96] - mov ecx,DWORD [ebp-64] - mov edx,DWORD [ebp-32] - mov eax,DWORD [ebp] - mov ebx,DWORD [32+ebp] - mov ecx,DWORD [64+ebp] - mov edx,DWORD [96+ebp] -align 16 -L$007loop: - pshufw mm1,mm0,12 - pshufw mm5,mm4,9 - movd eax,mm1 - movd ebx,mm5 - mov DWORD [20+esp],edi - movzx esi,al - movzx edx,ah - pshufw mm2,mm0,6 - movzx ecx,BYTE [esi*1+ebp-128] - movzx edi,bl - movzx 
edx,BYTE [edx*1+ebp-128] - shr eax,16 - shl edx,8 - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,16 - pshufw mm6,mm4,3 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,ah - shl esi,24 - shr ebx,16 - or edx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shl esi,24 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,al - shl esi,8 - movd eax,mm2 - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bl - shl esi,16 - movd ebx,mm6 - movd mm0,ecx - movzx ecx,BYTE [edi*1+ebp-128] - movzx edi,al - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bl - or edx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,ah - shl esi,16 - shr eax,16 - or edx,esi - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,bh - shr ebx,16 - shl esi,8 - movd mm1,edx - movzx edx,BYTE [edi*1+ebp-128] - movzx edi,bh - shl edx,24 - and ebx,255 - or edx,esi - punpckldq mm0,mm1 - movzx esi,BYTE [edi*1+ebp-128] - movzx edi,al - shl esi,8 - movzx eax,ah - movzx ebx,BYTE [ebx*1+ebp-128] - or ecx,esi - movzx esi,BYTE [edi*1+ebp-128] - or edx,ebx - shl esi,16 - movzx eax,BYTE [eax*1+ebp-128] - or edx,esi - shl eax,24 - or ecx,eax - mov edi,DWORD [20+esp] - movd mm4,edx - movd mm5,ecx - punpckldq mm4,mm5 - add edi,16 - cmp edi,DWORD [24+esp] - ja NEAR L$008out - movq mm3,mm0 - movq mm7,mm4 - pshufw mm2,mm0,228 - pshufw mm6,mm4,228 - movq mm1,mm0 - movq mm5,mm4 - pshufw mm0,mm0,177 - pshufw mm4,mm4,177 - pslld mm2,8 - pslld mm6,8 - psrld mm3,8 - psrld mm7,8 - pxor mm0,mm2 - pxor mm4,mm6 - pxor mm0,mm3 - pxor mm4,mm7 - pslld mm2,16 - pslld mm6,16 - psrld mm3,16 - psrld mm7,16 - pxor mm0,mm2 - pxor mm4,mm6 - pxor mm0,mm3 - pxor mm4,mm7 - movq mm3,[8+esp] - pxor mm2,mm2 - pxor mm6,mm6 - pcmpgtb mm2,mm1 - pcmpgtb mm6,mm5 - pand mm2,mm3 - pand mm6,mm3 - paddb mm1,mm1 - paddb mm5,mm5 - pxor mm1,mm2 - pxor mm5,mm6 - movq mm3,mm1 - movq mm7,mm5 - movq mm2,mm1 - movq mm6,mm5 - pxor mm0,mm1 - pxor mm4,mm5 - pslld mm3,24 - pslld mm7,24 - psrld mm2,8 - psrld mm6,8 - pxor mm0,mm3 - pxor 
mm4,mm7 - pxor mm0,mm2 - pxor mm4,mm6 - movq mm2,[8+esp] - pxor mm3,mm3 - pxor mm7,mm7 - pcmpgtb mm3,mm1 - pcmpgtb mm7,mm5 - pand mm3,mm2 - pand mm7,mm2 - paddb mm1,mm1 - paddb mm5,mm5 - pxor mm1,mm3 - pxor mm5,mm7 - pshufw mm3,mm1,177 - pshufw mm7,mm5,177 - pxor mm0,mm1 - pxor mm4,mm5 - pxor mm0,mm3 - pxor mm4,mm7 - pxor mm3,mm3 - pxor mm7,mm7 - pcmpgtb mm3,mm1 - pcmpgtb mm7,mm5 - pand mm3,mm2 - pand mm7,mm2 - paddb mm1,mm1 - paddb mm5,mm5 - pxor mm1,mm3 - pxor mm5,mm7 - pxor mm0,mm1 - pxor mm4,mm5 - movq mm3,mm1 - movq mm7,mm5 - pshufw mm2,mm1,177 - pshufw mm6,mm5,177 - pxor mm0,mm2 - pxor mm4,mm6 - pslld mm1,8 - pslld mm5,8 - psrld mm3,8 - psrld mm7,8 - movq mm2,[edi] - movq mm6,[8+edi] - pxor mm0,mm1 - pxor mm4,mm5 - pxor mm0,mm3 - pxor mm4,mm7 - mov eax,DWORD [ebp-128] - pslld mm1,16 - pslld mm5,16 - mov ebx,DWORD [ebp-64] - psrld mm3,16 - psrld mm7,16 - mov ecx,DWORD [ebp] - pxor mm0,mm1 - pxor mm4,mm5 - mov edx,DWORD [64+ebp] - pxor mm0,mm3 - pxor mm4,mm7 - pxor mm0,mm2 - pxor mm4,mm6 - jmp NEAR L$007loop -align 16 -L$008out: - pxor mm0,[edi] - pxor mm4,[8+edi] - ret -align 16 -__x86_AES_decrypt: - mov DWORD [20+esp],edi - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [24+esp],esi -align 16 -L$009loop: - mov esi,eax - and esi,255 - mov esi,DWORD [esi*8+ebp] - movzx edi,dh - xor esi,DWORD [3+edi*8+ebp] - mov edi,ecx - shr edi,16 - and edi,255 - xor esi,DWORD [2+edi*8+ebp] - mov edi,ebx - shr edi,24 - xor esi,DWORD [1+edi*8+ebp] - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - mov esi,DWORD [esi*8+ebp] - movzx edi,ah - xor esi,DWORD [3+edi*8+ebp] - mov edi,edx - shr edi,16 - and edi,255 - xor esi,DWORD [2+edi*8+ebp] - mov edi,ecx - shr edi,24 - xor esi,DWORD [1+edi*8+ebp] - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - mov esi,DWORD [esi*8+ebp] - movzx edi,bh - xor esi,DWORD [3+edi*8+ebp] - mov edi,eax - shr edi,16 - and 
edi,255 - xor esi,DWORD [2+edi*8+ebp] - mov edi,edx - shr edi,24 - xor esi,DWORD [1+edi*8+ebp] - mov edi,DWORD [20+esp] - and edx,255 - mov edx,DWORD [edx*8+ebp] - movzx ecx,ch - xor edx,DWORD [3+ecx*8+ebp] - mov ecx,esi - shr ebx,16 - and ebx,255 - xor edx,DWORD [2+ebx*8+ebp] - mov ebx,DWORD [8+esp] - shr eax,24 - xor edx,DWORD [1+eax*8+ebp] - mov eax,DWORD [4+esp] - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - cmp edi,DWORD [24+esp] - mov DWORD [20+esp],edi - jb NEAR L$009loop - lea ebp,[2176+ebp] - mov edi,DWORD [ebp-128] - mov esi,DWORD [ebp-96] - mov edi,DWORD [ebp-64] - mov esi,DWORD [ebp-32] - mov edi,DWORD [ebp] - mov esi,DWORD [32+ebp] - mov edi,DWORD [64+ebp] - mov esi,DWORD [96+ebp] - lea ebp,[ebp-128] - mov esi,eax - and esi,255 - movzx esi,BYTE [esi*1+ebp] - movzx edi,dh - movzx edi,BYTE [edi*1+ebp] - shl edi,8 - xor esi,edi - mov edi,ecx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp] - shl edi,16 - xor esi,edi - mov edi,ebx - shr edi,24 - movzx edi,BYTE [edi*1+ebp] - shl edi,24 - xor esi,edi - mov DWORD [4+esp],esi - mov esi,ebx - and esi,255 - movzx esi,BYTE [esi*1+ebp] - movzx edi,ah - movzx edi,BYTE [edi*1+ebp] - shl edi,8 - xor esi,edi - mov edi,edx - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp] - shl edi,16 - xor esi,edi - mov edi,ecx - shr edi,24 - movzx edi,BYTE [edi*1+ebp] - shl edi,24 - xor esi,edi - mov DWORD [8+esp],esi - mov esi,ecx - and esi,255 - movzx esi,BYTE [esi*1+ebp] - movzx edi,bh - movzx edi,BYTE [edi*1+ebp] - shl edi,8 - xor esi,edi - mov edi,eax - shr edi,16 - and edi,255 - movzx edi,BYTE [edi*1+ebp] - shl edi,16 - xor esi,edi - mov edi,edx - shr edi,24 - movzx edi,BYTE [edi*1+ebp] - shl edi,24 - xor esi,edi - mov edi,DWORD [20+esp] - and edx,255 - movzx edx,BYTE [edx*1+ebp] - movzx ecx,ch - movzx ecx,BYTE [ecx*1+ebp] - shl ecx,8 - xor edx,ecx - mov ecx,esi - shr ebx,16 - and ebx,255 - movzx ebx,BYTE [ebx*1+ebp] - shl ebx,16 - xor edx,ebx - mov 
ebx,DWORD [8+esp] - shr eax,24 - movzx eax,BYTE [eax*1+ebp] - shl eax,24 - xor edx,eax - mov eax,DWORD [4+esp] - lea ebp,[ebp-2048] - add edi,16 - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - ret -align 64 -L$AES_Td: -dd 1353184337,1353184337 -dd 1399144830,1399144830 -dd 3282310938,3282310938 -dd 2522752826,2522752826 -dd 3412831035,3412831035 -dd 4047871263,4047871263 -dd 2874735276,2874735276 -dd 2466505547,2466505547 -dd 1442459680,1442459680 -dd 4134368941,4134368941 -dd 2440481928,2440481928 -dd 625738485,625738485 -dd 4242007375,4242007375 -dd 3620416197,3620416197 -dd 2151953702,2151953702 -dd 2409849525,2409849525 -dd 1230680542,1230680542 -dd 1729870373,1729870373 -dd 2551114309,2551114309 -dd 3787521629,3787521629 -dd 41234371,41234371 -dd 317738113,317738113 -dd 2744600205,2744600205 -dd 3338261355,3338261355 -dd 3881799427,3881799427 -dd 2510066197,2510066197 -dd 3950669247,3950669247 -dd 3663286933,3663286933 -dd 763608788,763608788 -dd 3542185048,3542185048 -dd 694804553,694804553 -dd 1154009486,1154009486 -dd 1787413109,1787413109 -dd 2021232372,2021232372 -dd 1799248025,1799248025 -dd 3715217703,3715217703 -dd 3058688446,3058688446 -dd 397248752,397248752 -dd 1722556617,1722556617 -dd 3023752829,3023752829 -dd 407560035,407560035 -dd 2184256229,2184256229 -dd 1613975959,1613975959 -dd 1165972322,1165972322 -dd 3765920945,3765920945 -dd 2226023355,2226023355 -dd 480281086,480281086 -dd 2485848313,2485848313 -dd 1483229296,1483229296 -dd 436028815,436028815 -dd 2272059028,2272059028 -dd 3086515026,3086515026 -dd 601060267,601060267 -dd 3791801202,3791801202 -dd 1468997603,1468997603 -dd 715871590,715871590 -dd 120122290,120122290 -dd 63092015,63092015 -dd 2591802758,2591802758 -dd 2768779219,2768779219 -dd 4068943920,4068943920 -dd 2997206819,2997206819 -dd 3127509762,3127509762 -dd 1552029421,1552029421 -dd 723308426,723308426 -dd 2461301159,2461301159 -dd 4042393587,4042393587 -dd 
2715969870,2715969870 -dd 3455375973,3455375973 -dd 3586000134,3586000134 -dd 526529745,526529745 -dd 2331944644,2331944644 -dd 2639474228,2639474228 -dd 2689987490,2689987490 -dd 853641733,853641733 -dd 1978398372,1978398372 -dd 971801355,971801355 -dd 2867814464,2867814464 -dd 111112542,111112542 -dd 1360031421,1360031421 -dd 4186579262,4186579262 -dd 1023860118,1023860118 -dd 2919579357,2919579357 -dd 1186850381,1186850381 -dd 3045938321,3045938321 -dd 90031217,90031217 -dd 1876166148,1876166148 -dd 4279586912,4279586912 -dd 620468249,620468249 -dd 2548678102,2548678102 -dd 3426959497,3426959497 -dd 2006899047,2006899047 -dd 3175278768,3175278768 -dd 2290845959,2290845959 -dd 945494503,945494503 -dd 3689859193,3689859193 -dd 1191869601,1191869601 -dd 3910091388,3910091388 -dd 3374220536,3374220536 -dd 0,0 -dd 2206629897,2206629897 -dd 1223502642,1223502642 -dd 2893025566,2893025566 -dd 1316117100,1316117100 -dd 4227796733,4227796733 -dd 1446544655,1446544655 -dd 517320253,517320253 -dd 658058550,658058550 -dd 1691946762,1691946762 -dd 564550760,564550760 -dd 3511966619,3511966619 -dd 976107044,976107044 -dd 2976320012,2976320012 -dd 266819475,266819475 -dd 3533106868,3533106868 -dd 2660342555,2660342555 -dd 1338359936,1338359936 -dd 2720062561,2720062561 -dd 1766553434,1766553434 -dd 370807324,370807324 -dd 179999714,179999714 -dd 3844776128,3844776128 -dd 1138762300,1138762300 -dd 488053522,488053522 -dd 185403662,185403662 -dd 2915535858,2915535858 -dd 3114841645,3114841645 -dd 3366526484,3366526484 -dd 2233069911,2233069911 -dd 1275557295,1275557295 -dd 3151862254,3151862254 -dd 4250959779,4250959779 -dd 2670068215,2670068215 -dd 3170202204,3170202204 -dd 3309004356,3309004356 -dd 880737115,880737115 -dd 1982415755,1982415755 -dd 3703972811,3703972811 -dd 1761406390,1761406390 -dd 1676797112,1676797112 -dd 3403428311,3403428311 -dd 277177154,277177154 -dd 1076008723,1076008723 -dd 538035844,538035844 -dd 2099530373,2099530373 -dd 4164795346,4164795346 -dd 
288553390,288553390 -dd 1839278535,1839278535 -dd 1261411869,1261411869 -dd 4080055004,4080055004 -dd 3964831245,3964831245 -dd 3504587127,3504587127 -dd 1813426987,1813426987 -dd 2579067049,2579067049 -dd 4199060497,4199060497 -dd 577038663,577038663 -dd 3297574056,3297574056 -dd 440397984,440397984 -dd 3626794326,3626794326 -dd 4019204898,4019204898 -dd 3343796615,3343796615 -dd 3251714265,3251714265 -dd 4272081548,4272081548 -dd 906744984,906744984 -dd 3481400742,3481400742 -dd 685669029,685669029 -dd 646887386,646887386 -dd 2764025151,2764025151 -dd 3835509292,3835509292 -dd 227702864,227702864 -dd 2613862250,2613862250 -dd 1648787028,1648787028 -dd 3256061430,3256061430 -dd 3904428176,3904428176 -dd 1593260334,1593260334 -dd 4121936770,4121936770 -dd 3196083615,3196083615 -dd 2090061929,2090061929 -dd 2838353263,2838353263 -dd 3004310991,3004310991 -dd 999926984,999926984 -dd 2809993232,2809993232 -dd 1852021992,1852021992 -dd 2075868123,2075868123 -dd 158869197,158869197 -dd 4095236462,4095236462 -dd 28809964,28809964 -dd 2828685187,2828685187 -dd 1701746150,1701746150 -dd 2129067946,2129067946 -dd 147831841,147831841 -dd 3873969647,3873969647 -dd 3650873274,3650873274 -dd 3459673930,3459673930 -dd 3557400554,3557400554 -dd 3598495785,3598495785 -dd 2947720241,2947720241 -dd 824393514,824393514 -dd 815048134,815048134 -dd 3227951669,3227951669 -dd 935087732,935087732 -dd 2798289660,2798289660 -dd 2966458592,2966458592 -dd 366520115,366520115 -dd 1251476721,1251476721 -dd 4158319681,4158319681 -dd 240176511,240176511 -dd 804688151,804688151 -dd 2379631990,2379631990 -dd 1303441219,1303441219 -dd 1414376140,1414376140 -dd 3741619940,3741619940 -dd 3820343710,3820343710 -dd 461924940,461924940 -dd 3089050817,3089050817 -dd 2136040774,2136040774 -dd 82468509,82468509 -dd 1563790337,1563790337 -dd 1937016826,1937016826 -dd 776014843,776014843 -dd 1511876531,1511876531 -dd 1389550482,1389550482 -dd 861278441,861278441 -dd 323475053,323475053 -dd 
2355222426,2355222426 -dd 2047648055,2047648055 -dd 2383738969,2383738969 -dd 2302415851,2302415851 -dd 3995576782,3995576782 -dd 902390199,902390199 -dd 3991215329,3991215329 -dd 1018251130,1018251130 -dd 1507840668,1507840668 -dd 1064563285,1064563285 -dd 2043548696,2043548696 -dd 3208103795,3208103795 -dd 3939366739,3939366739 -dd 1537932639,1537932639 -dd 342834655,342834655 -dd 2262516856,2262516856 -dd 2180231114,2180231114 -dd 1053059257,1053059257 -dd 741614648,741614648 -dd 1598071746,1598071746 -dd 1925389590,1925389590 -dd 203809468,203809468 -dd 2336832552,2336832552 -dd 1100287487,1100287487 -dd 1895934009,1895934009 -dd 3736275976,3736275976 -dd 2632234200,2632234200 -dd 2428589668,2428589668 -dd 1636092795,1636092795 -dd 1890988757,1890988757 -dd 1952214088,1952214088 -dd 1113045200,1113045200 -db 82,9,106,213,48,54,165,56 -db 191,64,163,158,129,243,215,251 -db 124,227,57,130,155,47,255,135 -db 52,142,67,68,196,222,233,203 -db 84,123,148,50,166,194,35,61 -db 238,76,149,11,66,250,195,78 -db 8,46,161,102,40,217,36,178 -db 118,91,162,73,109,139,209,37 -db 114,248,246,100,134,104,152,22 -db 212,164,92,204,93,101,182,146 -db 108,112,72,80,253,237,185,218 -db 94,21,70,87,167,141,157,132 -db 144,216,171,0,140,188,211,10 -db 247,228,88,5,184,179,69,6 -db 208,44,30,143,202,63,15,2 -db 193,175,189,3,1,19,138,107 -db 58,145,17,65,79,103,220,234 -db 151,242,207,206,240,180,230,115 -db 150,172,116,34,231,173,53,133 -db 226,249,55,232,28,117,223,110 -db 71,241,26,113,29,41,197,137 -db 111,183,98,14,170,24,190,27 -db 252,86,62,75,198,210,121,32 -db 154,219,192,254,120,205,90,244 -db 31,221,168,51,136,7,199,49 -db 177,18,16,89,39,128,236,95 -db 96,81,127,169,25,181,74,13 -db 45,229,122,159,147,201,156,239 -db 160,224,59,77,174,42,245,176 -db 200,235,187,60,131,83,153,97 -db 23,43,4,126,186,119,214,38 -db 225,105,20,99,85,33,12,125 -db 82,9,106,213,48,54,165,56 -db 191,64,163,158,129,243,215,251 -db 124,227,57,130,155,47,255,135 -db 52,142,67,68,196,222,233,203 -db 
84,123,148,50,166,194,35,61 -db 238,76,149,11,66,250,195,78 -db 8,46,161,102,40,217,36,178 -db 118,91,162,73,109,139,209,37 -db 114,248,246,100,134,104,152,22 -db 212,164,92,204,93,101,182,146 -db 108,112,72,80,253,237,185,218 -db 94,21,70,87,167,141,157,132 -db 144,216,171,0,140,188,211,10 -db 247,228,88,5,184,179,69,6 -db 208,44,30,143,202,63,15,2 -db 193,175,189,3,1,19,138,107 -db 58,145,17,65,79,103,220,234 -db 151,242,207,206,240,180,230,115 -db 150,172,116,34,231,173,53,133 -db 226,249,55,232,28,117,223,110 -db 71,241,26,113,29,41,197,137 -db 111,183,98,14,170,24,190,27 -db 252,86,62,75,198,210,121,32 -db 154,219,192,254,120,205,90,244 -db 31,221,168,51,136,7,199,49 -db 177,18,16,89,39,128,236,95 -db 96,81,127,169,25,181,74,13 -db 45,229,122,159,147,201,156,239 -db 160,224,59,77,174,42,245,176 -db 200,235,187,60,131,83,153,97 -db 23,43,4,126,186,119,214,38 -db 225,105,20,99,85,33,12,125 -db 82,9,106,213,48,54,165,56 -db 191,64,163,158,129,243,215,251 -db 124,227,57,130,155,47,255,135 -db 52,142,67,68,196,222,233,203 -db 84,123,148,50,166,194,35,61 -db 238,76,149,11,66,250,195,78 -db 8,46,161,102,40,217,36,178 -db 118,91,162,73,109,139,209,37 -db 114,248,246,100,134,104,152,22 -db 212,164,92,204,93,101,182,146 -db 108,112,72,80,253,237,185,218 -db 94,21,70,87,167,141,157,132 -db 144,216,171,0,140,188,211,10 -db 247,228,88,5,184,179,69,6 -db 208,44,30,143,202,63,15,2 -db 193,175,189,3,1,19,138,107 -db 58,145,17,65,79,103,220,234 -db 151,242,207,206,240,180,230,115 -db 150,172,116,34,231,173,53,133 -db 226,249,55,232,28,117,223,110 -db 71,241,26,113,29,41,197,137 -db 111,183,98,14,170,24,190,27 -db 252,86,62,75,198,210,121,32 -db 154,219,192,254,120,205,90,244 -db 31,221,168,51,136,7,199,49 -db 177,18,16,89,39,128,236,95 -db 96,81,127,169,25,181,74,13 -db 45,229,122,159,147,201,156,239 -db 160,224,59,77,174,42,245,176 -db 200,235,187,60,131,83,153,97 -db 23,43,4,126,186,119,214,38 -db 225,105,20,99,85,33,12,125 -db 82,9,106,213,48,54,165,56 -db 
191,64,163,158,129,243,215,251 -db 124,227,57,130,155,47,255,135 -db 52,142,67,68,196,222,233,203 -db 84,123,148,50,166,194,35,61 -db 238,76,149,11,66,250,195,78 -db 8,46,161,102,40,217,36,178 -db 118,91,162,73,109,139,209,37 -db 114,248,246,100,134,104,152,22 -db 212,164,92,204,93,101,182,146 -db 108,112,72,80,253,237,185,218 -db 94,21,70,87,167,141,157,132 -db 144,216,171,0,140,188,211,10 -db 247,228,88,5,184,179,69,6 -db 208,44,30,143,202,63,15,2 -db 193,175,189,3,1,19,138,107 -db 58,145,17,65,79,103,220,234 -db 151,242,207,206,240,180,230,115 -db 150,172,116,34,231,173,53,133 -db 226,249,55,232,28,117,223,110 -db 71,241,26,113,29,41,197,137 -db 111,183,98,14,170,24,190,27 -db 252,86,62,75,198,210,121,32 -db 154,219,192,254,120,205,90,244 -db 31,221,168,51,136,7,199,49 -db 177,18,16,89,39,128,236,95 -db 96,81,127,169,25,181,74,13 -db 45,229,122,159,147,201,156,239 -db 160,224,59,77,174,42,245,176 -db 200,235,187,60,131,83,153,97 -db 23,43,4,126,186,119,214,38 -db 225,105,20,99,85,33,12,125 -global _aes_nohw_decrypt -align 16 -_aes_nohw_decrypt: -L$_aes_nohw_decrypt_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [28+esp] - mov eax,esp - sub esp,36 - and esp,-64 - lea ebx,[edi-127] - sub ebx,esp - neg ebx - and ebx,960 - sub esp,ebx - add esp,4 - mov DWORD [28+esp],eax - call L$010pic_point -L$010pic_point: - pop ebp - lea eax,[_OPENSSL_ia32cap_P] - lea ebp,[(L$AES_Td-L$010pic_point)+ebp] - lea ebx,[764+esp] - sub ebx,ebp - and ebx,768 - lea ebp,[2176+ebx*1+ebp] - bt DWORD [eax],25 - jnc NEAR L$011x86 - movq mm0,[esi] - movq mm4,[8+esi] - call __sse_AES_decrypt_compact - mov esp,DWORD [28+esp] - mov esi,DWORD [24+esp] - movq [esi],mm0 - movq [8+esi],mm4 - emms - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -L$011x86: - mov DWORD [24+esp],ebp - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - call __x86_AES_decrypt_compact - mov esp,DWORD [28+esp] - mov esi,DWORD 
[24+esp] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_nohw_cbc_encrypt -align 16 -_aes_nohw_cbc_encrypt: -L$_aes_nohw_cbc_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov ecx,DWORD [28+esp] - cmp ecx,0 - je NEAR L$012drop_out - call L$013pic_point -L$013pic_point: - pop ebp - lea eax,[_OPENSSL_ia32cap_P] - cmp DWORD [40+esp],0 - lea ebp,[(L$AES_Te-L$013pic_point)+ebp] - jne NEAR L$014picked_te - lea ebp,[(L$AES_Td-L$AES_Te)+ebp] -L$014picked_te: - pushfd - cld - cmp ecx,512 - jb NEAR L$015slow_way - test ecx,15 - jnz NEAR L$015slow_way - bt DWORD [eax],28 - jc NEAR L$015slow_way - lea esi,[esp-324] - and esi,-64 - mov eax,ebp - lea ebx,[2304+ebp] - mov edx,esi - and eax,4095 - and ebx,4095 - and edx,4095 - cmp edx,ebx - jb NEAR L$016tbl_break_out - sub edx,ebx - sub esi,edx - jmp NEAR L$017tbl_ok -align 4 -L$016tbl_break_out: - sub edx,eax - and edx,4095 - add edx,384 - sub esi,edx -align 4 -L$017tbl_ok: - lea edx,[24+esp] - xchg esp,esi - add esp,4 - mov DWORD [24+esp],ebp - mov DWORD [28+esp],esi - mov eax,DWORD [edx] - mov ebx,DWORD [4+edx] - mov edi,DWORD [12+edx] - mov esi,DWORD [16+edx] - mov edx,DWORD [20+edx] - mov DWORD [32+esp],eax - mov DWORD [36+esp],ebx - mov DWORD [40+esp],ecx - mov DWORD [44+esp],edi - mov DWORD [48+esp],esi - mov DWORD [316+esp],0 - mov ebx,edi - mov ecx,61 - sub ebx,ebp - mov esi,edi - and ebx,4095 - lea edi,[76+esp] - cmp ebx,2304 - jb NEAR L$018do_copy - cmp ebx,3852 - jb NEAR L$019skip_copy -align 4 -L$018do_copy: - mov DWORD [44+esp],edi -dd 2784229001 -L$019skip_copy: - mov edi,16 -align 4 -L$020prefetch_tbl: - mov eax,DWORD [ebp] - mov ebx,DWORD [32+ebp] - mov ecx,DWORD [64+ebp] - mov esi,DWORD [96+ebp] - lea ebp,[128+ebp] - sub edi,1 - jnz NEAR L$020prefetch_tbl - sub ebp,2048 - mov esi,DWORD [32+esp] - mov edi,DWORD [48+esp] - cmp edx,0 - je NEAR L$021fast_decrypt - mov eax,DWORD [edi] - mov ebx,DWORD 
[4+edi] -align 16 -L$022fast_enc_loop: - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - xor eax,DWORD [esi] - xor ebx,DWORD [4+esi] - xor ecx,DWORD [8+esi] - xor edx,DWORD [12+esi] - mov edi,DWORD [44+esp] - call __x86_AES_encrypt - mov esi,DWORD [32+esp] - mov edi,DWORD [36+esp] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - lea esi,[16+esi] - mov ecx,DWORD [40+esp] - mov DWORD [32+esp],esi - lea edx,[16+edi] - mov DWORD [36+esp],edx - sub ecx,16 - mov DWORD [40+esp],ecx - jnz NEAR L$022fast_enc_loop - mov esi,DWORD [48+esp] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - cmp DWORD [316+esp],0 - mov edi,DWORD [44+esp] - je NEAR L$023skip_ezero - mov ecx,60 - xor eax,eax -align 4 -dd 2884892297 -L$023skip_ezero: - mov esp,DWORD [28+esp] - popfd -L$012drop_out: - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$021fast_decrypt: - cmp esi,DWORD [36+esp] - je NEAR L$024fast_dec_in_place - mov DWORD [52+esp],edi -align 4 -align 16 -L$025fast_dec_loop: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov edi,DWORD [44+esp] - call __x86_AES_decrypt - mov edi,DWORD [52+esp] - mov esi,DWORD [40+esp] - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov edi,DWORD [36+esp] - mov esi,DWORD [32+esp] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov ecx,DWORD [40+esp] - mov DWORD [52+esp],esi - lea esi,[16+esi] - mov DWORD [32+esp],esi - lea edi,[16+edi] - mov DWORD [36+esp],edi - sub ecx,16 - mov DWORD [40+esp],ecx - jnz NEAR L$025fast_dec_loop - mov edi,DWORD [52+esp] - mov esi,DWORD [48+esp] - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD 
[12+esi],edx - jmp NEAR L$026fast_dec_out -align 16 -L$024fast_dec_in_place: -L$027fast_dec_in_place_loop: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - lea edi,[60+esp] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov edi,DWORD [44+esp] - call __x86_AES_decrypt - mov edi,DWORD [48+esp] - mov esi,DWORD [36+esp] - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - lea esi,[16+esi] - mov DWORD [36+esp],esi - lea esi,[60+esp] - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov esi,DWORD [32+esp] - mov ecx,DWORD [40+esp] - lea esi,[16+esi] - mov DWORD [32+esp],esi - sub ecx,16 - mov DWORD [40+esp],ecx - jnz NEAR L$027fast_dec_in_place_loop -align 4 -L$026fast_dec_out: - cmp DWORD [316+esp],0 - mov edi,DWORD [44+esp] - je NEAR L$028skip_dzero - mov ecx,60 - xor eax,eax -align 4 -dd 2884892297 -L$028skip_dzero: - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$015slow_way: - mov eax,DWORD [eax] - mov edi,DWORD [36+esp] - lea esi,[esp-80] - and esi,-64 - lea ebx,[edi-143] - sub ebx,esi - neg ebx - and ebx,960 - sub esi,ebx - lea ebx,[768+esi] - sub ebx,ebp - and ebx,768 - lea ebp,[2176+ebx*1+ebp] - lea edx,[24+esp] - xchg esp,esi - add esp,4 - mov DWORD [24+esp],ebp - mov DWORD [28+esp],esi - mov DWORD [52+esp],eax - mov eax,DWORD [edx] - mov ebx,DWORD [4+edx] - mov esi,DWORD [16+edx] - mov edx,DWORD [20+edx] - mov DWORD [32+esp],eax - mov DWORD [36+esp],ebx - mov DWORD [40+esp],ecx - mov DWORD [44+esp],edi - mov DWORD [48+esp],esi - mov edi,esi - mov esi,eax - cmp edx,0 - je NEAR L$029slow_decrypt - cmp ecx,16 - mov edx,ebx - jb NEAR 
L$030slow_enc_tail - bt DWORD [52+esp],25 - jnc NEAR L$031slow_enc_x86 - movq mm0,[edi] - movq mm4,[8+edi] -align 16 -L$032slow_enc_loop_sse: - pxor mm0,[esi] - pxor mm4,[8+esi] - mov edi,DWORD [44+esp] - call __sse_AES_encrypt_compact - mov esi,DWORD [32+esp] - mov edi,DWORD [36+esp] - mov ecx,DWORD [40+esp] - movq [edi],mm0 - movq [8+edi],mm4 - lea esi,[16+esi] - mov DWORD [32+esp],esi - lea edx,[16+edi] - mov DWORD [36+esp],edx - sub ecx,16 - cmp ecx,16 - mov DWORD [40+esp],ecx - jae NEAR L$032slow_enc_loop_sse - test ecx,15 - jnz NEAR L$030slow_enc_tail - mov esi,DWORD [48+esp] - movq [esi],mm0 - movq [8+esi],mm4 - emms - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$031slow_enc_x86: - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] -align 4 -L$033slow_enc_loop_x86: - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - xor eax,DWORD [esi] - xor ebx,DWORD [4+esi] - xor ecx,DWORD [8+esi] - xor edx,DWORD [12+esi] - mov edi,DWORD [44+esp] - call __x86_AES_encrypt_compact - mov esi,DWORD [32+esp] - mov edi,DWORD [36+esp] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov ecx,DWORD [40+esp] - lea esi,[16+esi] - mov DWORD [32+esp],esi - lea edx,[16+edi] - mov DWORD [36+esp],edx - sub ecx,16 - cmp ecx,16 - mov DWORD [40+esp],ecx - jae NEAR L$033slow_enc_loop_x86 - test ecx,15 - jnz NEAR L$030slow_enc_tail - mov esi,DWORD [48+esp] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$030slow_enc_tail: - emms - mov edi,edx - mov ebx,16 - sub ebx,ecx - cmp edi,esi - je NEAR L$034enc_in_place -align 4 -dd 2767451785 - jmp NEAR L$035enc_skip_in_place -L$034enc_in_place: - lea edi,[ecx*1+edi] -L$035enc_skip_in_place: - mov ecx,ebx - xor eax,eax -align 4 -dd 2868115081 - mov edi,DWORD [48+esp] 
- mov esi,edx - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov DWORD [40+esp],16 - jmp NEAR L$033slow_enc_loop_x86 -align 16 -L$029slow_decrypt: - bt DWORD [52+esp],25 - jnc NEAR L$036slow_dec_loop_x86 -align 4 -L$037slow_dec_loop_sse: - movq mm0,[esi] - movq mm4,[8+esi] - mov edi,DWORD [44+esp] - call __sse_AES_decrypt_compact - mov esi,DWORD [32+esp] - lea eax,[60+esp] - mov ebx,DWORD [36+esp] - mov ecx,DWORD [40+esp] - mov edi,DWORD [48+esp] - movq mm1,[esi] - movq mm5,[8+esi] - pxor mm0,[edi] - pxor mm4,[8+edi] - movq [edi],mm1 - movq [8+edi],mm5 - sub ecx,16 - jc NEAR L$038slow_dec_partial_sse - movq [ebx],mm0 - movq [8+ebx],mm4 - lea ebx,[16+ebx] - mov DWORD [36+esp],ebx - lea esi,[16+esi] - mov DWORD [32+esp],esi - mov DWORD [40+esp],ecx - jnz NEAR L$037slow_dec_loop_sse - emms - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$038slow_dec_partial_sse: - movq [eax],mm0 - movq [8+eax],mm4 - emms - add ecx,16 - mov edi,ebx - mov esi,eax -align 4 -dd 2767451785 - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$036slow_dec_loop_x86: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - lea edi,[60+esp] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov edi,DWORD [44+esp] - call __x86_AES_decrypt_compact - mov edi,DWORD [48+esp] - mov esi,DWORD [40+esp] - xor eax,DWORD [edi] - xor ebx,DWORD [4+edi] - xor ecx,DWORD [8+edi] - xor edx,DWORD [12+edi] - sub esi,16 - jc NEAR L$039slow_dec_partial_x86 - mov DWORD [40+esp],esi - mov esi,DWORD [36+esp] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - lea esi,[16+esi] - mov DWORD [36+esp],esi - lea esi,[60+esp] - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov 
DWORD [12+edi],edx - mov esi,DWORD [32+esp] - lea esi,[16+esi] - mov DWORD [32+esp],esi - jnz NEAR L$036slow_dec_loop_x86 - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret - pushfd -align 16 -L$039slow_dec_partial_x86: - lea esi,[60+esp] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - mov esi,DWORD [32+esp] - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov ecx,DWORD [40+esp] - mov edi,DWORD [36+esp] - lea esi,[60+esp] -align 4 -dd 2767451785 - mov esp,DWORD [28+esp] - popfd - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -__x86_AES_set_encrypt_key: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [24+esp] - mov edi,DWORD [32+esp] - test esi,-1 - jz NEAR L$040badpointer - test edi,-1 - jz NEAR L$040badpointer - call L$041pic_point -L$041pic_point: - pop ebp - lea ebp,[(L$AES_Te-L$041pic_point)+ebp] - lea ebp,[2176+ebp] - mov eax,DWORD [ebp-128] - mov ebx,DWORD [ebp-96] - mov ecx,DWORD [ebp-64] - mov edx,DWORD [ebp-32] - mov eax,DWORD [ebp] - mov ebx,DWORD [32+ebp] - mov ecx,DWORD [64+ebp] - mov edx,DWORD [96+ebp] - mov ecx,DWORD [28+esp] - cmp ecx,128 - je NEAR L$04210rounds - cmp ecx,192 - je NEAR L$04312rounds - cmp ecx,256 - je NEAR L$04414rounds - mov eax,-2 - jmp NEAR L$045exit -L$04210rounds: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - xor ecx,ecx - jmp NEAR L$04610shortcut -align 4 -L$04710loop: - mov eax,DWORD [edi] - mov edx,DWORD [12+edi] -L$04610shortcut: - movzx esi,dl - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,24 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shr edx,16 - movzx esi,dl - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl 
ebx,8 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shl ebx,16 - xor eax,ebx - xor eax,DWORD [896+ecx*4+ebp] - mov DWORD [16+edi],eax - xor eax,DWORD [4+edi] - mov DWORD [20+edi],eax - xor eax,DWORD [8+edi] - mov DWORD [24+edi],eax - xor eax,DWORD [12+edi] - mov DWORD [28+edi],eax - inc ecx - add edi,16 - cmp ecx,10 - jl NEAR L$04710loop - mov DWORD [80+edi],10 - xor eax,eax - jmp NEAR L$045exit -L$04312rounds: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov ecx,DWORD [16+esi] - mov edx,DWORD [20+esi] - mov DWORD [16+edi],ecx - mov DWORD [20+edi],edx - xor ecx,ecx - jmp NEAR L$04812shortcut -align 4 -L$04912loop: - mov eax,DWORD [edi] - mov edx,DWORD [20+edi] -L$04812shortcut: - movzx esi,dl - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,24 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shr edx,16 - movzx esi,dl - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,8 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shl ebx,16 - xor eax,ebx - xor eax,DWORD [896+ecx*4+ebp] - mov DWORD [24+edi],eax - xor eax,DWORD [4+edi] - mov DWORD [28+edi],eax - xor eax,DWORD [8+edi] - mov DWORD [32+edi],eax - xor eax,DWORD [12+edi] - mov DWORD [36+edi],eax - cmp ecx,7 - je NEAR L$05012break - inc ecx - xor eax,DWORD [16+edi] - mov DWORD [40+edi],eax - xor eax,DWORD [20+edi] - mov DWORD [44+edi],eax - add edi,24 - jmp NEAR L$04912loop -L$05012break: - mov DWORD [72+edi],12 - xor eax,eax - jmp NEAR L$045exit -L$04414rounds: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [8+edi],ecx - mov DWORD [12+edi],edx - mov eax,DWORD [16+esi] - mov ebx,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov edx,DWORD [28+esi] - mov DWORD [16+edi],eax - mov DWORD [20+edi],ebx - mov DWORD [24+edi],ecx - mov DWORD 
[28+edi],edx - xor ecx,ecx - jmp NEAR L$05114shortcut -align 4 -L$05214loop: - mov edx,DWORD [28+edi] -L$05114shortcut: - mov eax,DWORD [edi] - movzx esi,dl - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,24 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shr edx,16 - movzx esi,dl - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,8 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shl ebx,16 - xor eax,ebx - xor eax,DWORD [896+ecx*4+ebp] - mov DWORD [32+edi],eax - xor eax,DWORD [4+edi] - mov DWORD [36+edi],eax - xor eax,DWORD [8+edi] - mov DWORD [40+edi],eax - xor eax,DWORD [12+edi] - mov DWORD [44+edi],eax - cmp ecx,6 - je NEAR L$05314break - inc ecx - mov edx,eax - mov eax,DWORD [16+edi] - movzx esi,dl - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shr edx,16 - shl ebx,8 - movzx esi,dl - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - movzx esi,dh - shl ebx,16 - xor eax,ebx - movzx ebx,BYTE [esi*1+ebp-128] - shl ebx,24 - xor eax,ebx - mov DWORD [48+edi],eax - xor eax,DWORD [20+edi] - mov DWORD [52+edi],eax - xor eax,DWORD [24+edi] - mov DWORD [56+edi],eax - xor eax,DWORD [28+edi] - mov DWORD [60+edi],eax - add edi,32 - jmp NEAR L$05214loop -L$05314break: - mov DWORD [48+edi],14 - xor eax,eax - jmp NEAR L$045exit -L$040badpointer: - mov eax,-1 -L$045exit: - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_nohw_set_encrypt_key -align 16 -_aes_nohw_set_encrypt_key: -L$_aes_nohw_set_encrypt_key_begin: - call __x86_AES_set_encrypt_key - ret -global _aes_nohw_set_decrypt_key -align 16 -_aes_nohw_set_decrypt_key: -L$_aes_nohw_set_decrypt_key_begin: - call __x86_AES_set_encrypt_key - cmp eax,0 - je NEAR L$054proceed - ret -L$054proceed: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [28+esp] - mov ecx,DWORD [240+esi] - lea ecx,[ecx*4] - lea edi,[ecx*4+esi] -align 4 -L$055invert: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [edi] - mov edx,DWORD [4+edi] - 
mov DWORD [edi],eax - mov DWORD [4+edi],ebx - mov DWORD [esi],ecx - mov DWORD [4+esi],edx - mov eax,DWORD [8+esi] - mov ebx,DWORD [12+esi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov DWORD [8+edi],eax - mov DWORD [12+edi],ebx - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - add esi,16 - sub edi,16 - cmp esi,edi - jne NEAR L$055invert - mov edi,DWORD [28+esp] - mov esi,DWORD [240+edi] - lea esi,[esi*1+esi-2] - lea esi,[esi*8+edi] - mov DWORD [28+esp],esi - mov eax,DWORD [16+edi] -align 4 -L$056permute: - add edi,16 - mov ebp,2155905152 - and ebp,eax - lea ebx,[eax*1+eax] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and ebx,4278124286 - and esi,454761243 - xor ebx,esi - mov ebp,2155905152 - and ebp,ebx - lea ecx,[ebx*1+ebx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and ecx,4278124286 - and esi,454761243 - xor ebx,eax - xor ecx,esi - mov ebp,2155905152 - and ebp,ecx - lea edx,[ecx*1+ecx] - mov esi,ebp - shr ebp,7 - xor ecx,eax - sub esi,ebp - and edx,4278124286 - and esi,454761243 - rol eax,8 - xor edx,esi - mov ebp,DWORD [4+edi] - xor eax,ebx - xor ebx,edx - xor eax,ecx - rol ebx,24 - xor ecx,edx - xor eax,edx - rol ecx,16 - xor eax,ebx - rol edx,8 - xor eax,ecx - mov ebx,ebp - xor eax,edx - mov DWORD [edi],eax - mov ebp,2155905152 - and ebp,ebx - lea ecx,[ebx*1+ebx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and ecx,4278124286 - and esi,454761243 - xor ecx,esi - mov ebp,2155905152 - and ebp,ecx - lea edx,[ecx*1+ecx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and edx,4278124286 - and esi,454761243 - xor ecx,ebx - xor edx,esi - mov ebp,2155905152 - and ebp,edx - lea eax,[edx*1+edx] - mov esi,ebp - shr ebp,7 - xor edx,ebx - sub esi,ebp - and eax,4278124286 - and esi,454761243 - rol ebx,8 - xor eax,esi - mov ebp,DWORD [8+edi] - xor ebx,ecx - xor ecx,eax - xor ebx,edx - rol ecx,24 - xor edx,eax - xor ebx,eax - rol edx,16 - xor ebx,ecx - rol eax,8 - xor ebx,edx - mov ecx,ebp - xor ebx,eax - mov DWORD [4+edi],ebx - mov ebp,2155905152 - and ebp,ecx - lea 
edx,[ecx*1+ecx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and edx,4278124286 - and esi,454761243 - xor edx,esi - mov ebp,2155905152 - and ebp,edx - lea eax,[edx*1+edx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and eax,4278124286 - and esi,454761243 - xor edx,ecx - xor eax,esi - mov ebp,2155905152 - and ebp,eax - lea ebx,[eax*1+eax] - mov esi,ebp - shr ebp,7 - xor eax,ecx - sub esi,ebp - and ebx,4278124286 - and esi,454761243 - rol ecx,8 - xor ebx,esi - mov ebp,DWORD [12+edi] - xor ecx,edx - xor edx,ebx - xor ecx,eax - rol edx,24 - xor eax,ebx - xor ecx,ebx - rol eax,16 - xor ecx,edx - rol ebx,8 - xor ecx,eax - mov edx,ebp - xor ecx,ebx - mov DWORD [8+edi],ecx - mov ebp,2155905152 - and ebp,edx - lea eax,[edx*1+edx] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and eax,4278124286 - and esi,454761243 - xor eax,esi - mov ebp,2155905152 - and ebp,eax - lea ebx,[eax*1+eax] - mov esi,ebp - shr ebp,7 - sub esi,ebp - and ebx,4278124286 - and esi,454761243 - xor eax,edx - xor ebx,esi - mov ebp,2155905152 - and ebp,ebx - lea ecx,[ebx*1+ebx] - mov esi,ebp - shr ebp,7 - xor ebx,edx - sub esi,ebp - and ecx,4278124286 - and esi,454761243 - rol edx,8 - xor ecx,esi - mov ebp,DWORD [16+edi] - xor edx,eax - xor eax,ecx - xor edx,ebx - rol eax,24 - xor ebx,ecx - xor edx,ecx - rol ebx,16 - xor edx,eax - rol ecx,8 - xor edx,ebx - mov eax,ebp - xor edx,ecx - mov DWORD [12+edi],edx - cmp edi,DWORD [28+esp] - jb NEAR L$056permute - xor eax,eax - pop edi - pop esi - pop ebx - pop ebp - ret -db 65,69,83,32,102,111,114,32,120,56,54,44,32,67,82,89 -db 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114 -db 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aesni-x86.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aesni-x86.asm deleted file mode 100644 index 0272fce460..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/aesni-x86.asm +++ 
/dev/null @@ -1,2469 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -%ifdef BORINGSSL_DISPATCH_TEST -extern _BORINGSSL_function_hit -%endif -global _aes_hw_encrypt -align 16 -_aes_hw_encrypt: -L$_aes_hw_encrypt_begin: -%ifdef BORINGSSL_DISPATCH_TEST - push ebx - push edx - call L$000pic -L$000pic: - pop ebx - lea ebx,[(_BORINGSSL_function_hit+1-L$000pic)+ebx] - mov edx,1 - mov BYTE [ebx],dl - pop edx - pop ebx -%endif - mov eax,DWORD [4+esp] - mov edx,DWORD [12+esp] - movups xmm2,[eax] - mov ecx,DWORD [240+edx] - mov eax,DWORD [8+esp] - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$001enc1_loop_1: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$001enc1_loop_1 -db 102,15,56,221,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movups [eax],xmm2 - pxor xmm2,xmm2 - ret -global _aes_hw_decrypt -align 16 -_aes_hw_decrypt: -L$_aes_hw_decrypt_begin: - mov eax,DWORD [4+esp] - mov edx,DWORD [12+esp] - movups xmm2,[eax] - mov ecx,DWORD [240+edx] - mov eax,DWORD [8+esp] - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$002dec1_loop_2: -db 102,15,56,222,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$002dec1_loop_2 -db 102,15,56,223,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movups [eax],xmm2 - pxor xmm2,xmm2 - ret -align 16 
-__aesni_encrypt2: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx - add ecx,16 -L$003enc2_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$003enc2_loop -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,221,208 -db 102,15,56,221,216 - ret -align 16 -__aesni_decrypt2: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx - add ecx,16 -L$004dec2_loop: -db 102,15,56,222,209 -db 102,15,56,222,217 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,222,208 -db 102,15,56,222,216 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$004dec2_loop -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,223,208 -db 102,15,56,223,216 - ret -align 16 -__aesni_encrypt3: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx - add ecx,16 -L$005enc3_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,220,225 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 -db 102,15,56,220,224 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$005enc3_loop -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,220,225 -db 102,15,56,221,208 -db 102,15,56,221,216 -db 102,15,56,221,224 - ret -align 16 -__aesni_decrypt3: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx - add ecx,16 -L$006dec3_loop: -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,222,208 -db 102,15,56,222,216 -db 102,15,56,222,224 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$006dec3_loop -db 
102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 -db 102,15,56,223,208 -db 102,15,56,223,216 -db 102,15,56,223,224 - ret -align 16 -__aesni_encrypt4: - movups xmm0,[edx] - movups xmm1,[16+edx] - shl ecx,4 - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 - pxor xmm5,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx -db 15,31,64,0 - add ecx,16 -L$007enc4_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,220,225 -db 102,15,56,220,233 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 -db 102,15,56,220,224 -db 102,15,56,220,232 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$007enc4_loop -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,220,225 -db 102,15,56,220,233 -db 102,15,56,221,208 -db 102,15,56,221,216 -db 102,15,56,221,224 -db 102,15,56,221,232 - ret -align 16 -__aesni_decrypt4: - movups xmm0,[edx] - movups xmm1,[16+edx] - shl ecx,4 - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 - pxor xmm5,xmm0 - movups xmm0,[32+edx] - lea edx,[32+ecx*1+edx] - neg ecx -db 15,31,64,0 - add ecx,16 -L$008dec4_loop: -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 -db 102,15,56,222,233 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,222,208 -db 102,15,56,222,216 -db 102,15,56,222,224 -db 102,15,56,222,232 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$008dec4_loop -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 -db 102,15,56,222,233 -db 102,15,56,223,208 -db 102,15,56,223,216 -db 102,15,56,223,224 -db 102,15,56,223,232 - ret -align 16 -__aesni_encrypt6: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 -db 102,15,56,220,209 - pxor xmm5,xmm0 - pxor xmm6,xmm0 -db 102,15,56,220,217 - lea edx,[32+ecx*1+edx] - neg ecx -db 102,15,56,220,225 - pxor xmm7,xmm0 - movups xmm0,[ecx*1+edx] - add ecx,16 - jmp NEAR L$009_aesni_encrypt6_inner -align 16 -L$010enc6_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 -db 
102,15,56,220,225 -L$009_aesni_encrypt6_inner: -db 102,15,56,220,233 -db 102,15,56,220,241 -db 102,15,56,220,249 -L$_aesni_encrypt6_enter: - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 -db 102,15,56,220,224 -db 102,15,56,220,232 -db 102,15,56,220,240 -db 102,15,56,220,248 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$010enc6_loop -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,220,225 -db 102,15,56,220,233 -db 102,15,56,220,241 -db 102,15,56,220,249 -db 102,15,56,221,208 -db 102,15,56,221,216 -db 102,15,56,221,224 -db 102,15,56,221,232 -db 102,15,56,221,240 -db 102,15,56,221,248 - ret -align 16 -__aesni_decrypt6: - movups xmm0,[edx] - shl ecx,4 - movups xmm1,[16+edx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 -db 102,15,56,222,209 - pxor xmm5,xmm0 - pxor xmm6,xmm0 -db 102,15,56,222,217 - lea edx,[32+ecx*1+edx] - neg ecx -db 102,15,56,222,225 - pxor xmm7,xmm0 - movups xmm0,[ecx*1+edx] - add ecx,16 - jmp NEAR L$011_aesni_decrypt6_inner -align 16 -L$012dec6_loop: -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 -L$011_aesni_decrypt6_inner: -db 102,15,56,222,233 -db 102,15,56,222,241 -db 102,15,56,222,249 -L$_aesni_decrypt6_enter: - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,222,208 -db 102,15,56,222,216 -db 102,15,56,222,224 -db 102,15,56,222,232 -db 102,15,56,222,240 -db 102,15,56,222,248 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$012dec6_loop -db 102,15,56,222,209 -db 102,15,56,222,217 -db 102,15,56,222,225 -db 102,15,56,222,233 -db 102,15,56,222,241 -db 102,15,56,222,249 -db 102,15,56,223,208 -db 102,15,56,223,216 -db 102,15,56,223,224 -db 102,15,56,223,232 -db 102,15,56,223,240 -db 102,15,56,223,248 - ret -global _aes_hw_ecb_encrypt -align 16 -_aes_hw_ecb_encrypt: -L$_aes_hw_ecb_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebx,DWORD [36+esp] - and eax,-16 - jz NEAR 
L$013ecb_ret - mov ecx,DWORD [240+edx] - test ebx,ebx - jz NEAR L$014ecb_decrypt - mov ebp,edx - mov ebx,ecx - cmp eax,96 - jb NEAR L$015ecb_enc_tail - movdqu xmm2,[esi] - movdqu xmm3,[16+esi] - movdqu xmm4,[32+esi] - movdqu xmm5,[48+esi] - movdqu xmm6,[64+esi] - movdqu xmm7,[80+esi] - lea esi,[96+esi] - sub eax,96 - jmp NEAR L$016ecb_enc_loop6_enter -align 16 -L$017ecb_enc_loop6: - movups [edi],xmm2 - movdqu xmm2,[esi] - movups [16+edi],xmm3 - movdqu xmm3,[16+esi] - movups [32+edi],xmm4 - movdqu xmm4,[32+esi] - movups [48+edi],xmm5 - movdqu xmm5,[48+esi] - movups [64+edi],xmm6 - movdqu xmm6,[64+esi] - movups [80+edi],xmm7 - lea edi,[96+edi] - movdqu xmm7,[80+esi] - lea esi,[96+esi] -L$016ecb_enc_loop6_enter: - call __aesni_encrypt6 - mov edx,ebp - mov ecx,ebx - sub eax,96 - jnc NEAR L$017ecb_enc_loop6 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - movups [80+edi],xmm7 - lea edi,[96+edi] - add eax,96 - jz NEAR L$013ecb_ret -L$015ecb_enc_tail: - movups xmm2,[esi] - cmp eax,32 - jb NEAR L$018ecb_enc_one - movups xmm3,[16+esi] - je NEAR L$019ecb_enc_two - movups xmm4,[32+esi] - cmp eax,64 - jb NEAR L$020ecb_enc_three - movups xmm5,[48+esi] - je NEAR L$021ecb_enc_four - movups xmm6,[64+esi] - xorps xmm7,xmm7 - call __aesni_encrypt6 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - jmp NEAR L$013ecb_ret -align 16 -L$018ecb_enc_one: - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$022enc1_loop_3: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$022enc1_loop_3 -db 102,15,56,221,209 - movups [edi],xmm2 - jmp NEAR L$013ecb_ret -align 16 -L$019ecb_enc_two: - call __aesni_encrypt2 - movups [edi],xmm2 - movups [16+edi],xmm3 - jmp NEAR L$013ecb_ret -align 16 -L$020ecb_enc_three: - call __aesni_encrypt3 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - jmp NEAR 
L$013ecb_ret -align 16 -L$021ecb_enc_four: - call __aesni_encrypt4 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - jmp NEAR L$013ecb_ret -align 16 -L$014ecb_decrypt: - mov ebp,edx - mov ebx,ecx - cmp eax,96 - jb NEAR L$023ecb_dec_tail - movdqu xmm2,[esi] - movdqu xmm3,[16+esi] - movdqu xmm4,[32+esi] - movdqu xmm5,[48+esi] - movdqu xmm6,[64+esi] - movdqu xmm7,[80+esi] - lea esi,[96+esi] - sub eax,96 - jmp NEAR L$024ecb_dec_loop6_enter -align 16 -L$025ecb_dec_loop6: - movups [edi],xmm2 - movdqu xmm2,[esi] - movups [16+edi],xmm3 - movdqu xmm3,[16+esi] - movups [32+edi],xmm4 - movdqu xmm4,[32+esi] - movups [48+edi],xmm5 - movdqu xmm5,[48+esi] - movups [64+edi],xmm6 - movdqu xmm6,[64+esi] - movups [80+edi],xmm7 - lea edi,[96+edi] - movdqu xmm7,[80+esi] - lea esi,[96+esi] -L$024ecb_dec_loop6_enter: - call __aesni_decrypt6 - mov edx,ebp - mov ecx,ebx - sub eax,96 - jnc NEAR L$025ecb_dec_loop6 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - movups [80+edi],xmm7 - lea edi,[96+edi] - add eax,96 - jz NEAR L$013ecb_ret -L$023ecb_dec_tail: - movups xmm2,[esi] - cmp eax,32 - jb NEAR L$026ecb_dec_one - movups xmm3,[16+esi] - je NEAR L$027ecb_dec_two - movups xmm4,[32+esi] - cmp eax,64 - jb NEAR L$028ecb_dec_three - movups xmm5,[48+esi] - je NEAR L$029ecb_dec_four - movups xmm6,[64+esi] - xorps xmm7,xmm7 - call __aesni_decrypt6 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - jmp NEAR L$013ecb_ret -align 16 -L$026ecb_dec_one: - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$030dec1_loop_4: -db 102,15,56,222,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$030dec1_loop_4 -db 102,15,56,223,209 - movups [edi],xmm2 - jmp NEAR L$013ecb_ret -align 16 -L$027ecb_dec_two: - call __aesni_decrypt2 - movups [edi],xmm2 - movups [16+edi],xmm3 - jmp NEAR L$013ecb_ret -align 
16 -L$028ecb_dec_three: - call __aesni_decrypt3 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - jmp NEAR L$013ecb_ret -align 16 -L$029ecb_dec_four: - call __aesni_decrypt4 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 -L$013ecb_ret: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_ccm64_encrypt_blocks -align 16 -_aes_hw_ccm64_encrypt_blocks: -L$_aes_hw_ccm64_encrypt_blocks_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebx,DWORD [36+esp] - mov ecx,DWORD [40+esp] - mov ebp,esp - sub esp,60 - and esp,-16 - mov DWORD [48+esp],ebp - movdqu xmm7,[ebx] - movdqu xmm3,[ecx] - mov ecx,DWORD [240+edx] - mov DWORD [esp],202182159 - mov DWORD [4+esp],134810123 - mov DWORD [8+esp],67438087 - mov DWORD [12+esp],66051 - mov ebx,1 - xor ebp,ebp - mov DWORD [16+esp],ebx - mov DWORD [20+esp],ebp - mov DWORD [24+esp],ebp - mov DWORD [28+esp],ebp - shl ecx,4 - mov ebx,16 - lea ebp,[edx] - movdqa xmm5,[esp] - movdqa xmm2,xmm7 - lea edx,[32+ecx*1+edx] - sub ebx,ecx -db 102,15,56,0,253 -L$031ccm64_enc_outer: - movups xmm0,[ebp] - mov ecx,ebx - movups xmm6,[esi] - xorps xmm2,xmm0 - movups xmm1,[16+ebp] - xorps xmm0,xmm6 - xorps xmm3,xmm0 - movups xmm0,[32+ebp] -L$032ccm64_enc2_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$032ccm64_enc2_loop -db 102,15,56,220,209 -db 102,15,56,220,217 - paddq xmm7,[16+esp] - dec eax -db 102,15,56,221,208 -db 102,15,56,221,216 - lea esi,[16+esi] - xorps xmm6,xmm2 - movdqa xmm2,xmm7 - movups [edi],xmm6 -db 102,15,56,0,213 - lea edi,[16+edi] - jnz NEAR L$031ccm64_enc_outer - mov esp,DWORD [48+esp] - mov edi,DWORD 
[40+esp] - movups [edi],xmm3 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_ccm64_decrypt_blocks -align 16 -_aes_hw_ccm64_decrypt_blocks: -L$_aes_hw_ccm64_decrypt_blocks_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebx,DWORD [36+esp] - mov ecx,DWORD [40+esp] - mov ebp,esp - sub esp,60 - and esp,-16 - mov DWORD [48+esp],ebp - movdqu xmm7,[ebx] - movdqu xmm3,[ecx] - mov ecx,DWORD [240+edx] - mov DWORD [esp],202182159 - mov DWORD [4+esp],134810123 - mov DWORD [8+esp],67438087 - mov DWORD [12+esp],66051 - mov ebx,1 - xor ebp,ebp - mov DWORD [16+esp],ebx - mov DWORD [20+esp],ebp - mov DWORD [24+esp],ebp - mov DWORD [28+esp],ebp - movdqa xmm5,[esp] - movdqa xmm2,xmm7 - mov ebp,edx - mov ebx,ecx -db 102,15,56,0,253 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$033enc1_loop_5: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$033enc1_loop_5 -db 102,15,56,221,209 - shl ebx,4 - mov ecx,16 - movups xmm6,[esi] - paddq xmm7,[16+esp] - lea esi,[16+esi] - sub ecx,ebx - lea edx,[32+ebx*1+ebp] - mov ebx,ecx - jmp NEAR L$034ccm64_dec_outer -align 16 -L$034ccm64_dec_outer: - xorps xmm6,xmm2 - movdqa xmm2,xmm7 - movups [edi],xmm6 - lea edi,[16+edi] -db 102,15,56,0,213 - sub eax,1 - jz NEAR L$035ccm64_dec_break - movups xmm0,[ebp] - mov ecx,ebx - movups xmm1,[16+ebp] - xorps xmm6,xmm0 - xorps xmm2,xmm0 - xorps xmm3,xmm6 - movups xmm0,[32+ebp] -L$036ccm64_dec2_loop: -db 102,15,56,220,209 -db 102,15,56,220,217 - movups xmm1,[ecx*1+edx] - add ecx,32 -db 102,15,56,220,208 -db 102,15,56,220,216 - movups xmm0,[ecx*1+edx-16] - jnz NEAR L$036ccm64_dec2_loop - movups xmm6,[esi] - paddq xmm7,[16+esp] -db 102,15,56,220,209 -db 102,15,56,220,217 -db 102,15,56,221,208 
-db 102,15,56,221,216 - lea esi,[16+esi] - jmp NEAR L$034ccm64_dec_outer -align 16 -L$035ccm64_dec_break: - mov ecx,DWORD [240+ebp] - mov edx,ebp - movups xmm0,[edx] - movups xmm1,[16+edx] - xorps xmm6,xmm0 - lea edx,[32+edx] - xorps xmm3,xmm6 -L$037enc1_loop_6: -db 102,15,56,220,217 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$037enc1_loop_6 -db 102,15,56,221,217 - mov esp,DWORD [48+esp] - mov edi,DWORD [40+esp] - movups [edi],xmm3 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_ctr32_encrypt_blocks -align 16 -_aes_hw_ctr32_encrypt_blocks: -L$_aes_hw_ctr32_encrypt_blocks_begin: - push ebp - push ebx - push esi - push edi -%ifdef BORINGSSL_DISPATCH_TEST - push ebx - push edx - call L$038pic -L$038pic: - pop ebx - lea ebx,[(_BORINGSSL_function_hit+0-L$038pic)+ebx] - mov edx,1 - mov BYTE [ebx],dl - pop edx - pop ebx -%endif - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebx,DWORD [36+esp] - mov ebp,esp - sub esp,88 - and esp,-16 - mov DWORD [80+esp],ebp - cmp eax,1 - je NEAR L$039ctr32_one_shortcut - movdqu xmm7,[ebx] - mov DWORD [esp],202182159 - mov DWORD [4+esp],134810123 - mov DWORD [8+esp],67438087 - mov DWORD [12+esp],66051 - mov ecx,6 - xor ebp,ebp - mov DWORD [16+esp],ecx - mov DWORD [20+esp],ecx - mov DWORD [24+esp],ecx - mov DWORD [28+esp],ebp -db 102,15,58,22,251,3 -db 102,15,58,34,253,3 - mov ecx,DWORD [240+edx] - bswap ebx - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movdqa xmm2,[esp] -db 102,15,58,34,195,0 - lea ebp,[3+ebx] -db 102,15,58,34,205,0 - inc ebx -db 102,15,58,34,195,1 - inc ebp -db 102,15,58,34,205,1 - inc ebx -db 102,15,58,34,195,2 - inc ebp -db 102,15,58,34,205,2 - movdqa [48+esp],xmm0 -db 102,15,56,0,194 - movdqu xmm6,[edx] - movdqa [64+esp],xmm1 -db 102,15,56,0,202 - pshufd xmm2,xmm0,192 - pshufd xmm3,xmm0,128 - cmp eax,6 - 
jb NEAR L$040ctr32_tail - pxor xmm7,xmm6 - shl ecx,4 - mov ebx,16 - movdqa [32+esp],xmm7 - mov ebp,edx - sub ebx,ecx - lea edx,[32+ecx*1+edx] - sub eax,6 - jmp NEAR L$041ctr32_loop6 -align 16 -L$041ctr32_loop6: - pshufd xmm4,xmm0,64 - movdqa xmm0,[32+esp] - pshufd xmm5,xmm1,192 - pxor xmm2,xmm0 - pshufd xmm6,xmm1,128 - pxor xmm3,xmm0 - pshufd xmm7,xmm1,64 - movups xmm1,[16+ebp] - pxor xmm4,xmm0 - pxor xmm5,xmm0 -db 102,15,56,220,209 - pxor xmm6,xmm0 - pxor xmm7,xmm0 -db 102,15,56,220,217 - movups xmm0,[32+ebp] - mov ecx,ebx -db 102,15,56,220,225 -db 102,15,56,220,233 -db 102,15,56,220,241 -db 102,15,56,220,249 - call L$_aesni_encrypt6_enter - movups xmm1,[esi] - movups xmm0,[16+esi] - xorps xmm2,xmm1 - movups xmm1,[32+esi] - xorps xmm3,xmm0 - movups [edi],xmm2 - movdqa xmm0,[16+esp] - xorps xmm4,xmm1 - movdqa xmm1,[64+esp] - movups [16+edi],xmm3 - movups [32+edi],xmm4 - paddd xmm1,xmm0 - paddd xmm0,[48+esp] - movdqa xmm2,[esp] - movups xmm3,[48+esi] - movups xmm4,[64+esi] - xorps xmm5,xmm3 - movups xmm3,[80+esi] - lea esi,[96+esi] - movdqa [48+esp],xmm0 -db 102,15,56,0,194 - xorps xmm6,xmm4 - movups [48+edi],xmm5 - xorps xmm7,xmm3 - movdqa [64+esp],xmm1 -db 102,15,56,0,202 - movups [64+edi],xmm6 - pshufd xmm2,xmm0,192 - movups [80+edi],xmm7 - lea edi,[96+edi] - pshufd xmm3,xmm0,128 - sub eax,6 - jnc NEAR L$041ctr32_loop6 - add eax,6 - jz NEAR L$042ctr32_ret - movdqu xmm7,[ebp] - mov edx,ebp - pxor xmm7,[32+esp] - mov ecx,DWORD [240+ebp] -L$040ctr32_tail: - por xmm2,xmm7 - cmp eax,2 - jb NEAR L$043ctr32_one - pshufd xmm4,xmm0,64 - por xmm3,xmm7 - je NEAR L$044ctr32_two - pshufd xmm5,xmm1,192 - por xmm4,xmm7 - cmp eax,4 - jb NEAR L$045ctr32_three - pshufd xmm6,xmm1,128 - por xmm5,xmm7 - je NEAR L$046ctr32_four - por xmm6,xmm7 - call __aesni_encrypt6 - movups xmm1,[esi] - movups xmm0,[16+esi] - xorps xmm2,xmm1 - movups xmm1,[32+esi] - xorps xmm3,xmm0 - movups xmm0,[48+esi] - xorps xmm4,xmm1 - movups xmm1,[64+esi] - xorps xmm5,xmm0 - movups [edi],xmm2 - xorps xmm6,xmm1 
- movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - jmp NEAR L$042ctr32_ret -align 16 -L$039ctr32_one_shortcut: - movups xmm2,[ebx] - mov ecx,DWORD [240+edx] -L$043ctr32_one: - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$047enc1_loop_7: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$047enc1_loop_7 -db 102,15,56,221,209 - movups xmm6,[esi] - xorps xmm6,xmm2 - movups [edi],xmm6 - jmp NEAR L$042ctr32_ret -align 16 -L$044ctr32_two: - call __aesni_encrypt2 - movups xmm5,[esi] - movups xmm6,[16+esi] - xorps xmm2,xmm5 - xorps xmm3,xmm6 - movups [edi],xmm2 - movups [16+edi],xmm3 - jmp NEAR L$042ctr32_ret -align 16 -L$045ctr32_three: - call __aesni_encrypt3 - movups xmm5,[esi] - movups xmm6,[16+esi] - xorps xmm2,xmm5 - movups xmm7,[32+esi] - xorps xmm3,xmm6 - movups [edi],xmm2 - xorps xmm4,xmm7 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - jmp NEAR L$042ctr32_ret -align 16 -L$046ctr32_four: - call __aesni_encrypt4 - movups xmm6,[esi] - movups xmm7,[16+esi] - movups xmm1,[32+esi] - xorps xmm2,xmm6 - movups xmm0,[48+esi] - xorps xmm3,xmm7 - movups [edi],xmm2 - xorps xmm4,xmm1 - movups [16+edi],xmm3 - xorps xmm5,xmm0 - movups [32+edi],xmm4 - movups [48+edi],xmm5 -L$042ctr32_ret: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - movdqa [32+esp],xmm0 - pxor xmm5,xmm5 - movdqa [48+esp],xmm0 - pxor xmm6,xmm6 - movdqa [64+esp],xmm0 - pxor xmm7,xmm7 - mov esp,DWORD [80+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_xts_encrypt -align 16 -_aes_hw_xts_encrypt: -L$_aes_hw_xts_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov edx,DWORD [36+esp] - mov esi,DWORD [40+esp] - mov ecx,DWORD [240+edx] - movups xmm2,[esi] - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$048enc1_loop_8: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR 
L$048enc1_loop_8 -db 102,15,56,221,209 - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebp,esp - sub esp,120 - mov ecx,DWORD [240+edx] - and esp,-16 - mov DWORD [96+esp],135 - mov DWORD [100+esp],0 - mov DWORD [104+esp],1 - mov DWORD [108+esp],0 - mov DWORD [112+esp],eax - mov DWORD [116+esp],ebp - movdqa xmm1,xmm2 - pxor xmm0,xmm0 - movdqa xmm3,[96+esp] - pcmpgtd xmm0,xmm1 - and eax,-16 - mov ebp,edx - mov ebx,ecx - sub eax,96 - jc NEAR L$049xts_enc_short - shl ecx,4 - mov ebx,16 - sub ebx,ecx - lea edx,[32+ecx*1+edx] - jmp NEAR L$050xts_enc_loop6 -align 16 -L$050xts_enc_loop6: - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [16+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [32+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [48+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm7,xmm0,19 - movdqa [64+esp],xmm1 - paddq xmm1,xmm1 - movups xmm0,[ebp] - pand xmm7,xmm3 - movups xmm2,[esi] - pxor xmm7,xmm1 - mov ecx,ebx - movdqu xmm3,[16+esi] - xorps xmm2,xmm0 - movdqu xmm4,[32+esi] - pxor xmm3,xmm0 - movdqu xmm5,[48+esi] - pxor xmm4,xmm0 - movdqu xmm6,[64+esi] - pxor xmm5,xmm0 - movdqu xmm1,[80+esi] - pxor xmm6,xmm0 - lea esi,[96+esi] - pxor xmm2,[esp] - movdqa [80+esp],xmm7 - pxor xmm7,xmm1 - movups xmm1,[16+ebp] - pxor xmm3,[16+esp] - pxor xmm4,[32+esp] -db 102,15,56,220,209 - pxor xmm5,[48+esp] - pxor xmm6,[64+esp] -db 102,15,56,220,217 - pxor xmm7,xmm0 - movups xmm0,[32+ebp] -db 102,15,56,220,225 -db 102,15,56,220,233 -db 102,15,56,220,241 -db 102,15,56,220,249 - call L$_aesni_encrypt6_enter - movdqa xmm1,[80+esp] - pxor xmm0,xmm0 - xorps xmm2,[esp] - pcmpgtd xmm0,xmm1 - 
xorps xmm3,[16+esp] - movups [edi],xmm2 - xorps xmm4,[32+esp] - movups [16+edi],xmm3 - xorps xmm5,[48+esp] - movups [32+edi],xmm4 - xorps xmm6,[64+esp] - movups [48+edi],xmm5 - xorps xmm7,xmm1 - movups [64+edi],xmm6 - pshufd xmm2,xmm0,19 - movups [80+edi],xmm7 - lea edi,[96+edi] - movdqa xmm3,[96+esp] - pxor xmm0,xmm0 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - sub eax,96 - jnc NEAR L$050xts_enc_loop6 - mov ecx,DWORD [240+ebp] - mov edx,ebp - mov ebx,ecx -L$049xts_enc_short: - add eax,96 - jz NEAR L$051xts_enc_done6x - movdqa xmm5,xmm1 - cmp eax,32 - jb NEAR L$052xts_enc_one - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - je NEAR L$053xts_enc_two - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa xmm6,xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - cmp eax,64 - jb NEAR L$054xts_enc_three - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa xmm7,xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - movdqa [esp],xmm5 - movdqa [16+esp],xmm6 - je NEAR L$055xts_enc_four - movdqa [32+esp],xmm7 - pshufd xmm7,xmm0,19 - movdqa [48+esp],xmm1 - paddq xmm1,xmm1 - pand xmm7,xmm3 - pxor xmm7,xmm1 - movdqu xmm2,[esi] - movdqu xmm3,[16+esi] - movdqu xmm4,[32+esi] - pxor xmm2,[esp] - movdqu xmm5,[48+esi] - pxor xmm3,[16+esp] - movdqu xmm6,[64+esi] - pxor xmm4,[32+esp] - lea esi,[80+esi] - pxor xmm5,[48+esp] - movdqa [64+esp],xmm7 - pxor xmm6,xmm7 - call __aesni_encrypt6 - movaps xmm1,[64+esp] - xorps xmm2,[esp] - xorps xmm3,[16+esp] - xorps xmm4,[32+esp] - movups [edi],xmm2 - xorps xmm5,[48+esp] - movups [16+edi],xmm3 - xorps xmm6,xmm1 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - lea edi,[80+edi] - jmp NEAR L$056xts_enc_done -align 16 -L$052xts_enc_one: - movups xmm2,[esi] - lea esi,[16+esi] - xorps xmm2,xmm5 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$057enc1_loop_9: -db 
102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$057enc1_loop_9 -db 102,15,56,221,209 - xorps xmm2,xmm5 - movups [edi],xmm2 - lea edi,[16+edi] - movdqa xmm1,xmm5 - jmp NEAR L$056xts_enc_done -align 16 -L$053xts_enc_two: - movaps xmm6,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - lea esi,[32+esi] - xorps xmm2,xmm5 - xorps xmm3,xmm6 - call __aesni_encrypt2 - xorps xmm2,xmm5 - xorps xmm3,xmm6 - movups [edi],xmm2 - movups [16+edi],xmm3 - lea edi,[32+edi] - movdqa xmm1,xmm6 - jmp NEAR L$056xts_enc_done -align 16 -L$054xts_enc_three: - movaps xmm7,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - movups xmm4,[32+esi] - lea esi,[48+esi] - xorps xmm2,xmm5 - xorps xmm3,xmm6 - xorps xmm4,xmm7 - call __aesni_encrypt3 - xorps xmm2,xmm5 - xorps xmm3,xmm6 - xorps xmm4,xmm7 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - lea edi,[48+edi] - movdqa xmm1,xmm7 - jmp NEAR L$056xts_enc_done -align 16 -L$055xts_enc_four: - movaps xmm6,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - movups xmm4,[32+esi] - xorps xmm2,[esp] - movups xmm5,[48+esi] - lea esi,[64+esi] - xorps xmm3,[16+esp] - xorps xmm4,xmm7 - xorps xmm5,xmm6 - call __aesni_encrypt4 - xorps xmm2,[esp] - xorps xmm3,[16+esp] - xorps xmm4,xmm7 - movups [edi],xmm2 - xorps xmm5,xmm6 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - lea edi,[64+edi] - movdqa xmm1,xmm6 - jmp NEAR L$056xts_enc_done -align 16 -L$051xts_enc_done6x: - mov eax,DWORD [112+esp] - and eax,15 - jz NEAR L$058xts_enc_ret - movdqa xmm5,xmm1 - mov DWORD [112+esp],eax - jmp NEAR L$059xts_enc_steal -align 16 -L$056xts_enc_done: - mov eax,DWORD [112+esp] - pxor xmm0,xmm0 - and eax,15 - jz NEAR L$058xts_enc_ret - pcmpgtd xmm0,xmm1 - mov DWORD [112+esp],eax - pshufd xmm5,xmm0,19 - paddq xmm1,xmm1 - pand xmm5,[96+esp] - pxor xmm5,xmm1 -L$059xts_enc_steal: - movzx ecx,BYTE [esi] - movzx edx,BYTE [edi-16] - lea esi,[1+esi] - mov BYTE [edi-16],cl - mov BYTE [edi],dl - lea edi,[1+edi] - sub eax,1 
- jnz NEAR L$059xts_enc_steal - sub edi,DWORD [112+esp] - mov edx,ebp - mov ecx,ebx - movups xmm2,[edi-16] - xorps xmm2,xmm5 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$060enc1_loop_10: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$060enc1_loop_10 -db 102,15,56,221,209 - xorps xmm2,xmm5 - movups [edi-16],xmm2 -L$058xts_enc_ret: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - movdqa [esp],xmm0 - pxor xmm3,xmm3 - movdqa [16+esp],xmm0 - pxor xmm4,xmm4 - movdqa [32+esp],xmm0 - pxor xmm5,xmm5 - movdqa [48+esp],xmm0 - pxor xmm6,xmm6 - movdqa [64+esp],xmm0 - pxor xmm7,xmm7 - movdqa [80+esp],xmm0 - mov esp,DWORD [116+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_xts_decrypt -align 16 -_aes_hw_xts_decrypt: -L$_aes_hw_xts_decrypt_begin: - push ebp - push ebx - push esi - push edi - mov edx,DWORD [36+esp] - mov esi,DWORD [40+esp] - mov ecx,DWORD [240+edx] - movups xmm2,[esi] - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$061enc1_loop_11: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$061enc1_loop_11 -db 102,15,56,221,209 - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebp,esp - sub esp,120 - and esp,-16 - xor ebx,ebx - test eax,15 - setnz bl - shl ebx,4 - sub eax,ebx - mov DWORD [96+esp],135 - mov DWORD [100+esp],0 - mov DWORD [104+esp],1 - mov DWORD [108+esp],0 - mov DWORD [112+esp],eax - mov DWORD [116+esp],ebp - mov ecx,DWORD [240+edx] - mov ebp,edx - mov ebx,ecx - movdqa xmm1,xmm2 - pxor xmm0,xmm0 - movdqa xmm3,[96+esp] - pcmpgtd xmm0,xmm1 - and eax,-16 - sub eax,96 - jc NEAR L$062xts_dec_short - shl ecx,4 - mov ebx,16 - sub ebx,ecx - lea edx,[32+ecx*1+edx] - jmp NEAR L$063xts_dec_loop6 -align 16 -L$063xts_dec_loop6: - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor 
xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [16+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [32+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa [48+esp],xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - pshufd xmm7,xmm0,19 - movdqa [64+esp],xmm1 - paddq xmm1,xmm1 - movups xmm0,[ebp] - pand xmm7,xmm3 - movups xmm2,[esi] - pxor xmm7,xmm1 - mov ecx,ebx - movdqu xmm3,[16+esi] - xorps xmm2,xmm0 - movdqu xmm4,[32+esi] - pxor xmm3,xmm0 - movdqu xmm5,[48+esi] - pxor xmm4,xmm0 - movdqu xmm6,[64+esi] - pxor xmm5,xmm0 - movdqu xmm1,[80+esi] - pxor xmm6,xmm0 - lea esi,[96+esi] - pxor xmm2,[esp] - movdqa [80+esp],xmm7 - pxor xmm7,xmm1 - movups xmm1,[16+ebp] - pxor xmm3,[16+esp] - pxor xmm4,[32+esp] -db 102,15,56,222,209 - pxor xmm5,[48+esp] - pxor xmm6,[64+esp] -db 102,15,56,222,217 - pxor xmm7,xmm0 - movups xmm0,[32+ebp] -db 102,15,56,222,225 -db 102,15,56,222,233 -db 102,15,56,222,241 -db 102,15,56,222,249 - call L$_aesni_decrypt6_enter - movdqa xmm1,[80+esp] - pxor xmm0,xmm0 - xorps xmm2,[esp] - pcmpgtd xmm0,xmm1 - xorps xmm3,[16+esp] - movups [edi],xmm2 - xorps xmm4,[32+esp] - movups [16+edi],xmm3 - xorps xmm5,[48+esp] - movups [32+edi],xmm4 - xorps xmm6,[64+esp] - movups [48+edi],xmm5 - xorps xmm7,xmm1 - movups [64+edi],xmm6 - pshufd xmm2,xmm0,19 - movups [80+edi],xmm7 - lea edi,[96+edi] - movdqa xmm3,[96+esp] - pxor xmm0,xmm0 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - sub eax,96 - jnc NEAR L$063xts_dec_loop6 - mov ecx,DWORD [240+ebp] - mov edx,ebp - mov ebx,ecx -L$062xts_dec_short: - add eax,96 - jz NEAR L$064xts_dec_done6x - movdqa xmm5,xmm1 - cmp eax,32 - jb NEAR L$065xts_dec_one - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - je NEAR L$066xts_dec_two - pshufd 
xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa xmm6,xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - cmp eax,64 - jb NEAR L$067xts_dec_three - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa xmm7,xmm1 - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 - movdqa [esp],xmm5 - movdqa [16+esp],xmm6 - je NEAR L$068xts_dec_four - movdqa [32+esp],xmm7 - pshufd xmm7,xmm0,19 - movdqa [48+esp],xmm1 - paddq xmm1,xmm1 - pand xmm7,xmm3 - pxor xmm7,xmm1 - movdqu xmm2,[esi] - movdqu xmm3,[16+esi] - movdqu xmm4,[32+esi] - pxor xmm2,[esp] - movdqu xmm5,[48+esi] - pxor xmm3,[16+esp] - movdqu xmm6,[64+esi] - pxor xmm4,[32+esp] - lea esi,[80+esi] - pxor xmm5,[48+esp] - movdqa [64+esp],xmm7 - pxor xmm6,xmm7 - call __aesni_decrypt6 - movaps xmm1,[64+esp] - xorps xmm2,[esp] - xorps xmm3,[16+esp] - xorps xmm4,[32+esp] - movups [edi],xmm2 - xorps xmm5,[48+esp] - movups [16+edi],xmm3 - xorps xmm6,xmm1 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - movups [64+edi],xmm6 - lea edi,[80+edi] - jmp NEAR L$069xts_dec_done -align 16 -L$065xts_dec_one: - movups xmm2,[esi] - lea esi,[16+esi] - xorps xmm2,xmm5 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$070dec1_loop_12: -db 102,15,56,222,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$070dec1_loop_12 -db 102,15,56,223,209 - xorps xmm2,xmm5 - movups [edi],xmm2 - lea edi,[16+edi] - movdqa xmm1,xmm5 - jmp NEAR L$069xts_dec_done -align 16 -L$066xts_dec_two: - movaps xmm6,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - lea esi,[32+esi] - xorps xmm2,xmm5 - xorps xmm3,xmm6 - call __aesni_decrypt2 - xorps xmm2,xmm5 - xorps xmm3,xmm6 - movups [edi],xmm2 - movups [16+edi],xmm3 - lea edi,[32+edi] - movdqa xmm1,xmm6 - jmp NEAR L$069xts_dec_done -align 16 -L$067xts_dec_three: - movaps xmm7,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - movups xmm4,[32+esi] - lea esi,[48+esi] - xorps xmm2,xmm5 - xorps xmm3,xmm6 - xorps xmm4,xmm7 - call __aesni_decrypt3 - xorps 
xmm2,xmm5 - xorps xmm3,xmm6 - xorps xmm4,xmm7 - movups [edi],xmm2 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - lea edi,[48+edi] - movdqa xmm1,xmm7 - jmp NEAR L$069xts_dec_done -align 16 -L$068xts_dec_four: - movaps xmm6,xmm1 - movups xmm2,[esi] - movups xmm3,[16+esi] - movups xmm4,[32+esi] - xorps xmm2,[esp] - movups xmm5,[48+esi] - lea esi,[64+esi] - xorps xmm3,[16+esp] - xorps xmm4,xmm7 - xorps xmm5,xmm6 - call __aesni_decrypt4 - xorps xmm2,[esp] - xorps xmm3,[16+esp] - xorps xmm4,xmm7 - movups [edi],xmm2 - xorps xmm5,xmm6 - movups [16+edi],xmm3 - movups [32+edi],xmm4 - movups [48+edi],xmm5 - lea edi,[64+edi] - movdqa xmm1,xmm6 - jmp NEAR L$069xts_dec_done -align 16 -L$064xts_dec_done6x: - mov eax,DWORD [112+esp] - and eax,15 - jz NEAR L$071xts_dec_ret - mov DWORD [112+esp],eax - jmp NEAR L$072xts_dec_only_one_more -align 16 -L$069xts_dec_done: - mov eax,DWORD [112+esp] - pxor xmm0,xmm0 - and eax,15 - jz NEAR L$071xts_dec_ret - pcmpgtd xmm0,xmm1 - mov DWORD [112+esp],eax - pshufd xmm2,xmm0,19 - pxor xmm0,xmm0 - movdqa xmm3,[96+esp] - paddq xmm1,xmm1 - pand xmm2,xmm3 - pcmpgtd xmm0,xmm1 - pxor xmm1,xmm2 -L$072xts_dec_only_one_more: - pshufd xmm5,xmm0,19 - movdqa xmm6,xmm1 - paddq xmm1,xmm1 - pand xmm5,xmm3 - pxor xmm5,xmm1 - mov edx,ebp - mov ecx,ebx - movups xmm2,[esi] - xorps xmm2,xmm5 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$073dec1_loop_13: -db 102,15,56,222,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$073dec1_loop_13 -db 102,15,56,223,209 - xorps xmm2,xmm5 - movups [edi],xmm2 -L$074xts_dec_steal: - movzx ecx,BYTE [16+esi] - movzx edx,BYTE [edi] - lea esi,[1+esi] - mov BYTE [edi],cl - mov BYTE [16+edi],dl - lea edi,[1+edi] - sub eax,1 - jnz NEAR L$074xts_dec_steal - sub edi,DWORD [112+esp] - mov edx,ebp - mov ecx,ebx - movups xmm2,[edi] - xorps xmm2,xmm6 - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$075dec1_loop_14: -db 102,15,56,222,209 - dec ecx - movups 
xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$075dec1_loop_14 -db 102,15,56,223,209 - xorps xmm2,xmm6 - movups [edi],xmm2 -L$071xts_dec_ret: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - movdqa [esp],xmm0 - pxor xmm3,xmm3 - movdqa [16+esp],xmm0 - pxor xmm4,xmm4 - movdqa [32+esp],xmm0 - pxor xmm5,xmm5 - movdqa [48+esp],xmm0 - pxor xmm6,xmm6 - movdqa [64+esp],xmm0 - pxor xmm7,xmm7 - movdqa [80+esp],xmm0 - mov esp,DWORD [116+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -global _aes_hw_cbc_encrypt -align 16 -_aes_hw_cbc_encrypt: -L$_aes_hw_cbc_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov ebx,esp - mov edi,DWORD [24+esp] - sub ebx,24 - mov eax,DWORD [28+esp] - and ebx,-16 - mov edx,DWORD [32+esp] - mov ebp,DWORD [36+esp] - test eax,eax - jz NEAR L$076cbc_abort - cmp DWORD [40+esp],0 - xchg ebx,esp - movups xmm7,[ebp] - mov ecx,DWORD [240+edx] - mov ebp,edx - mov DWORD [16+esp],ebx - mov ebx,ecx - je NEAR L$077cbc_decrypt - movaps xmm2,xmm7 - cmp eax,16 - jb NEAR L$078cbc_enc_tail - sub eax,16 - jmp NEAR L$079cbc_enc_loop -align 16 -L$079cbc_enc_loop: - movups xmm7,[esi] - lea esi,[16+esi] - movups xmm0,[edx] - movups xmm1,[16+edx] - xorps xmm7,xmm0 - lea edx,[32+edx] - xorps xmm2,xmm7 -L$080enc1_loop_15: -db 102,15,56,220,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$080enc1_loop_15 -db 102,15,56,221,209 - mov ecx,ebx - mov edx,ebp - movups [edi],xmm2 - lea edi,[16+edi] - sub eax,16 - jnc NEAR L$079cbc_enc_loop - add eax,16 - jnz NEAR L$078cbc_enc_tail - movaps xmm7,xmm2 - pxor xmm2,xmm2 - jmp NEAR L$081cbc_ret -L$078cbc_enc_tail: - mov ecx,eax -dd 2767451785 - mov ecx,16 - sub ecx,eax - xor eax,eax -dd 2868115081 - lea edi,[edi-16] - mov ecx,ebx - mov esi,edi - mov edx,ebp - jmp NEAR L$079cbc_enc_loop -align 16 -L$077cbc_decrypt: - cmp eax,80 - jbe NEAR L$082cbc_dec_tail - movaps [esp],xmm7 - sub eax,80 - jmp NEAR L$083cbc_dec_loop6_enter -align 16 -L$084cbc_dec_loop6: - movaps [esp],xmm0 - 
movups [edi],xmm7 - lea edi,[16+edi] -L$083cbc_dec_loop6_enter: - movdqu xmm2,[esi] - movdqu xmm3,[16+esi] - movdqu xmm4,[32+esi] - movdqu xmm5,[48+esi] - movdqu xmm6,[64+esi] - movdqu xmm7,[80+esi] - call __aesni_decrypt6 - movups xmm1,[esi] - movups xmm0,[16+esi] - xorps xmm2,[esp] - xorps xmm3,xmm1 - movups xmm1,[32+esi] - xorps xmm4,xmm0 - movups xmm0,[48+esi] - xorps xmm5,xmm1 - movups xmm1,[64+esi] - xorps xmm6,xmm0 - movups xmm0,[80+esi] - xorps xmm7,xmm1 - movups [edi],xmm2 - movups [16+edi],xmm3 - lea esi,[96+esi] - movups [32+edi],xmm4 - mov ecx,ebx - movups [48+edi],xmm5 - mov edx,ebp - movups [64+edi],xmm6 - lea edi,[80+edi] - sub eax,96 - ja NEAR L$084cbc_dec_loop6 - movaps xmm2,xmm7 - movaps xmm7,xmm0 - add eax,80 - jle NEAR L$085cbc_dec_clear_tail_collected - movups [edi],xmm2 - lea edi,[16+edi] -L$082cbc_dec_tail: - movups xmm2,[esi] - movaps xmm6,xmm2 - cmp eax,16 - jbe NEAR L$086cbc_dec_one - movups xmm3,[16+esi] - movaps xmm5,xmm3 - cmp eax,32 - jbe NEAR L$087cbc_dec_two - movups xmm4,[32+esi] - cmp eax,48 - jbe NEAR L$088cbc_dec_three - movups xmm5,[48+esi] - cmp eax,64 - jbe NEAR L$089cbc_dec_four - movups xmm6,[64+esi] - movaps [esp],xmm7 - movups xmm2,[esi] - xorps xmm7,xmm7 - call __aesni_decrypt6 - movups xmm1,[esi] - movups xmm0,[16+esi] - xorps xmm2,[esp] - xorps xmm3,xmm1 - movups xmm1,[32+esi] - xorps xmm4,xmm0 - movups xmm0,[48+esi] - xorps xmm5,xmm1 - movups xmm7,[64+esi] - xorps xmm6,xmm0 - movups [edi],xmm2 - movups [16+edi],xmm3 - pxor xmm3,xmm3 - movups [32+edi],xmm4 - pxor xmm4,xmm4 - movups [48+edi],xmm5 - pxor xmm5,xmm5 - lea edi,[64+edi] - movaps xmm2,xmm6 - pxor xmm6,xmm6 - sub eax,80 - jmp NEAR L$090cbc_dec_tail_collected -align 16 -L$086cbc_dec_one: - movups xmm0,[edx] - movups xmm1,[16+edx] - lea edx,[32+edx] - xorps xmm2,xmm0 -L$091dec1_loop_16: -db 102,15,56,222,209 - dec ecx - movups xmm1,[edx] - lea edx,[16+edx] - jnz NEAR L$091dec1_loop_16 -db 102,15,56,223,209 - xorps xmm2,xmm7 - movaps xmm7,xmm6 - sub eax,16 - jmp 
NEAR L$090cbc_dec_tail_collected -align 16 -L$087cbc_dec_two: - call __aesni_decrypt2 - xorps xmm2,xmm7 - xorps xmm3,xmm6 - movups [edi],xmm2 - movaps xmm2,xmm3 - pxor xmm3,xmm3 - lea edi,[16+edi] - movaps xmm7,xmm5 - sub eax,32 - jmp NEAR L$090cbc_dec_tail_collected -align 16 -L$088cbc_dec_three: - call __aesni_decrypt3 - xorps xmm2,xmm7 - xorps xmm3,xmm6 - xorps xmm4,xmm5 - movups [edi],xmm2 - movaps xmm2,xmm4 - pxor xmm4,xmm4 - movups [16+edi],xmm3 - pxor xmm3,xmm3 - lea edi,[32+edi] - movups xmm7,[32+esi] - sub eax,48 - jmp NEAR L$090cbc_dec_tail_collected -align 16 -L$089cbc_dec_four: - call __aesni_decrypt4 - movups xmm1,[16+esi] - movups xmm0,[32+esi] - xorps xmm2,xmm7 - movups xmm7,[48+esi] - xorps xmm3,xmm6 - movups [edi],xmm2 - xorps xmm4,xmm1 - movups [16+edi],xmm3 - pxor xmm3,xmm3 - xorps xmm5,xmm0 - movups [32+edi],xmm4 - pxor xmm4,xmm4 - lea edi,[48+edi] - movaps xmm2,xmm5 - pxor xmm5,xmm5 - sub eax,64 - jmp NEAR L$090cbc_dec_tail_collected -align 16 -L$085cbc_dec_clear_tail_collected: - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 -L$090cbc_dec_tail_collected: - and eax,15 - jnz NEAR L$092cbc_dec_tail_partial - movups [edi],xmm2 - pxor xmm0,xmm0 - jmp NEAR L$081cbc_ret -align 16 -L$092cbc_dec_tail_partial: - movaps [esp],xmm2 - pxor xmm0,xmm0 - mov ecx,16 - mov esi,esp - sub ecx,eax -dd 2767451785 - movdqa [esp],xmm2 -L$081cbc_ret: - mov esp,DWORD [16+esp] - mov ebp,DWORD [36+esp] - pxor xmm2,xmm2 - pxor xmm1,xmm1 - movups [ebp],xmm7 - pxor xmm7,xmm7 -L$076cbc_abort: - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -__aesni_set_encrypt_key: - push ebp - push ebx - test eax,eax - jz NEAR L$093bad_pointer - test edx,edx - jz NEAR L$093bad_pointer - call L$094pic -L$094pic: - pop ebx - lea ebx,[(L$key_const-L$094pic)+ebx] - lea ebp,[_OPENSSL_ia32cap_P] - movups xmm0,[eax] - xorps xmm4,xmm4 - mov ebp,DWORD [4+ebp] - lea edx,[16+edx] - and ebp,268437504 - cmp ecx,256 - je NEAR L$09514rounds - cmp ecx,192 - je NEAR L$09612rounds 
- cmp ecx,128 - jne NEAR L$097bad_keybits -align 16 -L$09810rounds: - cmp ebp,268435456 - je NEAR L$09910rounds_alt - mov ecx,9 - movups [edx-16],xmm0 -db 102,15,58,223,200,1 - call L$100key_128_cold -db 102,15,58,223,200,2 - call L$101key_128 -db 102,15,58,223,200,4 - call L$101key_128 -db 102,15,58,223,200,8 - call L$101key_128 -db 102,15,58,223,200,16 - call L$101key_128 -db 102,15,58,223,200,32 - call L$101key_128 -db 102,15,58,223,200,64 - call L$101key_128 -db 102,15,58,223,200,128 - call L$101key_128 -db 102,15,58,223,200,27 - call L$101key_128 -db 102,15,58,223,200,54 - call L$101key_128 - movups [edx],xmm0 - mov DWORD [80+edx],ecx - jmp NEAR L$102good_key -align 16 -L$101key_128: - movups [edx],xmm0 - lea edx,[16+edx] -L$100key_128_cold: - shufps xmm4,xmm0,16 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - xorps xmm0,xmm4 - shufps xmm1,xmm1,255 - xorps xmm0,xmm1 - ret -align 16 -L$09910rounds_alt: - movdqa xmm5,[ebx] - mov ecx,8 - movdqa xmm4,[32+ebx] - movdqa xmm2,xmm0 - movdqu [edx-16],xmm0 -L$103loop_key128: -db 102,15,56,0,197 -db 102,15,56,221,196 - pslld xmm4,1 - lea edx,[16+edx] - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm2,xmm3 - pxor xmm0,xmm2 - movdqu [edx-16],xmm0 - movdqa xmm2,xmm0 - dec ecx - jnz NEAR L$103loop_key128 - movdqa xmm4,[48+ebx] -db 102,15,56,0,197 -db 102,15,56,221,196 - pslld xmm4,1 - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm2,xmm3 - pxor xmm0,xmm2 - movdqu [edx],xmm0 - movdqa xmm2,xmm0 -db 102,15,56,0,197 -db 102,15,56,221,196 - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm2,xmm3 - pxor xmm0,xmm2 - movdqu [16+edx],xmm0 - mov ecx,9 - mov DWORD [96+edx],ecx - jmp NEAR L$102good_key -align 16 -L$09612rounds: - movq xmm2,[16+eax] - cmp ebp,268435456 - je NEAR L$10412rounds_alt - mov ecx,11 - movups [edx-16],xmm0 -db 102,15,58,223,202,1 - call 
L$105key_192a_cold -db 102,15,58,223,202,2 - call L$106key_192b -db 102,15,58,223,202,4 - call L$107key_192a -db 102,15,58,223,202,8 - call L$106key_192b -db 102,15,58,223,202,16 - call L$107key_192a -db 102,15,58,223,202,32 - call L$106key_192b -db 102,15,58,223,202,64 - call L$107key_192a -db 102,15,58,223,202,128 - call L$106key_192b - movups [edx],xmm0 - mov DWORD [48+edx],ecx - jmp NEAR L$102good_key -align 16 -L$107key_192a: - movups [edx],xmm0 - lea edx,[16+edx] -align 16 -L$105key_192a_cold: - movaps xmm5,xmm2 -L$108key_192b_warm: - shufps xmm4,xmm0,16 - movdqa xmm3,xmm2 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - pslldq xmm3,4 - xorps xmm0,xmm4 - pshufd xmm1,xmm1,85 - pxor xmm2,xmm3 - pxor xmm0,xmm1 - pshufd xmm3,xmm0,255 - pxor xmm2,xmm3 - ret -align 16 -L$106key_192b: - movaps xmm3,xmm0 - shufps xmm5,xmm0,68 - movups [edx],xmm5 - shufps xmm3,xmm2,78 - movups [16+edx],xmm3 - lea edx,[32+edx] - jmp NEAR L$108key_192b_warm -align 16 -L$10412rounds_alt: - movdqa xmm5,[16+ebx] - movdqa xmm4,[32+ebx] - mov ecx,8 - movdqu [edx-16],xmm0 -L$109loop_key192: - movq [edx],xmm2 - movdqa xmm1,xmm2 -db 102,15,56,0,213 -db 102,15,56,221,212 - pslld xmm4,1 - lea edx,[24+edx] - movdqa xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm0,xmm3 - pshufd xmm3,xmm0,255 - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - pxor xmm0,xmm2 - pxor xmm2,xmm3 - movdqu [edx-16],xmm0 - dec ecx - jnz NEAR L$109loop_key192 - mov ecx,11 - mov DWORD [32+edx],ecx - jmp NEAR L$102good_key -align 16 -L$09514rounds: - movups xmm2,[16+eax] - lea edx,[16+edx] - cmp ebp,268435456 - je NEAR L$11014rounds_alt - mov ecx,13 - movups [edx-32],xmm0 - movups [edx-16],xmm2 -db 102,15,58,223,202,1 - call L$111key_256a_cold -db 102,15,58,223,200,1 - call L$112key_256b -db 102,15,58,223,202,2 - call L$113key_256a -db 102,15,58,223,200,2 - call L$112key_256b -db 102,15,58,223,202,4 - call L$113key_256a -db 102,15,58,223,200,4 - call L$112key_256b -db 
102,15,58,223,202,8 - call L$113key_256a -db 102,15,58,223,200,8 - call L$112key_256b -db 102,15,58,223,202,16 - call L$113key_256a -db 102,15,58,223,200,16 - call L$112key_256b -db 102,15,58,223,202,32 - call L$113key_256a -db 102,15,58,223,200,32 - call L$112key_256b -db 102,15,58,223,202,64 - call L$113key_256a - movups [edx],xmm0 - mov DWORD [16+edx],ecx - xor eax,eax - jmp NEAR L$102good_key -align 16 -L$113key_256a: - movups [edx],xmm2 - lea edx,[16+edx] -L$111key_256a_cold: - shufps xmm4,xmm0,16 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - xorps xmm0,xmm4 - shufps xmm1,xmm1,255 - xorps xmm0,xmm1 - ret -align 16 -L$112key_256b: - movups [edx],xmm0 - lea edx,[16+edx] - shufps xmm4,xmm2,16 - xorps xmm2,xmm4 - shufps xmm4,xmm2,140 - xorps xmm2,xmm4 - shufps xmm1,xmm1,170 - xorps xmm2,xmm1 - ret -align 16 -L$11014rounds_alt: - movdqa xmm5,[ebx] - movdqa xmm4,[32+ebx] - mov ecx,7 - movdqu [edx-32],xmm0 - movdqa xmm1,xmm2 - movdqu [edx-16],xmm2 -L$114loop_key256: -db 102,15,56,0,213 -db 102,15,56,221,212 - movdqa xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm0,xmm3 - pslld xmm4,1 - pxor xmm0,xmm2 - movdqu [edx],xmm0 - dec ecx - jz NEAR L$115done_key256 - pshufd xmm2,xmm0,255 - pxor xmm3,xmm3 -db 102,15,56,221,211 - movdqa xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm1,xmm3 - pxor xmm2,xmm1 - movdqu [16+edx],xmm2 - lea edx,[32+edx] - movdqa xmm1,xmm2 - jmp NEAR L$114loop_key256 -L$115done_key256: - mov ecx,13 - mov DWORD [16+edx],ecx -L$102good_key: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - xor eax,eax - pop ebx - pop ebp - ret -align 4 -L$093bad_pointer: - mov eax,-1 - pop ebx - pop ebp - ret -align 4 -L$097bad_keybits: - pxor xmm0,xmm0 - mov eax,-2 - pop ebx - pop ebp - ret -global _aes_hw_set_encrypt_key -align 16 -_aes_hw_set_encrypt_key: -L$_aes_hw_set_encrypt_key_begin: -%ifdef 
BORINGSSL_DISPATCH_TEST - push ebx - push edx - call L$116pic -L$116pic: - pop ebx - lea ebx,[(_BORINGSSL_function_hit+3-L$116pic)+ebx] - mov edx,1 - mov BYTE [ebx],dl - pop edx - pop ebx -%endif - mov eax,DWORD [4+esp] - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - call __aesni_set_encrypt_key - ret -global _aes_hw_set_decrypt_key -align 16 -_aes_hw_set_decrypt_key: -L$_aes_hw_set_decrypt_key_begin: - mov eax,DWORD [4+esp] - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - call __aesni_set_encrypt_key - mov edx,DWORD [12+esp] - shl ecx,4 - test eax,eax - jnz NEAR L$117dec_key_ret - lea eax,[16+ecx*1+edx] - movups xmm0,[edx] - movups xmm1,[eax] - movups [eax],xmm0 - movups [edx],xmm1 - lea edx,[16+edx] - lea eax,[eax-16] -L$118dec_key_inverse: - movups xmm0,[edx] - movups xmm1,[eax] -db 102,15,56,219,192 -db 102,15,56,219,201 - lea edx,[16+edx] - lea eax,[eax-16] - movups [16+eax],xmm0 - movups [edx-16],xmm1 - cmp eax,edx - ja NEAR L$118dec_key_inverse - movups xmm0,[edx] -db 102,15,56,219,192 - movups [edx],xmm0 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - xor eax,eax -L$117dec_key_ret: - ret -align 64 -L$key_const: -dd 202313229,202313229,202313229,202313229 -dd 67569157,67569157,67569157,67569157 -dd 1,1,1,1 -dd 27,27,27,27 -db 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 -db 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 -db 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 -db 115,108,46,111,114,103,62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/bn-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/bn-586.asm deleted file mode 100644 index a87f86d12f..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/bn-586.asm +++ /dev/null @@ -1,1529 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -global _bn_mul_add_words -align 16 -_bn_mul_add_words: -L$_bn_mul_add_words_begin: - lea eax,[_OPENSSL_ia32cap_P] - bt DWORD [eax],26 - jnc NEAR L$000maw_non_sse2 - mov eax,DWORD [4+esp] - mov edx,DWORD [8+esp] - mov ecx,DWORD [12+esp] - movd mm0,DWORD [16+esp] - pxor mm1,mm1 - jmp NEAR L$001maw_sse2_entry -align 16 -L$002maw_sse2_unrolled: - movd mm3,DWORD [eax] - paddq mm1,mm3 - movd mm2,DWORD [edx] - pmuludq mm2,mm0 - movd mm4,DWORD [4+edx] - pmuludq mm4,mm0 - movd mm6,DWORD [8+edx] - pmuludq mm6,mm0 - movd mm7,DWORD [12+edx] - pmuludq mm7,mm0 - paddq mm1,mm2 - movd mm3,DWORD [4+eax] - paddq mm3,mm4 - movd mm5,DWORD [8+eax] - paddq mm5,mm6 - movd mm4,DWORD [12+eax] - paddq mm7,mm4 - movd DWORD [eax],mm1 - movd mm2,DWORD [16+edx] - pmuludq mm2,mm0 - psrlq mm1,32 - movd mm4,DWORD [20+edx] - pmuludq mm4,mm0 - paddq mm1,mm3 - movd mm6,DWORD [24+edx] - pmuludq mm6,mm0 - movd DWORD [4+eax],mm1 - psrlq mm1,32 - movd mm3,DWORD [28+edx] - add edx,32 - pmuludq mm3,mm0 - paddq mm1,mm5 - movd mm5,DWORD [16+eax] - paddq mm2,mm5 - movd DWORD [8+eax],mm1 - psrlq mm1,32 - paddq mm1,mm7 - movd mm5,DWORD [20+eax] - paddq mm4,mm5 - movd DWORD [12+eax],mm1 - psrlq mm1,32 - paddq mm1,mm2 - movd mm5,DWORD [24+eax] - paddq mm6,mm5 - movd DWORD [16+eax],mm1 - psrlq mm1,32 - paddq mm1,mm4 - movd mm5,DWORD [28+eax] - paddq mm3,mm5 - movd DWORD [20+eax],mm1 - psrlq mm1,32 - paddq mm1,mm6 - movd DWORD [24+eax],mm1 - psrlq 
mm1,32 - paddq mm1,mm3 - movd DWORD [28+eax],mm1 - lea eax,[32+eax] - psrlq mm1,32 - sub ecx,8 - jz NEAR L$003maw_sse2_exit -L$001maw_sse2_entry: - test ecx,4294967288 - jnz NEAR L$002maw_sse2_unrolled -align 4 -L$004maw_sse2_loop: - movd mm2,DWORD [edx] - movd mm3,DWORD [eax] - pmuludq mm2,mm0 - lea edx,[4+edx] - paddq mm1,mm3 - paddq mm1,mm2 - movd DWORD [eax],mm1 - sub ecx,1 - psrlq mm1,32 - lea eax,[4+eax] - jnz NEAR L$004maw_sse2_loop -L$003maw_sse2_exit: - movd eax,mm1 - emms - ret -align 16 -L$000maw_non_sse2: - push ebp - push ebx - push esi - push edi - ; - xor esi,esi - mov edi,DWORD [20+esp] - mov ecx,DWORD [28+esp] - mov ebx,DWORD [24+esp] - and ecx,4294967288 - mov ebp,DWORD [32+esp] - push ecx - jz NEAR L$005maw_finish -align 16 -L$006maw_loop: - ; Round 0 - mov eax,DWORD [ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [edi] - adc edx,0 - mov DWORD [edi],eax - mov esi,edx - ; Round 4 - mov eax,DWORD [4+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [4+edi] - adc edx,0 - mov DWORD [4+edi],eax - mov esi,edx - ; Round 8 - mov eax,DWORD [8+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [8+edi] - adc edx,0 - mov DWORD [8+edi],eax - mov esi,edx - ; Round 12 - mov eax,DWORD [12+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [12+edi] - adc edx,0 - mov DWORD [12+edi],eax - mov esi,edx - ; Round 16 - mov eax,DWORD [16+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [16+edi] - adc edx,0 - mov DWORD [16+edi],eax - mov esi,edx - ; Round 20 - mov eax,DWORD [20+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [20+edi] - adc edx,0 - mov DWORD [20+edi],eax - mov esi,edx - ; Round 24 - mov eax,DWORD [24+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [24+edi] - adc edx,0 - mov DWORD [24+edi],eax - mov esi,edx - ; Round 28 - mov eax,DWORD [28+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [28+edi] - adc edx,0 - mov DWORD [28+edi],eax - mov esi,edx - ; - sub ecx,8 - lea ebx,[32+ebx] - lea 
edi,[32+edi] - jnz NEAR L$006maw_loop -L$005maw_finish: - mov ecx,DWORD [32+esp] - and ecx,7 - jnz NEAR L$007maw_finish2 - jmp NEAR L$008maw_end -L$007maw_finish2: - ; Tail Round 0 - mov eax,DWORD [ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [edi] - adc edx,0 - dec ecx - mov DWORD [edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 1 - mov eax,DWORD [4+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [4+edi] - adc edx,0 - dec ecx - mov DWORD [4+edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 2 - mov eax,DWORD [8+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [8+edi] - adc edx,0 - dec ecx - mov DWORD [8+edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 3 - mov eax,DWORD [12+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [12+edi] - adc edx,0 - dec ecx - mov DWORD [12+edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 4 - mov eax,DWORD [16+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [16+edi] - adc edx,0 - dec ecx - mov DWORD [16+edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 5 - mov eax,DWORD [20+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [20+edi] - adc edx,0 - dec ecx - mov DWORD [20+edi],eax - mov esi,edx - jz NEAR L$008maw_end - ; Tail Round 6 - mov eax,DWORD [24+ebx] - mul ebp - add eax,esi - adc edx,0 - add eax,DWORD [24+edi] - adc edx,0 - mov DWORD [24+edi],eax - mov esi,edx -L$008maw_end: - mov eax,esi - pop ecx - pop edi - pop esi - pop ebx - pop ebp - ret -global _bn_mul_words -align 16 -_bn_mul_words: -L$_bn_mul_words_begin: - lea eax,[_OPENSSL_ia32cap_P] - bt DWORD [eax],26 - jnc NEAR L$009mw_non_sse2 - mov eax,DWORD [4+esp] - mov edx,DWORD [8+esp] - mov ecx,DWORD [12+esp] - movd mm0,DWORD [16+esp] - pxor mm1,mm1 -align 16 -L$010mw_sse2_loop: - movd mm2,DWORD [edx] - pmuludq mm2,mm0 - lea edx,[4+edx] - paddq mm1,mm2 - movd DWORD [eax],mm1 - sub ecx,1 - psrlq mm1,32 - lea eax,[4+eax] - jnz NEAR L$010mw_sse2_loop - movd eax,mm1 - emms 
- ret -align 16 -L$009mw_non_sse2: - push ebp - push ebx - push esi - push edi - ; - xor esi,esi - mov edi,DWORD [20+esp] - mov ebx,DWORD [24+esp] - mov ebp,DWORD [28+esp] - mov ecx,DWORD [32+esp] - and ebp,4294967288 - jz NEAR L$011mw_finish -L$012mw_loop: - ; Round 0 - mov eax,DWORD [ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [edi],eax - mov esi,edx - ; Round 4 - mov eax,DWORD [4+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [4+edi],eax - mov esi,edx - ; Round 8 - mov eax,DWORD [8+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [8+edi],eax - mov esi,edx - ; Round 12 - mov eax,DWORD [12+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [12+edi],eax - mov esi,edx - ; Round 16 - mov eax,DWORD [16+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [16+edi],eax - mov esi,edx - ; Round 20 - mov eax,DWORD [20+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [20+edi],eax - mov esi,edx - ; Round 24 - mov eax,DWORD [24+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [24+edi],eax - mov esi,edx - ; Round 28 - mov eax,DWORD [28+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [28+edi],eax - mov esi,edx - ; - add ebx,32 - add edi,32 - sub ebp,8 - jz NEAR L$011mw_finish - jmp NEAR L$012mw_loop -L$011mw_finish: - mov ebp,DWORD [28+esp] - and ebp,7 - jnz NEAR L$013mw_finish2 - jmp NEAR L$014mw_end -L$013mw_finish2: - ; Tail Round 0 - mov eax,DWORD [ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 1 - mov eax,DWORD [4+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [4+edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 2 - mov eax,DWORD [8+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [8+edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 3 - mov eax,DWORD [12+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [12+edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 4 - mov eax,DWORD [16+ebx] - mul ecx 
- add eax,esi - adc edx,0 - mov DWORD [16+edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 5 - mov eax,DWORD [20+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [20+edi],eax - mov esi,edx - dec ebp - jz NEAR L$014mw_end - ; Tail Round 6 - mov eax,DWORD [24+ebx] - mul ecx - add eax,esi - adc edx,0 - mov DWORD [24+edi],eax - mov esi,edx -L$014mw_end: - mov eax,esi - pop edi - pop esi - pop ebx - pop ebp - ret -global _bn_sqr_words -align 16 -_bn_sqr_words: -L$_bn_sqr_words_begin: - lea eax,[_OPENSSL_ia32cap_P] - bt DWORD [eax],26 - jnc NEAR L$015sqr_non_sse2 - mov eax,DWORD [4+esp] - mov edx,DWORD [8+esp] - mov ecx,DWORD [12+esp] -align 16 -L$016sqr_sse2_loop: - movd mm0,DWORD [edx] - pmuludq mm0,mm0 - lea edx,[4+edx] - movq [eax],mm0 - sub ecx,1 - lea eax,[8+eax] - jnz NEAR L$016sqr_sse2_loop - emms - ret -align 16 -L$015sqr_non_sse2: - push ebp - push ebx - push esi - push edi - ; - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov ebx,DWORD [28+esp] - and ebx,4294967288 - jz NEAR L$017sw_finish -L$018sw_loop: - ; Round 0 - mov eax,DWORD [edi] - mul eax - mov DWORD [esi],eax - mov DWORD [4+esi],edx - ; Round 4 - mov eax,DWORD [4+edi] - mul eax - mov DWORD [8+esi],eax - mov DWORD [12+esi],edx - ; Round 8 - mov eax,DWORD [8+edi] - mul eax - mov DWORD [16+esi],eax - mov DWORD [20+esi],edx - ; Round 12 - mov eax,DWORD [12+edi] - mul eax - mov DWORD [24+esi],eax - mov DWORD [28+esi],edx - ; Round 16 - mov eax,DWORD [16+edi] - mul eax - mov DWORD [32+esi],eax - mov DWORD [36+esi],edx - ; Round 20 - mov eax,DWORD [20+edi] - mul eax - mov DWORD [40+esi],eax - mov DWORD [44+esi],edx - ; Round 24 - mov eax,DWORD [24+edi] - mul eax - mov DWORD [48+esi],eax - mov DWORD [52+esi],edx - ; Round 28 - mov eax,DWORD [28+edi] - mul eax - mov DWORD [56+esi],eax - mov DWORD [60+esi],edx - ; - add edi,32 - add esi,64 - sub ebx,8 - jnz NEAR L$018sw_loop -L$017sw_finish: - mov ebx,DWORD [28+esp] - and ebx,7 - jz NEAR L$019sw_end - ; Tail Round 0 - mov 
eax,DWORD [edi] - mul eax - mov DWORD [esi],eax - dec ebx - mov DWORD [4+esi],edx - jz NEAR L$019sw_end - ; Tail Round 1 - mov eax,DWORD [4+edi] - mul eax - mov DWORD [8+esi],eax - dec ebx - mov DWORD [12+esi],edx - jz NEAR L$019sw_end - ; Tail Round 2 - mov eax,DWORD [8+edi] - mul eax - mov DWORD [16+esi],eax - dec ebx - mov DWORD [20+esi],edx - jz NEAR L$019sw_end - ; Tail Round 3 - mov eax,DWORD [12+edi] - mul eax - mov DWORD [24+esi],eax - dec ebx - mov DWORD [28+esi],edx - jz NEAR L$019sw_end - ; Tail Round 4 - mov eax,DWORD [16+edi] - mul eax - mov DWORD [32+esi],eax - dec ebx - mov DWORD [36+esi],edx - jz NEAR L$019sw_end - ; Tail Round 5 - mov eax,DWORD [20+edi] - mul eax - mov DWORD [40+esi],eax - dec ebx - mov DWORD [44+esi],edx - jz NEAR L$019sw_end - ; Tail Round 6 - mov eax,DWORD [24+edi] - mul eax - mov DWORD [48+esi],eax - mov DWORD [52+esi],edx -L$019sw_end: - pop edi - pop esi - pop ebx - pop ebp - ret -global _bn_div_words -align 16 -_bn_div_words: -L$_bn_div_words_begin: - mov edx,DWORD [4+esp] - mov eax,DWORD [8+esp] - mov ecx,DWORD [12+esp] - div ecx - ret -global _bn_add_words -align 16 -_bn_add_words: -L$_bn_add_words_begin: - push ebp - push ebx - push esi - push edi - ; - mov ebx,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov edi,DWORD [28+esp] - mov ebp,DWORD [32+esp] - xor eax,eax - and ebp,4294967288 - jz NEAR L$020aw_finish -L$021aw_loop: - ; Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - ; Round 1 - mov ecx,DWORD [4+esi] - mov edx,DWORD [4+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [4+ebx],ecx - ; Round 2 - mov ecx,DWORD [8+esi] - mov edx,DWORD [8+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [8+ebx],ecx - ; Round 3 - mov ecx,DWORD [12+esi] - mov edx,DWORD [12+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [12+ebx],ecx - ; Round 4 - 
mov ecx,DWORD [16+esi] - mov edx,DWORD [16+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [16+ebx],ecx - ; Round 5 - mov ecx,DWORD [20+esi] - mov edx,DWORD [20+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [20+ebx],ecx - ; Round 6 - mov ecx,DWORD [24+esi] - mov edx,DWORD [24+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx - ; Round 7 - mov ecx,DWORD [28+esi] - mov edx,DWORD [28+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - mov DWORD [28+ebx],ecx - ; - add esi,32 - add edi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$021aw_loop -L$020aw_finish: - mov ebp,DWORD [32+esp] - and ebp,7 - jz NEAR L$022aw_end - ; Tail Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 1 - mov ecx,DWORD [4+esi] - mov edx,DWORD [4+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [4+ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 2 - mov ecx,DWORD [8+esi] - mov edx,DWORD [8+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [8+ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 3 - mov ecx,DWORD [12+esi] - mov edx,DWORD [12+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [12+ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 4 - mov ecx,DWORD [16+esi] - mov edx,DWORD [16+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [16+ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 5 - mov ecx,DWORD [20+esi] - mov edx,DWORD [20+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx - adc eax,0 - dec ebp - mov DWORD [20+ebx],ecx - jz NEAR L$022aw_end - ; Tail Round 6 - mov ecx,DWORD [24+esi] - mov edx,DWORD [24+edi] - add ecx,eax - mov eax,0 - adc eax,eax - add ecx,edx 
- adc eax,0 - mov DWORD [24+ebx],ecx -L$022aw_end: - pop edi - pop esi - pop ebx - pop ebp - ret -global _bn_sub_words -align 16 -_bn_sub_words: -L$_bn_sub_words_begin: - push ebp - push ebx - push esi - push edi - ; - mov ebx,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov edi,DWORD [28+esp] - mov ebp,DWORD [32+esp] - xor eax,eax - and ebp,4294967288 - jz NEAR L$023aw_finish -L$024aw_loop: - ; Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - ; Round 1 - mov ecx,DWORD [4+esi] - mov edx,DWORD [4+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [4+ebx],ecx - ; Round 2 - mov ecx,DWORD [8+esi] - mov edx,DWORD [8+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [8+ebx],ecx - ; Round 3 - mov ecx,DWORD [12+esi] - mov edx,DWORD [12+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [12+ebx],ecx - ; Round 4 - mov ecx,DWORD [16+esi] - mov edx,DWORD [16+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [16+ebx],ecx - ; Round 5 - mov ecx,DWORD [20+esi] - mov edx,DWORD [20+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [20+ebx],ecx - ; Round 6 - mov ecx,DWORD [24+esi] - mov edx,DWORD [24+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx - ; Round 7 - mov ecx,DWORD [28+esi] - mov edx,DWORD [28+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [28+ebx],ecx - ; - add esi,32 - add edi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$024aw_loop -L$023aw_finish: - mov ebp,DWORD [32+esp] - and ebp,7 - jz NEAR L$025aw_end - ; Tail Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 1 - mov ecx,DWORD [4+esi] - mov edx,DWORD 
[4+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [4+ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 2 - mov ecx,DWORD [8+esi] - mov edx,DWORD [8+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [8+ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 3 - mov ecx,DWORD [12+esi] - mov edx,DWORD [12+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [12+ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 4 - mov ecx,DWORD [16+esi] - mov edx,DWORD [16+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [16+ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 5 - mov ecx,DWORD [20+esi] - mov edx,DWORD [20+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [20+ebx],ecx - jz NEAR L$025aw_end - ; Tail Round 6 - mov ecx,DWORD [24+esi] - mov edx,DWORD [24+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx -L$025aw_end: - pop edi - pop esi - pop ebx - pop ebp - ret -global _bn_sub_part_words -align 16 -_bn_sub_part_words: -L$_bn_sub_part_words_begin: - push ebp - push ebx - push esi - push edi - ; - mov ebx,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov edi,DWORD [28+esp] - mov ebp,DWORD [32+esp] - xor eax,eax - and ebp,4294967288 - jz NEAR L$026aw_finish -L$027aw_loop: - ; Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - ; Round 1 - mov ecx,DWORD [4+esi] - mov edx,DWORD [4+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [4+ebx],ecx - ; Round 2 - mov ecx,DWORD [8+esi] - mov edx,DWORD [8+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [8+ebx],ecx - ; Round 3 - mov ecx,DWORD [12+esi] - mov edx,DWORD [12+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD 
[12+ebx],ecx - ; Round 4 - mov ecx,DWORD [16+esi] - mov edx,DWORD [16+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [16+ebx],ecx - ; Round 5 - mov ecx,DWORD [20+esi] - mov edx,DWORD [20+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [20+ebx],ecx - ; Round 6 - mov ecx,DWORD [24+esi] - mov edx,DWORD [24+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx - ; Round 7 - mov ecx,DWORD [28+esi] - mov edx,DWORD [28+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [28+ebx],ecx - ; - add esi,32 - add edi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$027aw_loop -L$026aw_finish: - mov ebp,DWORD [32+esp] - and ebp,7 - jz NEAR L$028aw_end - ; Tail Round 0 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 1 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 2 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 3 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 4 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 5 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov 
DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 - dec ebp - jz NEAR L$028aw_end - ; Tail Round 6 - mov ecx,DWORD [esi] - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - add esi,4 - add edi,4 - add ebx,4 -L$028aw_end: - cmp DWORD [36+esp],0 - je NEAR L$029pw_end - mov ebp,DWORD [36+esp] - cmp ebp,0 - je NEAR L$029pw_end - jge NEAR L$030pw_pos - ; pw_neg - mov edx,0 - sub edx,ebp - mov ebp,edx - and ebp,4294967288 - jz NEAR L$031pw_neg_finish -L$032pw_neg_loop: - ; dl<0 Round 0 - mov ecx,0 - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [ebx],ecx - ; dl<0 Round 1 - mov ecx,0 - mov edx,DWORD [4+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [4+ebx],ecx - ; dl<0 Round 2 - mov ecx,0 - mov edx,DWORD [8+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [8+ebx],ecx - ; dl<0 Round 3 - mov ecx,0 - mov edx,DWORD [12+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [12+ebx],ecx - ; dl<0 Round 4 - mov ecx,0 - mov edx,DWORD [16+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [16+ebx],ecx - ; dl<0 Round 5 - mov ecx,0 - mov edx,DWORD [20+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [20+ebx],ecx - ; dl<0 Round 6 - mov ecx,0 - mov edx,DWORD [24+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx - ; dl<0 Round 7 - mov ecx,0 - mov edx,DWORD [28+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [28+ebx],ecx - ; - add edi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$032pw_neg_loop -L$031pw_neg_finish: - mov edx,DWORD [36+esp] - mov ebp,0 - sub ebp,edx - and ebp,7 - jz NEAR L$029pw_end - ; dl<0 Tail Round 0 - mov ecx,0 - mov edx,DWORD [edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD 
[ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 1 - mov ecx,0 - mov edx,DWORD [4+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [4+ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 2 - mov ecx,0 - mov edx,DWORD [8+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [8+ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 3 - mov ecx,0 - mov edx,DWORD [12+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [12+ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 4 - mov ecx,0 - mov edx,DWORD [16+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [16+ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 5 - mov ecx,0 - mov edx,DWORD [20+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - dec ebp - mov DWORD [20+ebx],ecx - jz NEAR L$029pw_end - ; dl<0 Tail Round 6 - mov ecx,0 - mov edx,DWORD [24+edi] - sub ecx,eax - mov eax,0 - adc eax,eax - sub ecx,edx - adc eax,0 - mov DWORD [24+ebx],ecx - jmp NEAR L$029pw_end -L$030pw_pos: - and ebp,4294967288 - jz NEAR L$033pw_pos_finish -L$034pw_pos_loop: - ; dl>0 Round 0 - mov ecx,DWORD [esi] - sub ecx,eax - mov DWORD [ebx],ecx - jnc NEAR L$035pw_nc0 - ; dl>0 Round 1 - mov ecx,DWORD [4+esi] - sub ecx,eax - mov DWORD [4+ebx],ecx - jnc NEAR L$036pw_nc1 - ; dl>0 Round 2 - mov ecx,DWORD [8+esi] - sub ecx,eax - mov DWORD [8+ebx],ecx - jnc NEAR L$037pw_nc2 - ; dl>0 Round 3 - mov ecx,DWORD [12+esi] - sub ecx,eax - mov DWORD [12+ebx],ecx - jnc NEAR L$038pw_nc3 - ; dl>0 Round 4 - mov ecx,DWORD [16+esi] - sub ecx,eax - mov DWORD [16+ebx],ecx - jnc NEAR L$039pw_nc4 - ; dl>0 Round 5 - mov ecx,DWORD [20+esi] - sub ecx,eax - mov DWORD [20+ebx],ecx - jnc NEAR L$040pw_nc5 - ; dl>0 Round 6 - mov ecx,DWORD [24+esi] - sub ecx,eax - mov DWORD [24+ebx],ecx - jnc NEAR L$041pw_nc6 - ; dl>0 Round 7 - mov ecx,DWORD [28+esi] - sub ecx,eax - mov DWORD [28+ebx],ecx - jnc 
NEAR L$042pw_nc7 - ; - add esi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$034pw_pos_loop -L$033pw_pos_finish: - mov ebp,DWORD [36+esp] - and ebp,7 - jz NEAR L$029pw_end - ; dl>0 Tail Round 0 - mov ecx,DWORD [esi] - sub ecx,eax - mov DWORD [ebx],ecx - jnc NEAR L$043pw_tail_nc0 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 1 - mov ecx,DWORD [4+esi] - sub ecx,eax - mov DWORD [4+ebx],ecx - jnc NEAR L$044pw_tail_nc1 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 2 - mov ecx,DWORD [8+esi] - sub ecx,eax - mov DWORD [8+ebx],ecx - jnc NEAR L$045pw_tail_nc2 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 3 - mov ecx,DWORD [12+esi] - sub ecx,eax - mov DWORD [12+ebx],ecx - jnc NEAR L$046pw_tail_nc3 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 4 - mov ecx,DWORD [16+esi] - sub ecx,eax - mov DWORD [16+ebx],ecx - jnc NEAR L$047pw_tail_nc4 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 5 - mov ecx,DWORD [20+esi] - sub ecx,eax - mov DWORD [20+ebx],ecx - jnc NEAR L$048pw_tail_nc5 - dec ebp - jz NEAR L$029pw_end - ; dl>0 Tail Round 6 - mov ecx,DWORD [24+esi] - sub ecx,eax - mov DWORD [24+ebx],ecx - jnc NEAR L$049pw_tail_nc6 - mov eax,1 - jmp NEAR L$029pw_end -L$050pw_nc_loop: - mov ecx,DWORD [esi] - mov DWORD [ebx],ecx -L$035pw_nc0: - mov ecx,DWORD [4+esi] - mov DWORD [4+ebx],ecx -L$036pw_nc1: - mov ecx,DWORD [8+esi] - mov DWORD [8+ebx],ecx -L$037pw_nc2: - mov ecx,DWORD [12+esi] - mov DWORD [12+ebx],ecx -L$038pw_nc3: - mov ecx,DWORD [16+esi] - mov DWORD [16+ebx],ecx -L$039pw_nc4: - mov ecx,DWORD [20+esi] - mov DWORD [20+ebx],ecx -L$040pw_nc5: - mov ecx,DWORD [24+esi] - mov DWORD [24+ebx],ecx -L$041pw_nc6: - mov ecx,DWORD [28+esi] - mov DWORD [28+ebx],ecx -L$042pw_nc7: - ; - add esi,32 - add ebx,32 - sub ebp,8 - jnz NEAR L$050pw_nc_loop - mov ebp,DWORD [36+esp] - and ebp,7 - jz NEAR L$051pw_nc_end - mov ecx,DWORD [esi] - mov DWORD [ebx],ecx -L$043pw_tail_nc0: - dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [4+esi] - mov DWORD [4+ebx],ecx -L$044pw_tail_nc1: - 
dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [8+esi] - mov DWORD [8+ebx],ecx -L$045pw_tail_nc2: - dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [12+esi] - mov DWORD [12+ebx],ecx -L$046pw_tail_nc3: - dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [16+esi] - mov DWORD [16+ebx],ecx -L$047pw_tail_nc4: - dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [20+esi] - mov DWORD [20+ebx],ecx -L$048pw_tail_nc5: - dec ebp - jz NEAR L$051pw_nc_end - mov ecx,DWORD [24+esi] - mov DWORD [24+ebx],ecx -L$049pw_tail_nc6: -L$051pw_nc_end: - mov eax,0 -L$029pw_end: - pop edi - pop esi - pop ebx - pop ebp - ret -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/co-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/co-586.asm deleted file mode 100644 index b6784bf928..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/co-586.asm +++ /dev/null @@ -1,1266 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _bn_mul_comba8 -align 16 -_bn_mul_comba8: -L$_bn_mul_comba8_begin: - push esi - mov esi,DWORD [12+esp] - push edi - mov edi,DWORD [20+esp] - push ebp - push ebx - xor ebx,ebx - mov eax,DWORD [esi] - xor ecx,ecx - mov edx,DWORD [edi] - ; ################## Calculate word 0 - xor ebp,ebp - ; mul a[0]*b[0] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [edi] - adc ebp,0 - mov DWORD [eax],ebx - mov eax,DWORD [4+esi] - ; saved r[0] - ; ################## Calculate word 1 - xor ebx,ebx - ; mul a[1]*b[0] - mul edx - add ecx,eax - mov eax,DWORD [esi] - adc ebp,edx - mov edx,DWORD [4+edi] - adc ebx,0 - ; mul a[0]*b[1] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [edi] - adc ebx,0 - mov DWORD [4+eax],ecx - mov eax,DWORD [8+esi] - ; saved r[1] - ; ################## Calculate word 2 - xor ecx,ecx - ; mul a[2]*b[0] - mul edx - add ebp,eax - mov eax,DWORD [4+esi] - adc ebx,edx - mov edx,DWORD [4+edi] - adc ecx,0 - ; mul a[1]*b[1] - mul edx - add ebp,eax - mov eax,DWORD [esi] - adc ebx,edx - mov edx,DWORD [8+edi] - adc ecx,0 - ; mul a[0]*b[2] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [edi] - adc ecx,0 - mov DWORD [8+eax],ebp - mov eax,DWORD [12+esi] - ; saved r[2] - ; ################## Calculate word 3 - xor ebp,ebp - ; mul a[3]*b[0] - mul edx - add ebx,eax - mov eax,DWORD [8+esi] - adc ecx,edx - mov edx,DWORD [4+edi] - adc ebp,0 - ; mul a[2]*b[1] - mul edx - add ebx,eax - mov eax,DWORD [4+esi] - adc ecx,edx - mov edx,DWORD [8+edi] - adc ebp,0 - ; mul a[1]*b[2] - mul edx - add ebx,eax - mov eax,DWORD [esi] - adc ecx,edx - mov edx,DWORD [12+edi] - adc ebp,0 - ; mul a[0]*b[3] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [edi] - adc ebp,0 - mov DWORD [12+eax],ebx - 
mov eax,DWORD [16+esi] - ; saved r[3] - ; ################## Calculate word 4 - xor ebx,ebx - ; mul a[4]*b[0] - mul edx - add ecx,eax - mov eax,DWORD [12+esi] - adc ebp,edx - mov edx,DWORD [4+edi] - adc ebx,0 - ; mul a[3]*b[1] - mul edx - add ecx,eax - mov eax,DWORD [8+esi] - adc ebp,edx - mov edx,DWORD [8+edi] - adc ebx,0 - ; mul a[2]*b[2] - mul edx - add ecx,eax - mov eax,DWORD [4+esi] - adc ebp,edx - mov edx,DWORD [12+edi] - adc ebx,0 - ; mul a[1]*b[3] - mul edx - add ecx,eax - mov eax,DWORD [esi] - adc ebp,edx - mov edx,DWORD [16+edi] - adc ebx,0 - ; mul a[0]*b[4] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [edi] - adc ebx,0 - mov DWORD [16+eax],ecx - mov eax,DWORD [20+esi] - ; saved r[4] - ; ################## Calculate word 5 - xor ecx,ecx - ; mul a[5]*b[0] - mul edx - add ebp,eax - mov eax,DWORD [16+esi] - adc ebx,edx - mov edx,DWORD [4+edi] - adc ecx,0 - ; mul a[4]*b[1] - mul edx - add ebp,eax - mov eax,DWORD [12+esi] - adc ebx,edx - mov edx,DWORD [8+edi] - adc ecx,0 - ; mul a[3]*b[2] - mul edx - add ebp,eax - mov eax,DWORD [8+esi] - adc ebx,edx - mov edx,DWORD [12+edi] - adc ecx,0 - ; mul a[2]*b[3] - mul edx - add ebp,eax - mov eax,DWORD [4+esi] - adc ebx,edx - mov edx,DWORD [16+edi] - adc ecx,0 - ; mul a[1]*b[4] - mul edx - add ebp,eax - mov eax,DWORD [esi] - adc ebx,edx - mov edx,DWORD [20+edi] - adc ecx,0 - ; mul a[0]*b[5] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [edi] - adc ecx,0 - mov DWORD [20+eax],ebp - mov eax,DWORD [24+esi] - ; saved r[5] - ; ################## Calculate word 6 - xor ebp,ebp - ; mul a[6]*b[0] - mul edx - add ebx,eax - mov eax,DWORD [20+esi] - adc ecx,edx - mov edx,DWORD [4+edi] - adc ebp,0 - ; mul a[5]*b[1] - mul edx - add ebx,eax - mov eax,DWORD [16+esi] - adc ecx,edx - mov edx,DWORD [8+edi] - adc ebp,0 - ; mul a[4]*b[2] - mul edx - add ebx,eax - mov eax,DWORD [12+esi] - adc ecx,edx - mov edx,DWORD [12+edi] - adc ebp,0 - ; mul a[3]*b[3] - mul edx - add 
ebx,eax - mov eax,DWORD [8+esi] - adc ecx,edx - mov edx,DWORD [16+edi] - adc ebp,0 - ; mul a[2]*b[4] - mul edx - add ebx,eax - mov eax,DWORD [4+esi] - adc ecx,edx - mov edx,DWORD [20+edi] - adc ebp,0 - ; mul a[1]*b[5] - mul edx - add ebx,eax - mov eax,DWORD [esi] - adc ecx,edx - mov edx,DWORD [24+edi] - adc ebp,0 - ; mul a[0]*b[6] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [edi] - adc ebp,0 - mov DWORD [24+eax],ebx - mov eax,DWORD [28+esi] - ; saved r[6] - ; ################## Calculate word 7 - xor ebx,ebx - ; mul a[7]*b[0] - mul edx - add ecx,eax - mov eax,DWORD [24+esi] - adc ebp,edx - mov edx,DWORD [4+edi] - adc ebx,0 - ; mul a[6]*b[1] - mul edx - add ecx,eax - mov eax,DWORD [20+esi] - adc ebp,edx - mov edx,DWORD [8+edi] - adc ebx,0 - ; mul a[5]*b[2] - mul edx - add ecx,eax - mov eax,DWORD [16+esi] - adc ebp,edx - mov edx,DWORD [12+edi] - adc ebx,0 - ; mul a[4]*b[3] - mul edx - add ecx,eax - mov eax,DWORD [12+esi] - adc ebp,edx - mov edx,DWORD [16+edi] - adc ebx,0 - ; mul a[3]*b[4] - mul edx - add ecx,eax - mov eax,DWORD [8+esi] - adc ebp,edx - mov edx,DWORD [20+edi] - adc ebx,0 - ; mul a[2]*b[5] - mul edx - add ecx,eax - mov eax,DWORD [4+esi] - adc ebp,edx - mov edx,DWORD [24+edi] - adc ebx,0 - ; mul a[1]*b[6] - mul edx - add ecx,eax - mov eax,DWORD [esi] - adc ebp,edx - mov edx,DWORD [28+edi] - adc ebx,0 - ; mul a[0]*b[7] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [4+edi] - adc ebx,0 - mov DWORD [28+eax],ecx - mov eax,DWORD [28+esi] - ; saved r[7] - ; ################## Calculate word 8 - xor ecx,ecx - ; mul a[7]*b[1] - mul edx - add ebp,eax - mov eax,DWORD [24+esi] - adc ebx,edx - mov edx,DWORD [8+edi] - adc ecx,0 - ; mul a[6]*b[2] - mul edx - add ebp,eax - mov eax,DWORD [20+esi] - adc ebx,edx - mov edx,DWORD [12+edi] - adc ecx,0 - ; mul a[5]*b[3] - mul edx - add ebp,eax - mov eax,DWORD [16+esi] - adc ebx,edx - mov edx,DWORD [16+edi] - adc ecx,0 - ; mul a[4]*b[4] - mul edx - add ebp,eax 
- mov eax,DWORD [12+esi] - adc ebx,edx - mov edx,DWORD [20+edi] - adc ecx,0 - ; mul a[3]*b[5] - mul edx - add ebp,eax - mov eax,DWORD [8+esi] - adc ebx,edx - mov edx,DWORD [24+edi] - adc ecx,0 - ; mul a[2]*b[6] - mul edx - add ebp,eax - mov eax,DWORD [4+esi] - adc ebx,edx - mov edx,DWORD [28+edi] - adc ecx,0 - ; mul a[1]*b[7] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [8+edi] - adc ecx,0 - mov DWORD [32+eax],ebp - mov eax,DWORD [28+esi] - ; saved r[8] - ; ################## Calculate word 9 - xor ebp,ebp - ; mul a[7]*b[2] - mul edx - add ebx,eax - mov eax,DWORD [24+esi] - adc ecx,edx - mov edx,DWORD [12+edi] - adc ebp,0 - ; mul a[6]*b[3] - mul edx - add ebx,eax - mov eax,DWORD [20+esi] - adc ecx,edx - mov edx,DWORD [16+edi] - adc ebp,0 - ; mul a[5]*b[4] - mul edx - add ebx,eax - mov eax,DWORD [16+esi] - adc ecx,edx - mov edx,DWORD [20+edi] - adc ebp,0 - ; mul a[4]*b[5] - mul edx - add ebx,eax - mov eax,DWORD [12+esi] - adc ecx,edx - mov edx,DWORD [24+edi] - adc ebp,0 - ; mul a[3]*b[6] - mul edx - add ebx,eax - mov eax,DWORD [8+esi] - adc ecx,edx - mov edx,DWORD [28+edi] - adc ebp,0 - ; mul a[2]*b[7] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [12+edi] - adc ebp,0 - mov DWORD [36+eax],ebx - mov eax,DWORD [28+esi] - ; saved r[9] - ; ################## Calculate word 10 - xor ebx,ebx - ; mul a[7]*b[3] - mul edx - add ecx,eax - mov eax,DWORD [24+esi] - adc ebp,edx - mov edx,DWORD [16+edi] - adc ebx,0 - ; mul a[6]*b[4] - mul edx - add ecx,eax - mov eax,DWORD [20+esi] - adc ebp,edx - mov edx,DWORD [20+edi] - adc ebx,0 - ; mul a[5]*b[5] - mul edx - add ecx,eax - mov eax,DWORD [16+esi] - adc ebp,edx - mov edx,DWORD [24+edi] - adc ebx,0 - ; mul a[4]*b[6] - mul edx - add ecx,eax - mov eax,DWORD [12+esi] - adc ebp,edx - mov edx,DWORD [28+edi] - adc ebx,0 - ; mul a[3]*b[7] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [16+edi] - adc ebx,0 - mov DWORD [40+eax],ecx - mov 
eax,DWORD [28+esi] - ; saved r[10] - ; ################## Calculate word 11 - xor ecx,ecx - ; mul a[7]*b[4] - mul edx - add ebp,eax - mov eax,DWORD [24+esi] - adc ebx,edx - mov edx,DWORD [20+edi] - adc ecx,0 - ; mul a[6]*b[5] - mul edx - add ebp,eax - mov eax,DWORD [20+esi] - adc ebx,edx - mov edx,DWORD [24+edi] - adc ecx,0 - ; mul a[5]*b[6] - mul edx - add ebp,eax - mov eax,DWORD [16+esi] - adc ebx,edx - mov edx,DWORD [28+edi] - adc ecx,0 - ; mul a[4]*b[7] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [20+edi] - adc ecx,0 - mov DWORD [44+eax],ebp - mov eax,DWORD [28+esi] - ; saved r[11] - ; ################## Calculate word 12 - xor ebp,ebp - ; mul a[7]*b[5] - mul edx - add ebx,eax - mov eax,DWORD [24+esi] - adc ecx,edx - mov edx,DWORD [24+edi] - adc ebp,0 - ; mul a[6]*b[6] - mul edx - add ebx,eax - mov eax,DWORD [20+esi] - adc ecx,edx - mov edx,DWORD [28+edi] - adc ebp,0 - ; mul a[5]*b[7] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [24+edi] - adc ebp,0 - mov DWORD [48+eax],ebx - mov eax,DWORD [28+esi] - ; saved r[12] - ; ################## Calculate word 13 - xor ebx,ebx - ; mul a[7]*b[6] - mul edx - add ecx,eax - mov eax,DWORD [24+esi] - adc ebp,edx - mov edx,DWORD [28+edi] - adc ebx,0 - ; mul a[6]*b[7] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [28+edi] - adc ebx,0 - mov DWORD [52+eax],ecx - mov eax,DWORD [28+esi] - ; saved r[13] - ; ################## Calculate word 14 - xor ecx,ecx - ; mul a[7]*b[7] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - adc ecx,0 - mov DWORD [56+eax],ebp - ; saved r[14] - ; save r[15] - mov DWORD [60+eax],ebx - pop ebx - pop ebp - pop edi - pop esi - ret -global _bn_mul_comba4 -align 16 -_bn_mul_comba4: -L$_bn_mul_comba4_begin: - push esi - mov esi,DWORD [12+esp] - push edi - mov edi,DWORD [20+esp] - push ebp - push ebx - xor ebx,ebx - mov eax,DWORD [esi] - xor ecx,ecx - mov edx,DWORD [edi] - ; ################## 
Calculate word 0 - xor ebp,ebp - ; mul a[0]*b[0] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [edi] - adc ebp,0 - mov DWORD [eax],ebx - mov eax,DWORD [4+esi] - ; saved r[0] - ; ################## Calculate word 1 - xor ebx,ebx - ; mul a[1]*b[0] - mul edx - add ecx,eax - mov eax,DWORD [esi] - adc ebp,edx - mov edx,DWORD [4+edi] - adc ebx,0 - ; mul a[0]*b[1] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [edi] - adc ebx,0 - mov DWORD [4+eax],ecx - mov eax,DWORD [8+esi] - ; saved r[1] - ; ################## Calculate word 2 - xor ecx,ecx - ; mul a[2]*b[0] - mul edx - add ebp,eax - mov eax,DWORD [4+esi] - adc ebx,edx - mov edx,DWORD [4+edi] - adc ecx,0 - ; mul a[1]*b[1] - mul edx - add ebp,eax - mov eax,DWORD [esi] - adc ebx,edx - mov edx,DWORD [8+edi] - adc ecx,0 - ; mul a[0]*b[2] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [edi] - adc ecx,0 - mov DWORD [8+eax],ebp - mov eax,DWORD [12+esi] - ; saved r[2] - ; ################## Calculate word 3 - xor ebp,ebp - ; mul a[3]*b[0] - mul edx - add ebx,eax - mov eax,DWORD [8+esi] - adc ecx,edx - mov edx,DWORD [4+edi] - adc ebp,0 - ; mul a[2]*b[1] - mul edx - add ebx,eax - mov eax,DWORD [4+esi] - adc ecx,edx - mov edx,DWORD [8+edi] - adc ebp,0 - ; mul a[1]*b[2] - mul edx - add ebx,eax - mov eax,DWORD [esi] - adc ecx,edx - mov edx,DWORD [12+edi] - adc ebp,0 - ; mul a[0]*b[3] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - mov edx,DWORD [4+edi] - adc ebp,0 - mov DWORD [12+eax],ebx - mov eax,DWORD [12+esi] - ; saved r[3] - ; ################## Calculate word 4 - xor ebx,ebx - ; mul a[3]*b[1] - mul edx - add ecx,eax - mov eax,DWORD [8+esi] - adc ebp,edx - mov edx,DWORD [8+edi] - adc ebx,0 - ; mul a[2]*b[2] - mul edx - add ecx,eax - mov eax,DWORD [4+esi] - adc ebp,edx - mov edx,DWORD [12+edi] - adc ebx,0 - ; mul a[1]*b[3] - mul edx - add ecx,eax - mov eax,DWORD [20+esp] - adc ebp,edx - mov edx,DWORD [8+edi] - adc 
ebx,0 - mov DWORD [16+eax],ecx - mov eax,DWORD [12+esi] - ; saved r[4] - ; ################## Calculate word 5 - xor ecx,ecx - ; mul a[3]*b[2] - mul edx - add ebp,eax - mov eax,DWORD [8+esi] - adc ebx,edx - mov edx,DWORD [12+edi] - adc ecx,0 - ; mul a[2]*b[3] - mul edx - add ebp,eax - mov eax,DWORD [20+esp] - adc ebx,edx - mov edx,DWORD [12+edi] - adc ecx,0 - mov DWORD [20+eax],ebp - mov eax,DWORD [12+esi] - ; saved r[5] - ; ################## Calculate word 6 - xor ebp,ebp - ; mul a[3]*b[3] - mul edx - add ebx,eax - mov eax,DWORD [20+esp] - adc ecx,edx - adc ebp,0 - mov DWORD [24+eax],ebx - ; saved r[6] - ; save r[7] - mov DWORD [28+eax],ecx - pop ebx - pop ebp - pop edi - pop esi - ret -global _bn_sqr_comba8 -align 16 -_bn_sqr_comba8: -L$_bn_sqr_comba8_begin: - push esi - push edi - push ebp - push ebx - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - xor ebx,ebx - xor ecx,ecx - mov eax,DWORD [esi] - ; ############### Calculate word 0 - xor ebp,ebp - ; sqr a[0]*a[0] - mul eax - add ebx,eax - adc ecx,edx - mov edx,DWORD [esi] - adc ebp,0 - mov DWORD [edi],ebx - mov eax,DWORD [4+esi] - ; saved r[0] - ; ############### Calculate word 1 - xor ebx,ebx - ; sqr a[1]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [8+esi] - adc ebx,0 - mov DWORD [4+edi],ecx - mov edx,DWORD [esi] - ; saved r[1] - ; ############### Calculate word 2 - xor ecx,ecx - ; sqr a[2]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [4+esi] - adc ecx,0 - ; sqr a[1]*a[1] - mul eax - add ebp,eax - adc ebx,edx - mov edx,DWORD [esi] - adc ecx,0 - mov DWORD [8+edi],ebp - mov eax,DWORD [12+esi] - ; saved r[2] - ; ############### Calculate word 3 - xor ebp,ebp - ; sqr a[3]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [8+esi] - adc ebp,0 - mov edx,DWORD [4+esi] - ; sqr a[2]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc 
ecx,edx - mov eax,DWORD [16+esi] - adc ebp,0 - mov DWORD [12+edi],ebx - mov edx,DWORD [esi] - ; saved r[3] - ; ############### Calculate word 4 - xor ebx,ebx - ; sqr a[4]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [12+esi] - adc ebx,0 - mov edx,DWORD [4+esi] - ; sqr a[3]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [8+esi] - adc ebx,0 - ; sqr a[2]*a[2] - mul eax - add ecx,eax - adc ebp,edx - mov edx,DWORD [esi] - adc ebx,0 - mov DWORD [16+edi],ecx - mov eax,DWORD [20+esi] - ; saved r[4] - ; ############### Calculate word 5 - xor ecx,ecx - ; sqr a[5]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [16+esi] - adc ecx,0 - mov edx,DWORD [4+esi] - ; sqr a[4]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [12+esi] - adc ecx,0 - mov edx,DWORD [8+esi] - ; sqr a[3]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [24+esi] - adc ecx,0 - mov DWORD [20+edi],ebp - mov edx,DWORD [esi] - ; saved r[5] - ; ############### Calculate word 6 - xor ebp,ebp - ; sqr a[6]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [20+esi] - adc ebp,0 - mov edx,DWORD [4+esi] - ; sqr a[5]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [16+esi] - adc ebp,0 - mov edx,DWORD [8+esi] - ; sqr a[4]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [12+esi] - adc ebp,0 - ; sqr a[3]*a[3] - mul eax - add ebx,eax - adc ecx,edx - mov edx,DWORD [esi] - adc ebp,0 - mov DWORD [24+edi],ebx - mov eax,DWORD [28+esi] - ; saved r[6] - ; ############### Calculate word 7 - xor ebx,ebx - ; sqr a[7]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [24+esi] - adc ebx,0 
- mov edx,DWORD [4+esi] - ; sqr a[6]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [20+esi] - adc ebx,0 - mov edx,DWORD [8+esi] - ; sqr a[5]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [16+esi] - adc ebx,0 - mov edx,DWORD [12+esi] - ; sqr a[4]*a[3] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [28+esi] - adc ebx,0 - mov DWORD [28+edi],ecx - mov edx,DWORD [4+esi] - ; saved r[7] - ; ############### Calculate word 8 - xor ecx,ecx - ; sqr a[7]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [24+esi] - adc ecx,0 - mov edx,DWORD [8+esi] - ; sqr a[6]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [20+esi] - adc ecx,0 - mov edx,DWORD [12+esi] - ; sqr a[5]*a[3] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [16+esi] - adc ecx,0 - ; sqr a[4]*a[4] - mul eax - add ebp,eax - adc ebx,edx - mov edx,DWORD [8+esi] - adc ecx,0 - mov DWORD [32+edi],ebp - mov eax,DWORD [28+esi] - ; saved r[8] - ; ############### Calculate word 9 - xor ebp,ebp - ; sqr a[7]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [24+esi] - adc ebp,0 - mov edx,DWORD [12+esi] - ; sqr a[6]*a[3] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [20+esi] - adc ebp,0 - mov edx,DWORD [16+esi] - ; sqr a[5]*a[4] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [28+esi] - adc ebp,0 - mov DWORD [36+edi],ebx - mov edx,DWORD [12+esi] - ; saved r[9] - ; ############### Calculate word 10 - xor ebx,ebx - ; sqr a[7]*a[3] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [24+esi] - adc ebx,0 - mov edx,DWORD [16+esi] - ; sqr a[6]*a[4] - mul edx - 
add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [20+esi] - adc ebx,0 - ; sqr a[5]*a[5] - mul eax - add ecx,eax - adc ebp,edx - mov edx,DWORD [16+esi] - adc ebx,0 - mov DWORD [40+edi],ecx - mov eax,DWORD [28+esi] - ; saved r[10] - ; ############### Calculate word 11 - xor ecx,ecx - ; sqr a[7]*a[4] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [24+esi] - adc ecx,0 - mov edx,DWORD [20+esi] - ; sqr a[6]*a[5] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [28+esi] - adc ecx,0 - mov DWORD [44+edi],ebp - mov edx,DWORD [20+esi] - ; saved r[11] - ; ############### Calculate word 12 - xor ebp,ebp - ; sqr a[7]*a[5] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [24+esi] - adc ebp,0 - ; sqr a[6]*a[6] - mul eax - add ebx,eax - adc ecx,edx - mov edx,DWORD [24+esi] - adc ebp,0 - mov DWORD [48+edi],ebx - mov eax,DWORD [28+esi] - ; saved r[12] - ; ############### Calculate word 13 - xor ebx,ebx - ; sqr a[7]*a[6] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [28+esi] - adc ebx,0 - mov DWORD [52+edi],ecx - ; saved r[13] - ; ############### Calculate word 14 - xor ecx,ecx - ; sqr a[7]*a[7] - mul eax - add ebp,eax - adc ebx,edx - adc ecx,0 - mov DWORD [56+edi],ebp - ; saved r[14] - mov DWORD [60+edi],ebx - pop ebx - pop ebp - pop edi - pop esi - ret -global _bn_sqr_comba4 -align 16 -_bn_sqr_comba4: -L$_bn_sqr_comba4_begin: - push esi - push edi - push ebp - push ebx - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - xor ebx,ebx - xor ecx,ecx - mov eax,DWORD [esi] - ; ############### Calculate word 0 - xor ebp,ebp - ; sqr a[0]*a[0] - mul eax - add ebx,eax - adc ecx,edx - mov edx,DWORD [esi] - adc ebp,0 - mov DWORD [edi],ebx - mov eax,DWORD [4+esi] - ; saved r[0] - ; ############### Calculate word 1 - xor ebx,ebx - ; sqr a[1]*a[0] - mul edx - add eax,eax - adc 
edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [8+esi] - adc ebx,0 - mov DWORD [4+edi],ecx - mov edx,DWORD [esi] - ; saved r[1] - ; ############### Calculate word 2 - xor ecx,ecx - ; sqr a[2]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [4+esi] - adc ecx,0 - ; sqr a[1]*a[1] - mul eax - add ebp,eax - adc ebx,edx - mov edx,DWORD [esi] - adc ecx,0 - mov DWORD [8+edi],ebp - mov eax,DWORD [12+esi] - ; saved r[2] - ; ############### Calculate word 3 - xor ebp,ebp - ; sqr a[3]*a[0] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [8+esi] - adc ebp,0 - mov edx,DWORD [4+esi] - ; sqr a[2]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebp,0 - add ebx,eax - adc ecx,edx - mov eax,DWORD [12+esi] - adc ebp,0 - mov DWORD [12+edi],ebx - mov edx,DWORD [4+esi] - ; saved r[3] - ; ############### Calculate word 4 - xor ebx,ebx - ; sqr a[3]*a[1] - mul edx - add eax,eax - adc edx,edx - adc ebx,0 - add ecx,eax - adc ebp,edx - mov eax,DWORD [8+esi] - adc ebx,0 - ; sqr a[2]*a[2] - mul eax - add ecx,eax - adc ebp,edx - mov edx,DWORD [8+esi] - adc ebx,0 - mov DWORD [16+edi],ecx - mov eax,DWORD [12+esi] - ; saved r[4] - ; ############### Calculate word 5 - xor ecx,ecx - ; sqr a[3]*a[2] - mul edx - add eax,eax - adc edx,edx - adc ecx,0 - add ebp,eax - adc ebx,edx - mov eax,DWORD [12+esi] - adc ecx,0 - mov DWORD [20+edi],ebp - ; saved r[5] - ; ############### Calculate word 6 - xor ebp,ebp - ; sqr a[3]*a[3] - mul eax - add ebx,eax - adc ecx,edx - adc ebp,0 - mov DWORD [24+edi],ebx - ; saved r[6] - mov DWORD [28+edi],ecx - pop ebx - pop ebp - pop edi - pop esi - ret diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm deleted file mode 100644 index 1d07be0aea..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm +++ /dev/null 
@@ -1,300 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _gcm_gmult_ssse3 -align 16 -_gcm_gmult_ssse3: -L$_gcm_gmult_ssse3_begin: - push ebp - push ebx - push esi - push edi - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - movdqu xmm0,[edi] - call L$000pic_point -L$000pic_point: - pop eax - movdqa xmm7,[(L$reverse_bytes-L$000pic_point)+eax] - movdqa xmm2,[(L$low4_mask-L$000pic_point)+eax] -db 102,15,56,0,199 - movdqa xmm1,xmm2 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm2 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - mov eax,5 -L$001loop_row_1: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR L$001loop_row_1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov eax,5 -L$002loop_row_2: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor 
xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR L$002loop_row_2 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov eax,6 -L$003loop_row_3: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR L$003loop_row_3 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 -db 102,15,56,0,215 - movdqu [edi],xmm2 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pop edi - pop esi - pop ebx - pop ebp - ret -global _gcm_ghash_ssse3 -align 16 -_gcm_ghash_ssse3: -L$_gcm_ghash_ssse3_begin: - push ebp - push ebx - push esi - push edi - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov edx,DWORD [28+esp] - mov ecx,DWORD [32+esp] - movdqu xmm0,[edi] - call L$004pic_point -L$004pic_point: - pop ebx - movdqa xmm7,[(L$reverse_bytes-L$004pic_point)+ebx] - and ecx,-16 -db 102,15,56,0,199 - pxor xmm3,xmm3 -L$005loop_ghash: - movdqa xmm2,[(L$low4_mask-L$004pic_point)+ebx] - movdqu xmm1,[edx] -db 102,15,56,0,207 - pxor xmm0,xmm1 - movdqa xmm1,xmm2 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm2 - pxor xmm2,xmm2 - mov eax,5 -L$006loop_row_4: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR 
L$006loop_row_4 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov eax,5 -L$007loop_row_5: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR L$007loop_row_5 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov eax,6 -L$008loop_row_6: - movdqa xmm4,[esi] - lea esi,[16+esi] - movdqa xmm6,xmm2 -db 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - movdqa xmm5,xmm4 -db 102,15,56,0,224 -db 102,15,56,0,233 - pxor xmm2,xmm5 - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - sub eax,1 - jnz NEAR L$008loop_row_6 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - movdqa xmm0,xmm2 - lea esi,[esi-256] - lea edx,[16+edx] - sub ecx,16 - jnz NEAR L$005loop_ghash -db 102,15,56,0,199 - movdqu [edi],xmm0 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -L$reverse_bytes: -db 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -align 16 -L$low4_mask: -dd 252645135,252645135,252645135,252645135 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-x86.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-x86.asm deleted file mode 100644 index 753c472f04..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/ghash-x86.asm +++ /dev/null @@ -1,1072 +0,0 @@ -; 
This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _gcm_gmult_4bit_mmx -align 16 -_gcm_gmult_4bit_mmx: -L$_gcm_gmult_4bit_mmx_begin: - push ebp - push ebx - push esi - push edi - mov edi,DWORD [20+esp] - mov esi,DWORD [24+esp] - call L$000pic_point -L$000pic_point: - pop eax - lea eax,[(L$rem_4bit-L$000pic_point)+eax] - movzx ebx,BYTE [15+edi] - xor ecx,ecx - mov edx,ebx - mov cl,dl - mov ebp,14 - shl cl,4 - and edx,240 - movq mm0,[8+ecx*1+esi] - movq mm1,[ecx*1+esi] - movd ebx,mm0 - jmp NEAR L$001mmx_loop -align 16 -L$001mmx_loop: - psrlq mm0,4 - and ebx,15 - movq mm2,mm1 - psrlq mm1,4 - pxor mm0,[8+edx*1+esi] - mov cl,BYTE [ebp*1+edi] - psllq mm2,60 - pxor mm1,[ebx*8+eax] - dec ebp - movd ebx,mm0 - pxor mm1,[edx*1+esi] - mov edx,ecx - pxor mm0,mm2 - js NEAR L$002mmx_break - shl cl,4 - and ebx,15 - psrlq mm0,4 - and edx,240 - movq mm2,mm1 - psrlq mm1,4 - pxor mm0,[8+ecx*1+esi] - psllq mm2,60 - pxor mm1,[ebx*8+eax] - movd ebx,mm0 - pxor mm1,[ecx*1+esi] - pxor mm0,mm2 - jmp NEAR L$001mmx_loop -align 16 -L$002mmx_break: - shl cl,4 - and ebx,15 - psrlq mm0,4 - and edx,240 - movq mm2,mm1 - psrlq mm1,4 - pxor mm0,[8+ecx*1+esi] - psllq mm2,60 - pxor mm1,[ebx*8+eax] - movd ebx,mm0 - pxor mm1,[ecx*1+esi] - pxor mm0,mm2 - psrlq mm0,4 - and ebx,15 - movq mm2,mm1 - psrlq mm1,4 - pxor mm0,[8+edx*1+esi] - psllq mm2,60 - pxor mm1,[ebx*8+eax] - movd ebx,mm0 - pxor 
mm1,[edx*1+esi] - pxor mm0,mm2 - psrlq mm0,32 - movd edx,mm1 - psrlq mm1,32 - movd ecx,mm0 - movd ebp,mm1 - bswap ebx - bswap edx - bswap ecx - bswap ebp - emms - mov DWORD [12+edi],ebx - mov DWORD [4+edi],edx - mov DWORD [8+edi],ecx - mov DWORD [edi],ebp - pop edi - pop esi - pop ebx - pop ebp - ret -global _gcm_ghash_4bit_mmx -align 16 -_gcm_ghash_4bit_mmx: -L$_gcm_ghash_4bit_mmx_begin: - push ebp - push ebx - push esi - push edi - mov eax,DWORD [20+esp] - mov ebx,DWORD [24+esp] - mov ecx,DWORD [28+esp] - mov edx,DWORD [32+esp] - mov ebp,esp - call L$003pic_point -L$003pic_point: - pop esi - lea esi,[(L$rem_8bit-L$003pic_point)+esi] - sub esp,544 - and esp,-64 - sub esp,16 - add edx,ecx - mov DWORD [544+esp],eax - mov DWORD [552+esp],edx - mov DWORD [556+esp],ebp - add ebx,128 - lea edi,[144+esp] - lea ebp,[400+esp] - mov edx,DWORD [ebx-120] - movq mm0,[ebx-120] - movq mm3,[ebx-128] - shl edx,4 - mov BYTE [esp],dl - mov edx,DWORD [ebx-104] - movq mm2,[ebx-104] - movq mm5,[ebx-112] - movq [edi-128],mm0 - psrlq mm0,4 - movq [edi],mm3 - movq mm7,mm3 - psrlq mm3,4 - shl edx,4 - mov BYTE [1+esp],dl - mov edx,DWORD [ebx-88] - movq mm1,[ebx-88] - psllq mm7,60 - movq mm4,[ebx-96] - por mm0,mm7 - movq [edi-120],mm2 - psrlq mm2,4 - movq [8+edi],mm5 - movq mm6,mm5 - movq [ebp-128],mm0 - psrlq mm5,4 - movq [ebp],mm3 - shl edx,4 - mov BYTE [2+esp],dl - mov edx,DWORD [ebx-72] - movq mm0,[ebx-72] - psllq mm6,60 - movq mm3,[ebx-80] - por mm2,mm6 - movq [edi-112],mm1 - psrlq mm1,4 - movq [16+edi],mm4 - movq mm7,mm4 - movq [ebp-120],mm2 - psrlq mm4,4 - movq [8+ebp],mm5 - shl edx,4 - mov BYTE [3+esp],dl - mov edx,DWORD [ebx-56] - movq mm2,[ebx-56] - psllq mm7,60 - movq mm5,[ebx-64] - por mm1,mm7 - movq [edi-104],mm0 - psrlq mm0,4 - movq [24+edi],mm3 - movq mm6,mm3 - movq [ebp-112],mm1 - psrlq mm3,4 - movq [16+ebp],mm4 - shl edx,4 - mov BYTE [4+esp],dl - mov edx,DWORD [ebx-40] - movq mm1,[ebx-40] - psllq mm6,60 - movq mm4,[ebx-48] - por mm0,mm6 - movq [edi-96],mm2 - psrlq mm2,4 - 
movq [32+edi],mm5 - movq mm7,mm5 - movq [ebp-104],mm0 - psrlq mm5,4 - movq [24+ebp],mm3 - shl edx,4 - mov BYTE [5+esp],dl - mov edx,DWORD [ebx-24] - movq mm0,[ebx-24] - psllq mm7,60 - movq mm3,[ebx-32] - por mm2,mm7 - movq [edi-88],mm1 - psrlq mm1,4 - movq [40+edi],mm4 - movq mm6,mm4 - movq [ebp-96],mm2 - psrlq mm4,4 - movq [32+ebp],mm5 - shl edx,4 - mov BYTE [6+esp],dl - mov edx,DWORD [ebx-8] - movq mm2,[ebx-8] - psllq mm6,60 - movq mm5,[ebx-16] - por mm1,mm6 - movq [edi-80],mm0 - psrlq mm0,4 - movq [48+edi],mm3 - movq mm7,mm3 - movq [ebp-88],mm1 - psrlq mm3,4 - movq [40+ebp],mm4 - shl edx,4 - mov BYTE [7+esp],dl - mov edx,DWORD [8+ebx] - movq mm1,[8+ebx] - psllq mm7,60 - movq mm4,[ebx] - por mm0,mm7 - movq [edi-72],mm2 - psrlq mm2,4 - movq [56+edi],mm5 - movq mm6,mm5 - movq [ebp-80],mm0 - psrlq mm5,4 - movq [48+ebp],mm3 - shl edx,4 - mov BYTE [8+esp],dl - mov edx,DWORD [24+ebx] - movq mm0,[24+ebx] - psllq mm6,60 - movq mm3,[16+ebx] - por mm2,mm6 - movq [edi-64],mm1 - psrlq mm1,4 - movq [64+edi],mm4 - movq mm7,mm4 - movq [ebp-72],mm2 - psrlq mm4,4 - movq [56+ebp],mm5 - shl edx,4 - mov BYTE [9+esp],dl - mov edx,DWORD [40+ebx] - movq mm2,[40+ebx] - psllq mm7,60 - movq mm5,[32+ebx] - por mm1,mm7 - movq [edi-56],mm0 - psrlq mm0,4 - movq [72+edi],mm3 - movq mm6,mm3 - movq [ebp-64],mm1 - psrlq mm3,4 - movq [64+ebp],mm4 - shl edx,4 - mov BYTE [10+esp],dl - mov edx,DWORD [56+ebx] - movq mm1,[56+ebx] - psllq mm6,60 - movq mm4,[48+ebx] - por mm0,mm6 - movq [edi-48],mm2 - psrlq mm2,4 - movq [80+edi],mm5 - movq mm7,mm5 - movq [ebp-56],mm0 - psrlq mm5,4 - movq [72+ebp],mm3 - shl edx,4 - mov BYTE [11+esp],dl - mov edx,DWORD [72+ebx] - movq mm0,[72+ebx] - psllq mm7,60 - movq mm3,[64+ebx] - por mm2,mm7 - movq [edi-40],mm1 - psrlq mm1,4 - movq [88+edi],mm4 - movq mm6,mm4 - movq [ebp-48],mm2 - psrlq mm4,4 - movq [80+ebp],mm5 - shl edx,4 - mov BYTE [12+esp],dl - mov edx,DWORD [88+ebx] - movq mm2,[88+ebx] - psllq mm6,60 - movq mm5,[80+ebx] - por mm1,mm6 - movq [edi-32],mm0 - psrlq 
mm0,4 - movq [96+edi],mm3 - movq mm7,mm3 - movq [ebp-40],mm1 - psrlq mm3,4 - movq [88+ebp],mm4 - shl edx,4 - mov BYTE [13+esp],dl - mov edx,DWORD [104+ebx] - movq mm1,[104+ebx] - psllq mm7,60 - movq mm4,[96+ebx] - por mm0,mm7 - movq [edi-24],mm2 - psrlq mm2,4 - movq [104+edi],mm5 - movq mm6,mm5 - movq [ebp-32],mm0 - psrlq mm5,4 - movq [96+ebp],mm3 - shl edx,4 - mov BYTE [14+esp],dl - mov edx,DWORD [120+ebx] - movq mm0,[120+ebx] - psllq mm6,60 - movq mm3,[112+ebx] - por mm2,mm6 - movq [edi-16],mm1 - psrlq mm1,4 - movq [112+edi],mm4 - movq mm7,mm4 - movq [ebp-24],mm2 - psrlq mm4,4 - movq [104+ebp],mm5 - shl edx,4 - mov BYTE [15+esp],dl - psllq mm7,60 - por mm1,mm7 - movq [edi-8],mm0 - psrlq mm0,4 - movq [120+edi],mm3 - movq mm6,mm3 - movq [ebp-16],mm1 - psrlq mm3,4 - movq [112+ebp],mm4 - psllq mm6,60 - por mm0,mm6 - movq [ebp-8],mm0 - movq [120+ebp],mm3 - movq mm6,[eax] - mov ebx,DWORD [8+eax] - mov edx,DWORD [12+eax] -align 16 -L$004outer: - xor edx,DWORD [12+ecx] - xor ebx,DWORD [8+ecx] - pxor mm6,[ecx] - lea ecx,[16+ecx] - mov DWORD [536+esp],ebx - movq [528+esp],mm6 - mov DWORD [548+esp],ecx - xor eax,eax - rol edx,8 - mov al,dl - mov ebp,eax - and al,15 - shr ebp,4 - pxor mm0,mm0 - rol edx,8 - pxor mm1,mm1 - pxor mm2,mm2 - movq mm7,[16+eax*8+esp] - movq mm6,[144+eax*8+esp] - mov al,dl - movd ebx,mm7 - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - shr edi,4 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - shr ebp,4 - pinsrw mm2,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - mov edx,DWORD [536+esp] - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq 
mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm2 - shr edi,4 - pinsrw mm1,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm1 - shr ebp,4 - pinsrw mm0,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm0 - shr edi,4 - pinsrw mm2,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm2 - shr ebp,4 - pinsrw mm1,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - mov edx,DWORD [532+esp] - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm1 - shr edi,4 - pinsrw mm0,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm0 - shr ebp,4 - pinsrw mm2,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor 
mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm2 - shr edi,4 - pinsrw mm1,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm1 - shr ebp,4 - pinsrw mm0,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - mov edx,DWORD [528+esp] - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm0 - shr edi,4 - pinsrw mm2,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm2 - shr ebp,4 - pinsrw mm1,WORD [ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm1 - shr edi,4 - pinsrw mm0,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - xor bl,BYTE [ebp*1+esp] - mov al,dl - movd ecx,mm7 - movzx ebx,bl - psrlq mm7,8 - movq mm3,mm6 - mov ebp,eax - psrlq mm6,8 - pxor mm7,[272+edi*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm0 - shr ebp,4 - pinsrw mm2,WORD 
[ebx*2+esi],2 - pxor mm7,[16+eax*8+esp] - rol edx,8 - pxor mm6,[144+eax*8+esp] - pxor mm7,mm3 - pxor mm6,[400+edi*8+esp] - xor cl,BYTE [edi*1+esp] - mov al,dl - mov edx,DWORD [524+esp] - movd ebx,mm7 - movzx ecx,cl - psrlq mm7,8 - movq mm3,mm6 - mov edi,eax - psrlq mm6,8 - pxor mm7,[272+ebp*8+esp] - and al,15 - psllq mm3,56 - pxor mm6,mm2 - shr edi,4 - pinsrw mm1,WORD [ecx*2+esi],2 - pxor mm7,[16+eax*8+esp] - pxor mm6,[144+eax*8+esp] - xor bl,BYTE [ebp*1+esp] - pxor mm7,mm3 - pxor mm6,[400+ebp*8+esp] - movzx ebx,bl - pxor mm2,mm2 - psllq mm1,4 - movd ecx,mm7 - psrlq mm7,4 - movq mm3,mm6 - psrlq mm6,4 - shl ecx,4 - pxor mm7,[16+edi*8+esp] - psllq mm3,60 - movzx ecx,cl - pxor mm7,mm3 - pxor mm6,[144+edi*8+esp] - pinsrw mm0,WORD [ebx*2+esi],2 - pxor mm6,mm1 - movd edx,mm7 - pinsrw mm2,WORD [ecx*2+esi],3 - psllq mm0,12 - pxor mm6,mm0 - psrlq mm7,32 - pxor mm6,mm2 - mov ecx,DWORD [548+esp] - movd ebx,mm7 - movq mm3,mm6 - psllw mm6,8 - psrlw mm3,8 - por mm6,mm3 - bswap edx - pshufw mm6,mm6,27 - bswap ebx - cmp ecx,DWORD [552+esp] - jne NEAR L$004outer - mov eax,DWORD [544+esp] - mov DWORD [12+eax],edx - mov DWORD [8+eax],ebx - movq [eax],mm6 - mov esp,DWORD [556+esp] - emms - pop edi - pop esi - pop ebx - pop ebp - ret -global _gcm_init_clmul -align 16 -_gcm_init_clmul: -L$_gcm_init_clmul_begin: - mov edx,DWORD [4+esp] - mov eax,DWORD [8+esp] - call L$005pic -L$005pic: - pop ecx - lea ecx,[(L$bswap-L$005pic)+ecx] - movdqu xmm2,[eax] - pshufd xmm2,xmm2,78 - pshufd xmm4,xmm2,255 - movdqa xmm3,xmm2 - psllq xmm2,1 - pxor xmm5,xmm5 - psrlq xmm3,63 - pcmpgtd xmm5,xmm4 - pslldq xmm3,8 - por xmm2,xmm3 - pand xmm5,[16+ecx] - pxor xmm2,xmm5 - movdqa xmm0,xmm2 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pshufd xmm4,xmm2,78 - pxor xmm3,xmm0 - pxor xmm4,xmm2 -db 102,15,58,68,194,0 -db 102,15,58,68,202,17 -db 102,15,58,68,220,0 - xorps xmm3,xmm0 - xorps xmm3,xmm1 - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - 
psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - pshufd xmm3,xmm2,78 - pshufd xmm4,xmm0,78 - pxor xmm3,xmm2 - movdqu [edx],xmm2 - pxor xmm4,xmm0 - movdqu [16+edx],xmm0 -db 102,15,58,15,227,8 - movdqu [32+edx],xmm4 - ret -global _gcm_gmult_clmul -align 16 -_gcm_gmult_clmul: -L$_gcm_gmult_clmul_begin: - mov eax,DWORD [4+esp] - mov edx,DWORD [8+esp] - call L$006pic -L$006pic: - pop ecx - lea ecx,[(L$bswap-L$006pic)+ecx] - movdqu xmm0,[eax] - movdqa xmm5,[ecx] - movups xmm2,[edx] -db 102,15,56,0,197 - movups xmm4,[32+edx] - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -db 102,15,58,68,194,0 -db 102,15,58,68,202,17 -db 102,15,58,68,220,0 - xorps xmm3,xmm0 - xorps xmm3,xmm1 - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 -db 102,15,56,0,197 - movdqu [eax],xmm0 - ret -global _gcm_ghash_clmul -align 16 -_gcm_ghash_clmul: -L$_gcm_ghash_clmul_begin: - push ebp - push ebx - push esi - push edi - mov eax,DWORD [20+esp] - mov edx,DWORD [24+esp] - mov esi,DWORD [28+esp] - mov ebx,DWORD [32+esp] - call L$007pic -L$007pic: - pop ecx - lea ecx,[(L$bswap-L$007pic)+ecx] - movdqu xmm0,[eax] - movdqa xmm5,[ecx] - movdqu xmm2,[edx] -db 102,15,56,0,197 - sub ebx,16 - jz NEAR L$008odd_tail - movdqu xmm3,[esi] - movdqu xmm6,[16+esi] -db 102,15,56,0,221 -db 102,15,56,0,245 - movdqu xmm5,[32+edx] - pxor xmm0,xmm3 - pshufd xmm3,xmm6,78 - movdqa xmm7,xmm6 - pxor xmm3,xmm6 - 
lea esi,[32+esi] -db 102,15,58,68,242,0 -db 102,15,58,68,250,17 -db 102,15,58,68,221,0 - movups xmm2,[16+edx] - nop - sub ebx,32 - jbe NEAR L$009even_tail - jmp NEAR L$010mod_loop -align 32 -L$010mod_loop: - pshufd xmm4,xmm0,78 - movdqa xmm1,xmm0 - pxor xmm4,xmm0 - nop -db 102,15,58,68,194,0 -db 102,15,58,68,202,17 -db 102,15,58,68,229,16 - movups xmm2,[edx] - xorps xmm0,xmm6 - movdqa xmm5,[ecx] - xorps xmm1,xmm7 - movdqu xmm7,[esi] - pxor xmm3,xmm0 - movdqu xmm6,[16+esi] - pxor xmm3,xmm1 -db 102,15,56,0,253 - pxor xmm4,xmm3 - movdqa xmm3,xmm4 - psrldq xmm4,8 - pslldq xmm3,8 - pxor xmm1,xmm4 - pxor xmm0,xmm3 -db 102,15,56,0,245 - pxor xmm1,xmm7 - movdqa xmm7,xmm6 - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 -db 102,15,58,68,242,0 - movups xmm5,[32+edx] - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - pshufd xmm3,xmm7,78 - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm3,xmm7 - pxor xmm1,xmm4 -db 102,15,58,68,250,17 - movups xmm2,[16+edx] - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 -db 102,15,58,68,221,0 - lea esi,[32+esi] - sub ebx,32 - ja NEAR L$010mod_loop -L$009even_tail: - pshufd xmm4,xmm0,78 - movdqa xmm1,xmm0 - pxor xmm4,xmm0 -db 102,15,58,68,194,0 -db 102,15,58,68,202,17 -db 102,15,58,68,229,16 - movdqa xmm5,[ecx] - xorps xmm0,xmm6 - xorps xmm1,xmm7 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - pxor xmm4,xmm3 - movdqa xmm3,xmm4 - psrldq xmm4,8 - pslldq xmm3,8 - pxor xmm1,xmm4 - pxor xmm0,xmm3 - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - test ebx,ebx - jnz NEAR L$011done - movups xmm2,[edx] -L$008odd_tail: - movdqu xmm3,[esi] -db 
102,15,56,0,221 - pxor xmm0,xmm3 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pshufd xmm4,xmm2,78 - pxor xmm3,xmm0 - pxor xmm4,xmm2 -db 102,15,58,68,194,0 -db 102,15,58,68,202,17 -db 102,15,58,68,220,0 - xorps xmm3,xmm0 - xorps xmm3,xmm1 - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 -L$011done: -db 102,15,56,0,197 - movdqu [eax],xmm0 - pop edi - pop esi - pop ebx - pop ebp - ret -align 64 -L$bswap: -db 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -db 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 -align 64 -L$rem_8bit: -dw 0,450,900,582,1800,1738,1164,1358 -dw 3600,4050,3476,3158,2328,2266,2716,2910 -dw 7200,7650,8100,7782,6952,6890,6316,6510 -dw 4656,5106,4532,4214,5432,5370,5820,6014 -dw 14400,14722,15300,14854,16200,16010,15564,15630 -dw 13904,14226,13780,13334,12632,12442,13020,13086 -dw 9312,9634,10212,9766,9064,8874,8428,8494 -dw 10864,11186,10740,10294,11640,11450,12028,12094 -dw 28800,28994,29444,29382,30600,30282,29708,30158 -dw 32400,32594,32020,31958,31128,30810,31260,31710 -dw 27808,28002,28452,28390,27560,27242,26668,27118 -dw 25264,25458,24884,24822,26040,25722,26172,26622 -dw 18624,18690,19268,19078,20424,19978,19532,19854 -dw 18128,18194,17748,17558,16856,16410,16988,17310 -dw 21728,21794,22372,22182,21480,21034,20588,20910 -dw 23280,23346,22900,22710,24056,23610,24188,24510 -dw 57600,57538,57988,58182,58888,59338,58764,58446 -dw 61200,61138,60564,60758,59416,59866,60316,59998 -dw 64800,64738,65188,65382,64040,64490,63916,63598 -dw 62256,62194,61620,61814,62520,62970,63420,63102 -dw 55616,55426,56004,56070,56904,57226,56780,56334 -dw 55120,54930,54484,54550,53336,53658,54236,53790 -dw 
50528,50338,50916,50982,49768,50090,49644,49198 -dw 52080,51890,51444,51510,52344,52666,53244,52798 -dw 37248,36930,37380,37830,38536,38730,38156,38094 -dw 40848,40530,39956,40406,39064,39258,39708,39646 -dw 36256,35938,36388,36838,35496,35690,35116,35054 -dw 33712,33394,32820,33270,33976,34170,34620,34558 -dw 43456,43010,43588,43910,44744,44810,44364,44174 -dw 42960,42514,42068,42390,41176,41242,41820,41630 -dw 46560,46114,46692,47014,45800,45866,45420,45230 -dw 48112,47666,47220,47542,48376,48442,49020,48830 -align 64 -L$rem_4bit: -dd 0,0,0,471859200,0,943718400,0,610271232 -dd 0,1887436800,0,1822425088,0,1220542464,0,1423966208 -dd 0,3774873600,0,4246732800,0,3644850176,0,3311403008 -dd 0,2441084928,0,2376073216,0,2847932416,0,3051356160 -db 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 -db 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 -db 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 -db 0 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/md5-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/md5-586.asm deleted file mode 100644 index c051923082..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/md5-586.asm +++ /dev/null @@ -1,697 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _md5_block_asm_data_order -align 16 -_md5_block_asm_data_order: -L$_md5_block_asm_data_order_begin: - push esi - push edi - mov edi,DWORD [12+esp] - mov esi,DWORD [16+esp] - mov ecx,DWORD [20+esp] - push ebp - shl ecx,6 - push ebx - add ecx,esi - sub ecx,64 - mov eax,DWORD [edi] - push ecx - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] -L$000start: - ; - ; R0 section - mov edi,ecx - mov ebp,DWORD [esi] - ; R0 0 - xor edi,edx - and edi,ebx - lea eax,[3614090360+ebp*1+eax] - xor edi,edx - add eax,edi - mov edi,ebx - rol eax,7 - mov ebp,DWORD [4+esi] - add eax,ebx - ; R0 1 - xor edi,ecx - and edi,eax - lea edx,[3905402710+ebp*1+edx] - xor edi,ecx - add edx,edi - mov edi,eax - rol edx,12 - mov ebp,DWORD [8+esi] - add edx,eax - ; R0 2 - xor edi,ebx - and edi,edx - lea ecx,[606105819+ebp*1+ecx] - xor edi,ebx - add ecx,edi - mov edi,edx - rol ecx,17 - mov ebp,DWORD [12+esi] - add ecx,edx - ; R0 3 - xor edi,eax - and edi,ecx - lea ebx,[3250441966+ebp*1+ebx] - xor edi,eax - add ebx,edi - mov edi,ecx - rol ebx,22 - mov ebp,DWORD [16+esi] - add ebx,ecx - ; R0 4 - xor edi,edx - and edi,ebx - lea eax,[4118548399+ebp*1+eax] - xor edi,edx - add eax,edi - mov edi,ebx - rol eax,7 - mov ebp,DWORD [20+esi] - add eax,ebx - ; R0 5 - xor edi,ecx - and edi,eax - lea edx,[1200080426+ebp*1+edx] - xor edi,ecx - add edx,edi - mov edi,eax - rol edx,12 - mov ebp,DWORD [24+esi] - add edx,eax - ; R0 6 - xor edi,ebx - and edi,edx - lea ecx,[2821735955+ebp*1+ecx] - xor edi,ebx - add ecx,edi - mov edi,edx - rol ecx,17 - mov ebp,DWORD [28+esi] - add ecx,edx - ; R0 7 - xor edi,eax - and edi,ecx - lea ebx,[4249261313+ebp*1+ebx] - xor edi,eax - add ebx,edi - mov edi,ecx - rol ebx,22 - mov ebp,DWORD [32+esi] - add ebx,ecx - ; R0 8 - xor edi,edx - and edi,ebx - lea eax,[1770035416+ebp*1+eax] - xor 
edi,edx - add eax,edi - mov edi,ebx - rol eax,7 - mov ebp,DWORD [36+esi] - add eax,ebx - ; R0 9 - xor edi,ecx - and edi,eax - lea edx,[2336552879+ebp*1+edx] - xor edi,ecx - add edx,edi - mov edi,eax - rol edx,12 - mov ebp,DWORD [40+esi] - add edx,eax - ; R0 10 - xor edi,ebx - and edi,edx - lea ecx,[4294925233+ebp*1+ecx] - xor edi,ebx - add ecx,edi - mov edi,edx - rol ecx,17 - mov ebp,DWORD [44+esi] - add ecx,edx - ; R0 11 - xor edi,eax - and edi,ecx - lea ebx,[2304563134+ebp*1+ebx] - xor edi,eax - add ebx,edi - mov edi,ecx - rol ebx,22 - mov ebp,DWORD [48+esi] - add ebx,ecx - ; R0 12 - xor edi,edx - and edi,ebx - lea eax,[1804603682+ebp*1+eax] - xor edi,edx - add eax,edi - mov edi,ebx - rol eax,7 - mov ebp,DWORD [52+esi] - add eax,ebx - ; R0 13 - xor edi,ecx - and edi,eax - lea edx,[4254626195+ebp*1+edx] - xor edi,ecx - add edx,edi - mov edi,eax - rol edx,12 - mov ebp,DWORD [56+esi] - add edx,eax - ; R0 14 - xor edi,ebx - and edi,edx - lea ecx,[2792965006+ebp*1+ecx] - xor edi,ebx - add ecx,edi - mov edi,edx - rol ecx,17 - mov ebp,DWORD [60+esi] - add ecx,edx - ; R0 15 - xor edi,eax - and edi,ecx - lea ebx,[1236535329+ebp*1+ebx] - xor edi,eax - add ebx,edi - mov edi,ecx - rol ebx,22 - mov ebp,DWORD [4+esi] - add ebx,ecx - ; - ; R1 section - ; R1 16 - lea eax,[4129170786+ebp*1+eax] - xor edi,ebx - and edi,edx - mov ebp,DWORD [24+esi] - xor edi,ecx - add eax,edi - mov edi,ebx - rol eax,5 - add eax,ebx - ; R1 17 - lea edx,[3225465664+ebp*1+edx] - xor edi,eax - and edi,ecx - mov ebp,DWORD [44+esi] - xor edi,ebx - add edx,edi - mov edi,eax - rol edx,9 - add edx,eax - ; R1 18 - lea ecx,[643717713+ebp*1+ecx] - xor edi,edx - and edi,ebx - mov ebp,DWORD [esi] - xor edi,eax - add ecx,edi - mov edi,edx - rol ecx,14 - add ecx,edx - ; R1 19 - lea ebx,[3921069994+ebp*1+ebx] - xor edi,ecx - and edi,eax - mov ebp,DWORD [20+esi] - xor edi,edx - add ebx,edi - mov edi,ecx - rol ebx,20 - add ebx,ecx - ; R1 20 - lea eax,[3593408605+ebp*1+eax] - xor edi,ebx - and edi,edx - mov ebp,DWORD 
[40+esi] - xor edi,ecx - add eax,edi - mov edi,ebx - rol eax,5 - add eax,ebx - ; R1 21 - lea edx,[38016083+ebp*1+edx] - xor edi,eax - and edi,ecx - mov ebp,DWORD [60+esi] - xor edi,ebx - add edx,edi - mov edi,eax - rol edx,9 - add edx,eax - ; R1 22 - lea ecx,[3634488961+ebp*1+ecx] - xor edi,edx - and edi,ebx - mov ebp,DWORD [16+esi] - xor edi,eax - add ecx,edi - mov edi,edx - rol ecx,14 - add ecx,edx - ; R1 23 - lea ebx,[3889429448+ebp*1+ebx] - xor edi,ecx - and edi,eax - mov ebp,DWORD [36+esi] - xor edi,edx - add ebx,edi - mov edi,ecx - rol ebx,20 - add ebx,ecx - ; R1 24 - lea eax,[568446438+ebp*1+eax] - xor edi,ebx - and edi,edx - mov ebp,DWORD [56+esi] - xor edi,ecx - add eax,edi - mov edi,ebx - rol eax,5 - add eax,ebx - ; R1 25 - lea edx,[3275163606+ebp*1+edx] - xor edi,eax - and edi,ecx - mov ebp,DWORD [12+esi] - xor edi,ebx - add edx,edi - mov edi,eax - rol edx,9 - add edx,eax - ; R1 26 - lea ecx,[4107603335+ebp*1+ecx] - xor edi,edx - and edi,ebx - mov ebp,DWORD [32+esi] - xor edi,eax - add ecx,edi - mov edi,edx - rol ecx,14 - add ecx,edx - ; R1 27 - lea ebx,[1163531501+ebp*1+ebx] - xor edi,ecx - and edi,eax - mov ebp,DWORD [52+esi] - xor edi,edx - add ebx,edi - mov edi,ecx - rol ebx,20 - add ebx,ecx - ; R1 28 - lea eax,[2850285829+ebp*1+eax] - xor edi,ebx - and edi,edx - mov ebp,DWORD [8+esi] - xor edi,ecx - add eax,edi - mov edi,ebx - rol eax,5 - add eax,ebx - ; R1 29 - lea edx,[4243563512+ebp*1+edx] - xor edi,eax - and edi,ecx - mov ebp,DWORD [28+esi] - xor edi,ebx - add edx,edi - mov edi,eax - rol edx,9 - add edx,eax - ; R1 30 - lea ecx,[1735328473+ebp*1+ecx] - xor edi,edx - and edi,ebx - mov ebp,DWORD [48+esi] - xor edi,eax - add ecx,edi - mov edi,edx - rol ecx,14 - add ecx,edx - ; R1 31 - lea ebx,[2368359562+ebp*1+ebx] - xor edi,ecx - and edi,eax - mov ebp,DWORD [20+esi] - xor edi,edx - add ebx,edi - mov edi,ecx - rol ebx,20 - add ebx,ecx - ; - ; R2 section - ; R2 32 - xor edi,edx - xor edi,ebx - lea eax,[4294588738+ebp*1+eax] - add eax,edi - rol eax,4 
- mov ebp,DWORD [32+esi] - mov edi,ebx - ; R2 33 - lea edx,[2272392833+ebp*1+edx] - add eax,ebx - xor edi,ecx - xor edi,eax - mov ebp,DWORD [44+esi] - add edx,edi - mov edi,eax - rol edx,11 - add edx,eax - ; R2 34 - xor edi,ebx - xor edi,edx - lea ecx,[1839030562+ebp*1+ecx] - add ecx,edi - rol ecx,16 - mov ebp,DWORD [56+esi] - mov edi,edx - ; R2 35 - lea ebx,[4259657740+ebp*1+ebx] - add ecx,edx - xor edi,eax - xor edi,ecx - mov ebp,DWORD [4+esi] - add ebx,edi - mov edi,ecx - rol ebx,23 - add ebx,ecx - ; R2 36 - xor edi,edx - xor edi,ebx - lea eax,[2763975236+ebp*1+eax] - add eax,edi - rol eax,4 - mov ebp,DWORD [16+esi] - mov edi,ebx - ; R2 37 - lea edx,[1272893353+ebp*1+edx] - add eax,ebx - xor edi,ecx - xor edi,eax - mov ebp,DWORD [28+esi] - add edx,edi - mov edi,eax - rol edx,11 - add edx,eax - ; R2 38 - xor edi,ebx - xor edi,edx - lea ecx,[4139469664+ebp*1+ecx] - add ecx,edi - rol ecx,16 - mov ebp,DWORD [40+esi] - mov edi,edx - ; R2 39 - lea ebx,[3200236656+ebp*1+ebx] - add ecx,edx - xor edi,eax - xor edi,ecx - mov ebp,DWORD [52+esi] - add ebx,edi - mov edi,ecx - rol ebx,23 - add ebx,ecx - ; R2 40 - xor edi,edx - xor edi,ebx - lea eax,[681279174+ebp*1+eax] - add eax,edi - rol eax,4 - mov ebp,DWORD [esi] - mov edi,ebx - ; R2 41 - lea edx,[3936430074+ebp*1+edx] - add eax,ebx - xor edi,ecx - xor edi,eax - mov ebp,DWORD [12+esi] - add edx,edi - mov edi,eax - rol edx,11 - add edx,eax - ; R2 42 - xor edi,ebx - xor edi,edx - lea ecx,[3572445317+ebp*1+ecx] - add ecx,edi - rol ecx,16 - mov ebp,DWORD [24+esi] - mov edi,edx - ; R2 43 - lea ebx,[76029189+ebp*1+ebx] - add ecx,edx - xor edi,eax - xor edi,ecx - mov ebp,DWORD [36+esi] - add ebx,edi - mov edi,ecx - rol ebx,23 - add ebx,ecx - ; R2 44 - xor edi,edx - xor edi,ebx - lea eax,[3654602809+ebp*1+eax] - add eax,edi - rol eax,4 - mov ebp,DWORD [48+esi] - mov edi,ebx - ; R2 45 - lea edx,[3873151461+ebp*1+edx] - add eax,ebx - xor edi,ecx - xor edi,eax - mov ebp,DWORD [60+esi] - add edx,edi - mov edi,eax - rol edx,11 - add 
edx,eax - ; R2 46 - xor edi,ebx - xor edi,edx - lea ecx,[530742520+ebp*1+ecx] - add ecx,edi - rol ecx,16 - mov ebp,DWORD [8+esi] - mov edi,edx - ; R2 47 - lea ebx,[3299628645+ebp*1+ebx] - add ecx,edx - xor edi,eax - xor edi,ecx - mov ebp,DWORD [esi] - add ebx,edi - mov edi,-1 - rol ebx,23 - add ebx,ecx - ; - ; R3 section - ; R3 48 - xor edi,edx - or edi,ebx - lea eax,[4096336452+ebp*1+eax] - xor edi,ecx - mov ebp,DWORD [28+esi] - add eax,edi - mov edi,-1 - rol eax,6 - xor edi,ecx - add eax,ebx - ; R3 49 - or edi,eax - lea edx,[1126891415+ebp*1+edx] - xor edi,ebx - mov ebp,DWORD [56+esi] - add edx,edi - mov edi,-1 - rol edx,10 - xor edi,ebx - add edx,eax - ; R3 50 - or edi,edx - lea ecx,[2878612391+ebp*1+ecx] - xor edi,eax - mov ebp,DWORD [20+esi] - add ecx,edi - mov edi,-1 - rol ecx,15 - xor edi,eax - add ecx,edx - ; R3 51 - or edi,ecx - lea ebx,[4237533241+ebp*1+ebx] - xor edi,edx - mov ebp,DWORD [48+esi] - add ebx,edi - mov edi,-1 - rol ebx,21 - xor edi,edx - add ebx,ecx - ; R3 52 - or edi,ebx - lea eax,[1700485571+ebp*1+eax] - xor edi,ecx - mov ebp,DWORD [12+esi] - add eax,edi - mov edi,-1 - rol eax,6 - xor edi,ecx - add eax,ebx - ; R3 53 - or edi,eax - lea edx,[2399980690+ebp*1+edx] - xor edi,ebx - mov ebp,DWORD [40+esi] - add edx,edi - mov edi,-1 - rol edx,10 - xor edi,ebx - add edx,eax - ; R3 54 - or edi,edx - lea ecx,[4293915773+ebp*1+ecx] - xor edi,eax - mov ebp,DWORD [4+esi] - add ecx,edi - mov edi,-1 - rol ecx,15 - xor edi,eax - add ecx,edx - ; R3 55 - or edi,ecx - lea ebx,[2240044497+ebp*1+ebx] - xor edi,edx - mov ebp,DWORD [32+esi] - add ebx,edi - mov edi,-1 - rol ebx,21 - xor edi,edx - add ebx,ecx - ; R3 56 - or edi,ebx - lea eax,[1873313359+ebp*1+eax] - xor edi,ecx - mov ebp,DWORD [60+esi] - add eax,edi - mov edi,-1 - rol eax,6 - xor edi,ecx - add eax,ebx - ; R3 57 - or edi,eax - lea edx,[4264355552+ebp*1+edx] - xor edi,ebx - mov ebp,DWORD [24+esi] - add edx,edi - mov edi,-1 - rol edx,10 - xor edi,ebx - add edx,eax - ; R3 58 - or edi,edx - lea 
ecx,[2734768916+ebp*1+ecx] - xor edi,eax - mov ebp,DWORD [52+esi] - add ecx,edi - mov edi,-1 - rol ecx,15 - xor edi,eax - add ecx,edx - ; R3 59 - or edi,ecx - lea ebx,[1309151649+ebp*1+ebx] - xor edi,edx - mov ebp,DWORD [16+esi] - add ebx,edi - mov edi,-1 - rol ebx,21 - xor edi,edx - add ebx,ecx - ; R3 60 - or edi,ebx - lea eax,[4149444226+ebp*1+eax] - xor edi,ecx - mov ebp,DWORD [44+esi] - add eax,edi - mov edi,-1 - rol eax,6 - xor edi,ecx - add eax,ebx - ; R3 61 - or edi,eax - lea edx,[3174756917+ebp*1+edx] - xor edi,ebx - mov ebp,DWORD [8+esi] - add edx,edi - mov edi,-1 - rol edx,10 - xor edi,ebx - add edx,eax - ; R3 62 - or edi,edx - lea ecx,[718787259+ebp*1+ecx] - xor edi,eax - mov ebp,DWORD [36+esi] - add ecx,edi - mov edi,-1 - rol ecx,15 - xor edi,eax - add ecx,edx - ; R3 63 - or edi,ecx - lea ebx,[3951481745+ebp*1+ebx] - xor edi,edx - mov ebp,DWORD [24+esp] - add ebx,edi - add esi,64 - rol ebx,21 - mov edi,DWORD [ebp] - add ebx,ecx - add eax,edi - mov edi,DWORD [4+ebp] - add ebx,edi - mov edi,DWORD [8+ebp] - add ecx,edi - mov edi,DWORD [12+ebp] - add edx,edi - mov DWORD [ebp],eax - mov DWORD [4+ebp],ebx - mov edi,DWORD [esp] - mov DWORD [8+ebp],ecx - mov DWORD [12+ebp],edx - cmp edi,esi - jae NEAR L$000start - pop eax - pop ebx - pop ebp - pop edi - pop esi - ret diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha1-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha1-586.asm deleted file mode 100644 index 0afe894e52..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha1-586.asm +++ /dev/null @@ -1,3814 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -global _sha1_block_data_order -align 16 -_sha1_block_data_order: -L$_sha1_block_data_order_begin: - push ebp - push ebx - push esi - push edi - call L$000pic_point -L$000pic_point: - pop ebp - lea esi,[_OPENSSL_ia32cap_P] - lea ebp,[(L$K_XX_XX-L$000pic_point)+ebp] - mov eax,DWORD [esi] - mov edx,DWORD [4+esi] - test edx,512 - jz NEAR L$001x86 - mov ecx,DWORD [8+esi] - test eax,16777216 - jz NEAR L$001x86 - and edx,268435456 - and eax,1073741824 - or eax,edx - cmp eax,1342177280 - je NEAR L$avx_shortcut - jmp NEAR L$ssse3_shortcut -align 16 -L$001x86: - mov ebp,DWORD [20+esp] - mov esi,DWORD [24+esp] - mov eax,DWORD [28+esp] - sub esp,76 - shl eax,6 - add eax,esi - mov DWORD [104+esp],eax - mov edi,DWORD [16+ebp] - jmp NEAR L$002loop -align 16 -L$002loop: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - bswap eax - bswap ebx - bswap ecx - bswap edx - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - mov DWORD [8+esp],ecx - mov DWORD [12+esp],edx - mov eax,DWORD [16+esi] - mov ebx,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov edx,DWORD [28+esi] - bswap eax - bswap ebx - bswap ecx - bswap edx - mov DWORD [16+esp],eax - mov DWORD [20+esp],ebx - mov DWORD [24+esp],ecx - mov DWORD [28+esp],edx - mov eax,DWORD [32+esi] - mov ebx,DWORD [36+esi] - mov ecx,DWORD [40+esi] - mov edx,DWORD [44+esi] - bswap eax - bswap ebx - bswap ecx - bswap edx - mov DWORD [32+esp],eax 
- mov DWORD [36+esp],ebx - mov DWORD [40+esp],ecx - mov DWORD [44+esp],edx - mov eax,DWORD [48+esi] - mov ebx,DWORD [52+esi] - mov ecx,DWORD [56+esi] - mov edx,DWORD [60+esi] - bswap eax - bswap ebx - bswap ecx - bswap edx - mov DWORD [48+esp],eax - mov DWORD [52+esp],ebx - mov DWORD [56+esp],ecx - mov DWORD [60+esp],edx - mov DWORD [100+esp],esi - mov eax,DWORD [ebp] - mov ebx,DWORD [4+ebp] - mov ecx,DWORD [8+ebp] - mov edx,DWORD [12+ebp] - ; 00_15 0 - mov esi,ecx - mov ebp,eax - rol ebp,5 - xor esi,edx - add ebp,edi - mov edi,DWORD [esp] - and esi,ebx - ror ebx,2 - xor esi,edx - lea ebp,[1518500249+edi*1+ebp] - add ebp,esi - ; 00_15 1 - mov edi,ebx - mov esi,ebp - rol ebp,5 - xor edi,ecx - add ebp,edx - mov edx,DWORD [4+esp] - and edi,eax - ror eax,2 - xor edi,ecx - lea ebp,[1518500249+edx*1+ebp] - add ebp,edi - ; 00_15 2 - mov edx,eax - mov edi,ebp - rol ebp,5 - xor edx,ebx - add ebp,ecx - mov ecx,DWORD [8+esp] - and edx,esi - ror esi,2 - xor edx,ebx - lea ebp,[1518500249+ecx*1+ebp] - add ebp,edx - ; 00_15 3 - mov ecx,esi - mov edx,ebp - rol ebp,5 - xor ecx,eax - add ebp,ebx - mov ebx,DWORD [12+esp] - and ecx,edi - ror edi,2 - xor ecx,eax - lea ebp,[1518500249+ebx*1+ebp] - add ebp,ecx - ; 00_15 4 - mov ebx,edi - mov ecx,ebp - rol ebp,5 - xor ebx,esi - add ebp,eax - mov eax,DWORD [16+esp] - and ebx,edx - ror edx,2 - xor ebx,esi - lea ebp,[1518500249+eax*1+ebp] - add ebp,ebx - ; 00_15 5 - mov eax,edx - mov ebx,ebp - rol ebp,5 - xor eax,edi - add ebp,esi - mov esi,DWORD [20+esp] - and eax,ecx - ror ecx,2 - xor eax,edi - lea ebp,[1518500249+esi*1+ebp] - add ebp,eax - ; 00_15 6 - mov esi,ecx - mov eax,ebp - rol ebp,5 - xor esi,edx - add ebp,edi - mov edi,DWORD [24+esp] - and esi,ebx - ror ebx,2 - xor esi,edx - lea ebp,[1518500249+edi*1+ebp] - add ebp,esi - ; 00_15 7 - mov edi,ebx - mov esi,ebp - rol ebp,5 - xor edi,ecx - add ebp,edx - mov edx,DWORD [28+esp] - and edi,eax - ror eax,2 - xor edi,ecx - lea ebp,[1518500249+edx*1+ebp] - add ebp,edi - ; 00_15 8 - mov 
edx,eax - mov edi,ebp - rol ebp,5 - xor edx,ebx - add ebp,ecx - mov ecx,DWORD [32+esp] - and edx,esi - ror esi,2 - xor edx,ebx - lea ebp,[1518500249+ecx*1+ebp] - add ebp,edx - ; 00_15 9 - mov ecx,esi - mov edx,ebp - rol ebp,5 - xor ecx,eax - add ebp,ebx - mov ebx,DWORD [36+esp] - and ecx,edi - ror edi,2 - xor ecx,eax - lea ebp,[1518500249+ebx*1+ebp] - add ebp,ecx - ; 00_15 10 - mov ebx,edi - mov ecx,ebp - rol ebp,5 - xor ebx,esi - add ebp,eax - mov eax,DWORD [40+esp] - and ebx,edx - ror edx,2 - xor ebx,esi - lea ebp,[1518500249+eax*1+ebp] - add ebp,ebx - ; 00_15 11 - mov eax,edx - mov ebx,ebp - rol ebp,5 - xor eax,edi - add ebp,esi - mov esi,DWORD [44+esp] - and eax,ecx - ror ecx,2 - xor eax,edi - lea ebp,[1518500249+esi*1+ebp] - add ebp,eax - ; 00_15 12 - mov esi,ecx - mov eax,ebp - rol ebp,5 - xor esi,edx - add ebp,edi - mov edi,DWORD [48+esp] - and esi,ebx - ror ebx,2 - xor esi,edx - lea ebp,[1518500249+edi*1+ebp] - add ebp,esi - ; 00_15 13 - mov edi,ebx - mov esi,ebp - rol ebp,5 - xor edi,ecx - add ebp,edx - mov edx,DWORD [52+esp] - and edi,eax - ror eax,2 - xor edi,ecx - lea ebp,[1518500249+edx*1+ebp] - add ebp,edi - ; 00_15 14 - mov edx,eax - mov edi,ebp - rol ebp,5 - xor edx,ebx - add ebp,ecx - mov ecx,DWORD [56+esp] - and edx,esi - ror esi,2 - xor edx,ebx - lea ebp,[1518500249+ecx*1+ebp] - add ebp,edx - ; 00_15 15 - mov ecx,esi - mov edx,ebp - rol ebp,5 - xor ecx,eax - add ebp,ebx - mov ebx,DWORD [60+esp] - and ecx,edi - ror edi,2 - xor ecx,eax - lea ebp,[1518500249+ebx*1+ebp] - mov ebx,DWORD [esp] - add ecx,ebp - ; 16_19 16 - mov ebp,edi - xor ebx,DWORD [8+esp] - xor ebp,esi - xor ebx,DWORD [32+esp] - and ebp,edx - xor ebx,DWORD [52+esp] - rol ebx,1 - xor ebp,esi - add eax,ebp - mov ebp,ecx - ror edx,2 - mov DWORD [esp],ebx - rol ebp,5 - lea ebx,[1518500249+eax*1+ebx] - mov eax,DWORD [4+esp] - add ebx,ebp - ; 16_19 17 - mov ebp,edx - xor eax,DWORD [12+esp] - xor ebp,edi - xor eax,DWORD [36+esp] - and ebp,ecx - xor eax,DWORD [56+esp] - rol eax,1 - xor 
ebp,edi - add esi,ebp - mov ebp,ebx - ror ecx,2 - mov DWORD [4+esp],eax - rol ebp,5 - lea eax,[1518500249+esi*1+eax] - mov esi,DWORD [8+esp] - add eax,ebp - ; 16_19 18 - mov ebp,ecx - xor esi,DWORD [16+esp] - xor ebp,edx - xor esi,DWORD [40+esp] - and ebp,ebx - xor esi,DWORD [60+esp] - rol esi,1 - xor ebp,edx - add edi,ebp - mov ebp,eax - ror ebx,2 - mov DWORD [8+esp],esi - rol ebp,5 - lea esi,[1518500249+edi*1+esi] - mov edi,DWORD [12+esp] - add esi,ebp - ; 16_19 19 - mov ebp,ebx - xor edi,DWORD [20+esp] - xor ebp,ecx - xor edi,DWORD [44+esp] - and ebp,eax - xor edi,DWORD [esp] - rol edi,1 - xor ebp,ecx - add edx,ebp - mov ebp,esi - ror eax,2 - mov DWORD [12+esp],edi - rol ebp,5 - lea edi,[1518500249+edx*1+edi] - mov edx,DWORD [16+esp] - add edi,ebp - ; 20_39 20 - mov ebp,esi - xor edx,DWORD [24+esp] - xor ebp,eax - xor edx,DWORD [48+esp] - xor ebp,ebx - xor edx,DWORD [4+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [16+esp],edx - lea edx,[1859775393+ecx*1+edx] - mov ecx,DWORD [20+esp] - add edx,ebp - ; 20_39 21 - mov ebp,edi - xor ecx,DWORD [28+esp] - xor ebp,esi - xor ecx,DWORD [52+esp] - xor ebp,eax - xor ecx,DWORD [8+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [20+esp],ecx - lea ecx,[1859775393+ebx*1+ecx] - mov ebx,DWORD [24+esp] - add ecx,ebp - ; 20_39 22 - mov ebp,edx - xor ebx,DWORD [32+esp] - xor ebp,edi - xor ebx,DWORD [56+esp] - xor ebp,esi - xor ebx,DWORD [12+esp] - rol ebx,1 - add eax,ebp - ror edx,2 - mov ebp,ecx - rol ebp,5 - mov DWORD [24+esp],ebx - lea ebx,[1859775393+eax*1+ebx] - mov eax,DWORD [28+esp] - add ebx,ebp - ; 20_39 23 - mov ebp,ecx - xor eax,DWORD [36+esp] - xor ebp,edx - xor eax,DWORD [60+esp] - xor ebp,edi - xor eax,DWORD [16+esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - mov DWORD [28+esp],eax - lea eax,[1859775393+esi*1+eax] - mov esi,DWORD [32+esp] - add eax,ebp - ; 20_39 24 - mov ebp,ebx - xor esi,DWORD [40+esp] - xor ebp,ecx - xor 
esi,DWORD [esp] - xor ebp,edx - xor esi,DWORD [20+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [32+esp],esi - lea esi,[1859775393+edi*1+esi] - mov edi,DWORD [36+esp] - add esi,ebp - ; 20_39 25 - mov ebp,eax - xor edi,DWORD [44+esp] - xor ebp,ebx - xor edi,DWORD [4+esp] - xor ebp,ecx - xor edi,DWORD [24+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [36+esp],edi - lea edi,[1859775393+edx*1+edi] - mov edx,DWORD [40+esp] - add edi,ebp - ; 20_39 26 - mov ebp,esi - xor edx,DWORD [48+esp] - xor ebp,eax - xor edx,DWORD [8+esp] - xor ebp,ebx - xor edx,DWORD [28+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [40+esp],edx - lea edx,[1859775393+ecx*1+edx] - mov ecx,DWORD [44+esp] - add edx,ebp - ; 20_39 27 - mov ebp,edi - xor ecx,DWORD [52+esp] - xor ebp,esi - xor ecx,DWORD [12+esp] - xor ebp,eax - xor ecx,DWORD [32+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [44+esp],ecx - lea ecx,[1859775393+ebx*1+ecx] - mov ebx,DWORD [48+esp] - add ecx,ebp - ; 20_39 28 - mov ebp,edx - xor ebx,DWORD [56+esp] - xor ebp,edi - xor ebx,DWORD [16+esp] - xor ebp,esi - xor ebx,DWORD [36+esp] - rol ebx,1 - add eax,ebp - ror edx,2 - mov ebp,ecx - rol ebp,5 - mov DWORD [48+esp],ebx - lea ebx,[1859775393+eax*1+ebx] - mov eax,DWORD [52+esp] - add ebx,ebp - ; 20_39 29 - mov ebp,ecx - xor eax,DWORD [60+esp] - xor ebp,edx - xor eax,DWORD [20+esp] - xor ebp,edi - xor eax,DWORD [40+esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - mov DWORD [52+esp],eax - lea eax,[1859775393+esi*1+eax] - mov esi,DWORD [56+esp] - add eax,ebp - ; 20_39 30 - mov ebp,ebx - xor esi,DWORD [esp] - xor ebp,ecx - xor esi,DWORD [24+esp] - xor ebp,edx - xor esi,DWORD [44+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [56+esp],esi - lea esi,[1859775393+edi*1+esi] - mov edi,DWORD [60+esp] - add esi,ebp - ; 20_39 31 - mov ebp,eax - xor edi,DWORD 
[4+esp] - xor ebp,ebx - xor edi,DWORD [28+esp] - xor ebp,ecx - xor edi,DWORD [48+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [60+esp],edi - lea edi,[1859775393+edx*1+edi] - mov edx,DWORD [esp] - add edi,ebp - ; 20_39 32 - mov ebp,esi - xor edx,DWORD [8+esp] - xor ebp,eax - xor edx,DWORD [32+esp] - xor ebp,ebx - xor edx,DWORD [52+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [esp],edx - lea edx,[1859775393+ecx*1+edx] - mov ecx,DWORD [4+esp] - add edx,ebp - ; 20_39 33 - mov ebp,edi - xor ecx,DWORD [12+esp] - xor ebp,esi - xor ecx,DWORD [36+esp] - xor ebp,eax - xor ecx,DWORD [56+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [4+esp],ecx - lea ecx,[1859775393+ebx*1+ecx] - mov ebx,DWORD [8+esp] - add ecx,ebp - ; 20_39 34 - mov ebp,edx - xor ebx,DWORD [16+esp] - xor ebp,edi - xor ebx,DWORD [40+esp] - xor ebp,esi - xor ebx,DWORD [60+esp] - rol ebx,1 - add eax,ebp - ror edx,2 - mov ebp,ecx - rol ebp,5 - mov DWORD [8+esp],ebx - lea ebx,[1859775393+eax*1+ebx] - mov eax,DWORD [12+esp] - add ebx,ebp - ; 20_39 35 - mov ebp,ecx - xor eax,DWORD [20+esp] - xor ebp,edx - xor eax,DWORD [44+esp] - xor ebp,edi - xor eax,DWORD [esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - mov DWORD [12+esp],eax - lea eax,[1859775393+esi*1+eax] - mov esi,DWORD [16+esp] - add eax,ebp - ; 20_39 36 - mov ebp,ebx - xor esi,DWORD [24+esp] - xor ebp,ecx - xor esi,DWORD [48+esp] - xor ebp,edx - xor esi,DWORD [4+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [16+esp],esi - lea esi,[1859775393+edi*1+esi] - mov edi,DWORD [20+esp] - add esi,ebp - ; 20_39 37 - mov ebp,eax - xor edi,DWORD [28+esp] - xor ebp,ebx - xor edi,DWORD [52+esp] - xor ebp,ecx - xor edi,DWORD [8+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [20+esp],edi - lea edi,[1859775393+edx*1+edi] - mov edx,DWORD [24+esp] - add edi,ebp - ; 20_39 38 - mov 
ebp,esi - xor edx,DWORD [32+esp] - xor ebp,eax - xor edx,DWORD [56+esp] - xor ebp,ebx - xor edx,DWORD [12+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [24+esp],edx - lea edx,[1859775393+ecx*1+edx] - mov ecx,DWORD [28+esp] - add edx,ebp - ; 20_39 39 - mov ebp,edi - xor ecx,DWORD [36+esp] - xor ebp,esi - xor ecx,DWORD [60+esp] - xor ebp,eax - xor ecx,DWORD [16+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [28+esp],ecx - lea ecx,[1859775393+ebx*1+ecx] - mov ebx,DWORD [32+esp] - add ecx,ebp - ; 40_59 40 - mov ebp,edi - xor ebx,DWORD [40+esp] - xor ebp,esi - xor ebx,DWORD [esp] - and ebp,edx - xor ebx,DWORD [20+esp] - rol ebx,1 - add ebp,eax - ror edx,2 - mov eax,ecx - rol eax,5 - mov DWORD [32+esp],ebx - lea ebx,[2400959708+ebp*1+ebx] - mov ebp,edi - add ebx,eax - and ebp,esi - mov eax,DWORD [36+esp] - add ebx,ebp - ; 40_59 41 - mov ebp,edx - xor eax,DWORD [44+esp] - xor ebp,edi - xor eax,DWORD [4+esp] - and ebp,ecx - xor eax,DWORD [24+esp] - rol eax,1 - add ebp,esi - ror ecx,2 - mov esi,ebx - rol esi,5 - mov DWORD [36+esp],eax - lea eax,[2400959708+ebp*1+eax] - mov ebp,edx - add eax,esi - and ebp,edi - mov esi,DWORD [40+esp] - add eax,ebp - ; 40_59 42 - mov ebp,ecx - xor esi,DWORD [48+esp] - xor ebp,edx - xor esi,DWORD [8+esp] - and ebp,ebx - xor esi,DWORD [28+esp] - rol esi,1 - add ebp,edi - ror ebx,2 - mov edi,eax - rol edi,5 - mov DWORD [40+esp],esi - lea esi,[2400959708+ebp*1+esi] - mov ebp,ecx - add esi,edi - and ebp,edx - mov edi,DWORD [44+esp] - add esi,ebp - ; 40_59 43 - mov ebp,ebx - xor edi,DWORD [52+esp] - xor ebp,ecx - xor edi,DWORD [12+esp] - and ebp,eax - xor edi,DWORD [32+esp] - rol edi,1 - add ebp,edx - ror eax,2 - mov edx,esi - rol edx,5 - mov DWORD [44+esp],edi - lea edi,[2400959708+ebp*1+edi] - mov ebp,ebx - add edi,edx - and ebp,ecx - mov edx,DWORD [48+esp] - add edi,ebp - ; 40_59 44 - mov ebp,eax - xor edx,DWORD [56+esp] - xor ebp,ebx - xor edx,DWORD [16+esp] - and ebp,esi - 
xor edx,DWORD [36+esp] - rol edx,1 - add ebp,ecx - ror esi,2 - mov ecx,edi - rol ecx,5 - mov DWORD [48+esp],edx - lea edx,[2400959708+ebp*1+edx] - mov ebp,eax - add edx,ecx - and ebp,ebx - mov ecx,DWORD [52+esp] - add edx,ebp - ; 40_59 45 - mov ebp,esi - xor ecx,DWORD [60+esp] - xor ebp,eax - xor ecx,DWORD [20+esp] - and ebp,edi - xor ecx,DWORD [40+esp] - rol ecx,1 - add ebp,ebx - ror edi,2 - mov ebx,edx - rol ebx,5 - mov DWORD [52+esp],ecx - lea ecx,[2400959708+ebp*1+ecx] - mov ebp,esi - add ecx,ebx - and ebp,eax - mov ebx,DWORD [56+esp] - add ecx,ebp - ; 40_59 46 - mov ebp,edi - xor ebx,DWORD [esp] - xor ebp,esi - xor ebx,DWORD [24+esp] - and ebp,edx - xor ebx,DWORD [44+esp] - rol ebx,1 - add ebp,eax - ror edx,2 - mov eax,ecx - rol eax,5 - mov DWORD [56+esp],ebx - lea ebx,[2400959708+ebp*1+ebx] - mov ebp,edi - add ebx,eax - and ebp,esi - mov eax,DWORD [60+esp] - add ebx,ebp - ; 40_59 47 - mov ebp,edx - xor eax,DWORD [4+esp] - xor ebp,edi - xor eax,DWORD [28+esp] - and ebp,ecx - xor eax,DWORD [48+esp] - rol eax,1 - add ebp,esi - ror ecx,2 - mov esi,ebx - rol esi,5 - mov DWORD [60+esp],eax - lea eax,[2400959708+ebp*1+eax] - mov ebp,edx - add eax,esi - and ebp,edi - mov esi,DWORD [esp] - add eax,ebp - ; 40_59 48 - mov ebp,ecx - xor esi,DWORD [8+esp] - xor ebp,edx - xor esi,DWORD [32+esp] - and ebp,ebx - xor esi,DWORD [52+esp] - rol esi,1 - add ebp,edi - ror ebx,2 - mov edi,eax - rol edi,5 - mov DWORD [esp],esi - lea esi,[2400959708+ebp*1+esi] - mov ebp,ecx - add esi,edi - and ebp,edx - mov edi,DWORD [4+esp] - add esi,ebp - ; 40_59 49 - mov ebp,ebx - xor edi,DWORD [12+esp] - xor ebp,ecx - xor edi,DWORD [36+esp] - and ebp,eax - xor edi,DWORD [56+esp] - rol edi,1 - add ebp,edx - ror eax,2 - mov edx,esi - rol edx,5 - mov DWORD [4+esp],edi - lea edi,[2400959708+ebp*1+edi] - mov ebp,ebx - add edi,edx - and ebp,ecx - mov edx,DWORD [8+esp] - add edi,ebp - ; 40_59 50 - mov ebp,eax - xor edx,DWORD [16+esp] - xor ebp,ebx - xor edx,DWORD [40+esp] - and ebp,esi - xor edx,DWORD 
[60+esp] - rol edx,1 - add ebp,ecx - ror esi,2 - mov ecx,edi - rol ecx,5 - mov DWORD [8+esp],edx - lea edx,[2400959708+ebp*1+edx] - mov ebp,eax - add edx,ecx - and ebp,ebx - mov ecx,DWORD [12+esp] - add edx,ebp - ; 40_59 51 - mov ebp,esi - xor ecx,DWORD [20+esp] - xor ebp,eax - xor ecx,DWORD [44+esp] - and ebp,edi - xor ecx,DWORD [esp] - rol ecx,1 - add ebp,ebx - ror edi,2 - mov ebx,edx - rol ebx,5 - mov DWORD [12+esp],ecx - lea ecx,[2400959708+ebp*1+ecx] - mov ebp,esi - add ecx,ebx - and ebp,eax - mov ebx,DWORD [16+esp] - add ecx,ebp - ; 40_59 52 - mov ebp,edi - xor ebx,DWORD [24+esp] - xor ebp,esi - xor ebx,DWORD [48+esp] - and ebp,edx - xor ebx,DWORD [4+esp] - rol ebx,1 - add ebp,eax - ror edx,2 - mov eax,ecx - rol eax,5 - mov DWORD [16+esp],ebx - lea ebx,[2400959708+ebp*1+ebx] - mov ebp,edi - add ebx,eax - and ebp,esi - mov eax,DWORD [20+esp] - add ebx,ebp - ; 40_59 53 - mov ebp,edx - xor eax,DWORD [28+esp] - xor ebp,edi - xor eax,DWORD [52+esp] - and ebp,ecx - xor eax,DWORD [8+esp] - rol eax,1 - add ebp,esi - ror ecx,2 - mov esi,ebx - rol esi,5 - mov DWORD [20+esp],eax - lea eax,[2400959708+ebp*1+eax] - mov ebp,edx - add eax,esi - and ebp,edi - mov esi,DWORD [24+esp] - add eax,ebp - ; 40_59 54 - mov ebp,ecx - xor esi,DWORD [32+esp] - xor ebp,edx - xor esi,DWORD [56+esp] - and ebp,ebx - xor esi,DWORD [12+esp] - rol esi,1 - add ebp,edi - ror ebx,2 - mov edi,eax - rol edi,5 - mov DWORD [24+esp],esi - lea esi,[2400959708+ebp*1+esi] - mov ebp,ecx - add esi,edi - and ebp,edx - mov edi,DWORD [28+esp] - add esi,ebp - ; 40_59 55 - mov ebp,ebx - xor edi,DWORD [36+esp] - xor ebp,ecx - xor edi,DWORD [60+esp] - and ebp,eax - xor edi,DWORD [16+esp] - rol edi,1 - add ebp,edx - ror eax,2 - mov edx,esi - rol edx,5 - mov DWORD [28+esp],edi - lea edi,[2400959708+ebp*1+edi] - mov ebp,ebx - add edi,edx - and ebp,ecx - mov edx,DWORD [32+esp] - add edi,ebp - ; 40_59 56 - mov ebp,eax - xor edx,DWORD [40+esp] - xor ebp,ebx - xor edx,DWORD [esp] - and ebp,esi - xor edx,DWORD [20+esp] - 
rol edx,1 - add ebp,ecx - ror esi,2 - mov ecx,edi - rol ecx,5 - mov DWORD [32+esp],edx - lea edx,[2400959708+ebp*1+edx] - mov ebp,eax - add edx,ecx - and ebp,ebx - mov ecx,DWORD [36+esp] - add edx,ebp - ; 40_59 57 - mov ebp,esi - xor ecx,DWORD [44+esp] - xor ebp,eax - xor ecx,DWORD [4+esp] - and ebp,edi - xor ecx,DWORD [24+esp] - rol ecx,1 - add ebp,ebx - ror edi,2 - mov ebx,edx - rol ebx,5 - mov DWORD [36+esp],ecx - lea ecx,[2400959708+ebp*1+ecx] - mov ebp,esi - add ecx,ebx - and ebp,eax - mov ebx,DWORD [40+esp] - add ecx,ebp - ; 40_59 58 - mov ebp,edi - xor ebx,DWORD [48+esp] - xor ebp,esi - xor ebx,DWORD [8+esp] - and ebp,edx - xor ebx,DWORD [28+esp] - rol ebx,1 - add ebp,eax - ror edx,2 - mov eax,ecx - rol eax,5 - mov DWORD [40+esp],ebx - lea ebx,[2400959708+ebp*1+ebx] - mov ebp,edi - add ebx,eax - and ebp,esi - mov eax,DWORD [44+esp] - add ebx,ebp - ; 40_59 59 - mov ebp,edx - xor eax,DWORD [52+esp] - xor ebp,edi - xor eax,DWORD [12+esp] - and ebp,ecx - xor eax,DWORD [32+esp] - rol eax,1 - add ebp,esi - ror ecx,2 - mov esi,ebx - rol esi,5 - mov DWORD [44+esp],eax - lea eax,[2400959708+ebp*1+eax] - mov ebp,edx - add eax,esi - and ebp,edi - mov esi,DWORD [48+esp] - add eax,ebp - ; 20_39 60 - mov ebp,ebx - xor esi,DWORD [56+esp] - xor ebp,ecx - xor esi,DWORD [16+esp] - xor ebp,edx - xor esi,DWORD [36+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [48+esp],esi - lea esi,[3395469782+edi*1+esi] - mov edi,DWORD [52+esp] - add esi,ebp - ; 20_39 61 - mov ebp,eax - xor edi,DWORD [60+esp] - xor ebp,ebx - xor edi,DWORD [20+esp] - xor ebp,ecx - xor edi,DWORD [40+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [52+esp],edi - lea edi,[3395469782+edx*1+edi] - mov edx,DWORD [56+esp] - add edi,ebp - ; 20_39 62 - mov ebp,esi - xor edx,DWORD [esp] - xor ebp,eax - xor edx,DWORD [24+esp] - xor ebp,ebx - xor edx,DWORD [44+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [56+esp],edx - 
lea edx,[3395469782+ecx*1+edx] - mov ecx,DWORD [60+esp] - add edx,ebp - ; 20_39 63 - mov ebp,edi - xor ecx,DWORD [4+esp] - xor ebp,esi - xor ecx,DWORD [28+esp] - xor ebp,eax - xor ecx,DWORD [48+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [60+esp],ecx - lea ecx,[3395469782+ebx*1+ecx] - mov ebx,DWORD [esp] - add ecx,ebp - ; 20_39 64 - mov ebp,edx - xor ebx,DWORD [8+esp] - xor ebp,edi - xor ebx,DWORD [32+esp] - xor ebp,esi - xor ebx,DWORD [52+esp] - rol ebx,1 - add eax,ebp - ror edx,2 - mov ebp,ecx - rol ebp,5 - mov DWORD [esp],ebx - lea ebx,[3395469782+eax*1+ebx] - mov eax,DWORD [4+esp] - add ebx,ebp - ; 20_39 65 - mov ebp,ecx - xor eax,DWORD [12+esp] - xor ebp,edx - xor eax,DWORD [36+esp] - xor ebp,edi - xor eax,DWORD [56+esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - mov DWORD [4+esp],eax - lea eax,[3395469782+esi*1+eax] - mov esi,DWORD [8+esp] - add eax,ebp - ; 20_39 66 - mov ebp,ebx - xor esi,DWORD [16+esp] - xor ebp,ecx - xor esi,DWORD [40+esp] - xor ebp,edx - xor esi,DWORD [60+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [8+esp],esi - lea esi,[3395469782+edi*1+esi] - mov edi,DWORD [12+esp] - add esi,ebp - ; 20_39 67 - mov ebp,eax - xor edi,DWORD [20+esp] - xor ebp,ebx - xor edi,DWORD [44+esp] - xor ebp,ecx - xor edi,DWORD [esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [12+esp],edi - lea edi,[3395469782+edx*1+edi] - mov edx,DWORD [16+esp] - add edi,ebp - ; 20_39 68 - mov ebp,esi - xor edx,DWORD [24+esp] - xor ebp,eax - xor edx,DWORD [48+esp] - xor ebp,ebx - xor edx,DWORD [4+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [16+esp],edx - lea edx,[3395469782+ecx*1+edx] - mov ecx,DWORD [20+esp] - add edx,ebp - ; 20_39 69 - mov ebp,edi - xor ecx,DWORD [28+esp] - xor ebp,esi - xor ecx,DWORD [52+esp] - xor ebp,eax - xor ecx,DWORD [8+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov 
DWORD [20+esp],ecx - lea ecx,[3395469782+ebx*1+ecx] - mov ebx,DWORD [24+esp] - add ecx,ebp - ; 20_39 70 - mov ebp,edx - xor ebx,DWORD [32+esp] - xor ebp,edi - xor ebx,DWORD [56+esp] - xor ebp,esi - xor ebx,DWORD [12+esp] - rol ebx,1 - add eax,ebp - ror edx,2 - mov ebp,ecx - rol ebp,5 - mov DWORD [24+esp],ebx - lea ebx,[3395469782+eax*1+ebx] - mov eax,DWORD [28+esp] - add ebx,ebp - ; 20_39 71 - mov ebp,ecx - xor eax,DWORD [36+esp] - xor ebp,edx - xor eax,DWORD [60+esp] - xor ebp,edi - xor eax,DWORD [16+esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - mov DWORD [28+esp],eax - lea eax,[3395469782+esi*1+eax] - mov esi,DWORD [32+esp] - add eax,ebp - ; 20_39 72 - mov ebp,ebx - xor esi,DWORD [40+esp] - xor ebp,ecx - xor esi,DWORD [esp] - xor ebp,edx - xor esi,DWORD [20+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - mov DWORD [32+esp],esi - lea esi,[3395469782+edi*1+esi] - mov edi,DWORD [36+esp] - add esi,ebp - ; 20_39 73 - mov ebp,eax - xor edi,DWORD [44+esp] - xor ebp,ebx - xor edi,DWORD [4+esp] - xor ebp,ecx - xor edi,DWORD [24+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - mov DWORD [36+esp],edi - lea edi,[3395469782+edx*1+edi] - mov edx,DWORD [40+esp] - add edi,ebp - ; 20_39 74 - mov ebp,esi - xor edx,DWORD [48+esp] - xor ebp,eax - xor edx,DWORD [8+esp] - xor ebp,ebx - xor edx,DWORD [28+esp] - rol edx,1 - add ecx,ebp - ror esi,2 - mov ebp,edi - rol ebp,5 - mov DWORD [40+esp],edx - lea edx,[3395469782+ecx*1+edx] - mov ecx,DWORD [44+esp] - add edx,ebp - ; 20_39 75 - mov ebp,edi - xor ecx,DWORD [52+esp] - xor ebp,esi - xor ecx,DWORD [12+esp] - xor ebp,eax - xor ecx,DWORD [32+esp] - rol ecx,1 - add ebx,ebp - ror edi,2 - mov ebp,edx - rol ebp,5 - mov DWORD [44+esp],ecx - lea ecx,[3395469782+ebx*1+ecx] - mov ebx,DWORD [48+esp] - add ecx,ebp - ; 20_39 76 - mov ebp,edx - xor ebx,DWORD [56+esp] - xor ebp,edi - xor ebx,DWORD [16+esp] - xor ebp,esi - xor ebx,DWORD [36+esp] - rol ebx,1 - add eax,ebp - ror edx,2 
- mov ebp,ecx - rol ebp,5 - mov DWORD [48+esp],ebx - lea ebx,[3395469782+eax*1+ebx] - mov eax,DWORD [52+esp] - add ebx,ebp - ; 20_39 77 - mov ebp,ecx - xor eax,DWORD [60+esp] - xor ebp,edx - xor eax,DWORD [20+esp] - xor ebp,edi - xor eax,DWORD [40+esp] - rol eax,1 - add esi,ebp - ror ecx,2 - mov ebp,ebx - rol ebp,5 - lea eax,[3395469782+esi*1+eax] - mov esi,DWORD [56+esp] - add eax,ebp - ; 20_39 78 - mov ebp,ebx - xor esi,DWORD [esp] - xor ebp,ecx - xor esi,DWORD [24+esp] - xor ebp,edx - xor esi,DWORD [44+esp] - rol esi,1 - add edi,ebp - ror ebx,2 - mov ebp,eax - rol ebp,5 - lea esi,[3395469782+edi*1+esi] - mov edi,DWORD [60+esp] - add esi,ebp - ; 20_39 79 - mov ebp,eax - xor edi,DWORD [4+esp] - xor ebp,ebx - xor edi,DWORD [28+esp] - xor ebp,ecx - xor edi,DWORD [48+esp] - rol edi,1 - add edx,ebp - ror eax,2 - mov ebp,esi - rol ebp,5 - lea edi,[3395469782+edx*1+edi] - add edi,ebp - mov ebp,DWORD [96+esp] - mov edx,DWORD [100+esp] - add edi,DWORD [ebp] - add esi,DWORD [4+ebp] - add eax,DWORD [8+ebp] - add ebx,DWORD [12+ebp] - add ecx,DWORD [16+ebp] - mov DWORD [ebp],edi - add edx,64 - mov DWORD [4+ebp],esi - cmp edx,DWORD [104+esp] - mov DWORD [8+ebp],eax - mov edi,ecx - mov DWORD [12+ebp],ebx - mov esi,edx - mov DWORD [16+ebp],ecx - jb NEAR L$002loop - add esp,76 - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -__sha1_block_data_order_ssse3: - push ebp - push ebx - push esi - push edi - call L$003pic_point -L$003pic_point: - pop ebp - lea ebp,[(L$K_XX_XX-L$003pic_point)+ebp] -L$ssse3_shortcut: - movdqa xmm7,[ebp] - movdqa xmm0,[16+ebp] - movdqa xmm1,[32+ebp] - movdqa xmm2,[48+ebp] - movdqa xmm6,[64+ebp] - mov edi,DWORD [20+esp] - mov ebp,DWORD [24+esp] - mov edx,DWORD [28+esp] - mov esi,esp - sub esp,208 - and esp,-64 - movdqa [112+esp],xmm0 - movdqa [128+esp],xmm1 - movdqa [144+esp],xmm2 - shl edx,6 - movdqa [160+esp],xmm7 - add edx,ebp - movdqa [176+esp],xmm6 - add ebp,64 - mov DWORD [192+esp],edi - mov DWORD [196+esp],ebp - mov DWORD [200+esp],edx - mov 
DWORD [204+esp],esi - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov edi,DWORD [16+edi] - mov esi,ebx - movdqu xmm0,[ebp-64] - movdqu xmm1,[ebp-48] - movdqu xmm2,[ebp-32] - movdqu xmm3,[ebp-16] -db 102,15,56,0,198 -db 102,15,56,0,206 -db 102,15,56,0,214 - movdqa [96+esp],xmm7 -db 102,15,56,0,222 - paddd xmm0,xmm7 - paddd xmm1,xmm7 - paddd xmm2,xmm7 - movdqa [esp],xmm0 - psubd xmm0,xmm7 - movdqa [16+esp],xmm1 - psubd xmm1,xmm7 - movdqa [32+esp],xmm2 - mov ebp,ecx - psubd xmm2,xmm7 - xor ebp,edx - pshufd xmm4,xmm0,238 - and esi,ebp - jmp NEAR L$004loop -align 16 -L$004loop: - ror ebx,2 - xor esi,edx - mov ebp,eax - punpcklqdq xmm4,xmm1 - movdqa xmm6,xmm3 - add edi,DWORD [esp] - xor ebx,ecx - paddd xmm7,xmm3 - movdqa [64+esp],xmm0 - rol eax,5 - add edi,esi - psrldq xmm6,4 - and ebp,ebx - xor ebx,ecx - pxor xmm4,xmm0 - add edi,eax - ror eax,7 - pxor xmm6,xmm2 - xor ebp,ecx - mov esi,edi - add edx,DWORD [4+esp] - pxor xmm4,xmm6 - xor eax,ebx - rol edi,5 - movdqa [48+esp],xmm7 - add edx,ebp - and esi,eax - movdqa xmm0,xmm4 - xor eax,ebx - add edx,edi - ror edi,7 - movdqa xmm6,xmm4 - xor esi,ebx - pslldq xmm0,12 - paddd xmm4,xmm4 - mov ebp,edx - add ecx,DWORD [8+esp] - psrld xmm6,31 - xor edi,eax - rol edx,5 - movdqa xmm7,xmm0 - add ecx,esi - and ebp,edi - xor edi,eax - psrld xmm0,30 - add ecx,edx - ror edx,7 - por xmm4,xmm6 - xor ebp,eax - mov esi,ecx - add ebx,DWORD [12+esp] - pslld xmm7,2 - xor edx,edi - rol ecx,5 - pxor xmm4,xmm0 - movdqa xmm0,[96+esp] - add ebx,ebp - and esi,edx - pxor xmm4,xmm7 - pshufd xmm5,xmm1,238 - xor edx,edi - add ebx,ecx - ror ecx,7 - xor esi,edi - mov ebp,ebx - punpcklqdq xmm5,xmm2 - movdqa xmm7,xmm4 - add eax,DWORD [16+esp] - xor ecx,edx - paddd xmm0,xmm4 - movdqa [80+esp],xmm1 - rol ebx,5 - add eax,esi - psrldq xmm7,4 - and ebp,ecx - xor ecx,edx - pxor xmm5,xmm1 - add eax,ebx - ror ebx,7 - pxor xmm7,xmm3 - xor ebp,edx - mov esi,eax - add edi,DWORD [20+esp] - pxor xmm5,xmm7 - xor ebx,ecx - 
rol eax,5 - movdqa [esp],xmm0 - add edi,ebp - and esi,ebx - movdqa xmm1,xmm5 - xor ebx,ecx - add edi,eax - ror eax,7 - movdqa xmm7,xmm5 - xor esi,ecx - pslldq xmm1,12 - paddd xmm5,xmm5 - mov ebp,edi - add edx,DWORD [24+esp] - psrld xmm7,31 - xor eax,ebx - rol edi,5 - movdqa xmm0,xmm1 - add edx,esi - and ebp,eax - xor eax,ebx - psrld xmm1,30 - add edx,edi - ror edi,7 - por xmm5,xmm7 - xor ebp,ebx - mov esi,edx - add ecx,DWORD [28+esp] - pslld xmm0,2 - xor edi,eax - rol edx,5 - pxor xmm5,xmm1 - movdqa xmm1,[112+esp] - add ecx,ebp - and esi,edi - pxor xmm5,xmm0 - pshufd xmm6,xmm2,238 - xor edi,eax - add ecx,edx - ror edx,7 - xor esi,eax - mov ebp,ecx - punpcklqdq xmm6,xmm3 - movdqa xmm0,xmm5 - add ebx,DWORD [32+esp] - xor edx,edi - paddd xmm1,xmm5 - movdqa [96+esp],xmm2 - rol ecx,5 - add ebx,esi - psrldq xmm0,4 - and ebp,edx - xor edx,edi - pxor xmm6,xmm2 - add ebx,ecx - ror ecx,7 - pxor xmm0,xmm4 - xor ebp,edi - mov esi,ebx - add eax,DWORD [36+esp] - pxor xmm6,xmm0 - xor ecx,edx - rol ebx,5 - movdqa [16+esp],xmm1 - add eax,ebp - and esi,ecx - movdqa xmm2,xmm6 - xor ecx,edx - add eax,ebx - ror ebx,7 - movdqa xmm0,xmm6 - xor esi,edx - pslldq xmm2,12 - paddd xmm6,xmm6 - mov ebp,eax - add edi,DWORD [40+esp] - psrld xmm0,31 - xor ebx,ecx - rol eax,5 - movdqa xmm1,xmm2 - add edi,esi - and ebp,ebx - xor ebx,ecx - psrld xmm2,30 - add edi,eax - ror eax,7 - por xmm6,xmm0 - xor ebp,ecx - movdqa xmm0,[64+esp] - mov esi,edi - add edx,DWORD [44+esp] - pslld xmm1,2 - xor eax,ebx - rol edi,5 - pxor xmm6,xmm2 - movdqa xmm2,[112+esp] - add edx,ebp - and esi,eax - pxor xmm6,xmm1 - pshufd xmm7,xmm3,238 - xor eax,ebx - add edx,edi - ror edi,7 - xor esi,ebx - mov ebp,edx - punpcklqdq xmm7,xmm4 - movdqa xmm1,xmm6 - add ecx,DWORD [48+esp] - xor edi,eax - paddd xmm2,xmm6 - movdqa [64+esp],xmm3 - rol edx,5 - add ecx,esi - psrldq xmm1,4 - and ebp,edi - xor edi,eax - pxor xmm7,xmm3 - add ecx,edx - ror edx,7 - pxor xmm1,xmm5 - xor ebp,eax - mov esi,ecx - add ebx,DWORD [52+esp] - pxor xmm7,xmm1 - 
xor edx,edi - rol ecx,5 - movdqa [32+esp],xmm2 - add ebx,ebp - and esi,edx - movdqa xmm3,xmm7 - xor edx,edi - add ebx,ecx - ror ecx,7 - movdqa xmm1,xmm7 - xor esi,edi - pslldq xmm3,12 - paddd xmm7,xmm7 - mov ebp,ebx - add eax,DWORD [56+esp] - psrld xmm1,31 - xor ecx,edx - rol ebx,5 - movdqa xmm2,xmm3 - add eax,esi - and ebp,ecx - xor ecx,edx - psrld xmm3,30 - add eax,ebx - ror ebx,7 - por xmm7,xmm1 - xor ebp,edx - movdqa xmm1,[80+esp] - mov esi,eax - add edi,DWORD [60+esp] - pslld xmm2,2 - xor ebx,ecx - rol eax,5 - pxor xmm7,xmm3 - movdqa xmm3,[112+esp] - add edi,ebp - and esi,ebx - pxor xmm7,xmm2 - pshufd xmm2,xmm6,238 - xor ebx,ecx - add edi,eax - ror eax,7 - pxor xmm0,xmm4 - punpcklqdq xmm2,xmm7 - xor esi,ecx - mov ebp,edi - add edx,DWORD [esp] - pxor xmm0,xmm1 - movdqa [80+esp],xmm4 - xor eax,ebx - rol edi,5 - movdqa xmm4,xmm3 - add edx,esi - paddd xmm3,xmm7 - and ebp,eax - pxor xmm0,xmm2 - xor eax,ebx - add edx,edi - ror edi,7 - xor ebp,ebx - movdqa xmm2,xmm0 - movdqa [48+esp],xmm3 - mov esi,edx - add ecx,DWORD [4+esp] - xor edi,eax - rol edx,5 - pslld xmm0,2 - add ecx,ebp - and esi,edi - psrld xmm2,30 - xor edi,eax - add ecx,edx - ror edx,7 - xor esi,eax - mov ebp,ecx - add ebx,DWORD [8+esp] - xor edx,edi - rol ecx,5 - por xmm0,xmm2 - add ebx,esi - and ebp,edx - movdqa xmm2,[96+esp] - xor edx,edi - add ebx,ecx - add eax,DWORD [12+esp] - xor ebp,edi - mov esi,ebx - pshufd xmm3,xmm7,238 - rol ebx,5 - add eax,ebp - xor esi,edx - ror ecx,7 - add eax,ebx - add edi,DWORD [16+esp] - pxor xmm1,xmm5 - punpcklqdq xmm3,xmm0 - xor esi,ecx - mov ebp,eax - rol eax,5 - pxor xmm1,xmm2 - movdqa [96+esp],xmm5 - add edi,esi - xor ebp,ecx - movdqa xmm5,xmm4 - ror ebx,7 - paddd xmm4,xmm0 - add edi,eax - pxor xmm1,xmm3 - add edx,DWORD [20+esp] - xor ebp,ebx - mov esi,edi - rol edi,5 - movdqa xmm3,xmm1 - movdqa [esp],xmm4 - add edx,ebp - xor esi,ebx - ror eax,7 - add edx,edi - pslld xmm1,2 - add ecx,DWORD [24+esp] - xor esi,eax - psrld xmm3,30 - mov ebp,edx - rol edx,5 - add 
ecx,esi - xor ebp,eax - ror edi,7 - add ecx,edx - por xmm1,xmm3 - add ebx,DWORD [28+esp] - xor ebp,edi - movdqa xmm3,[64+esp] - mov esi,ecx - rol ecx,5 - add ebx,ebp - xor esi,edi - ror edx,7 - pshufd xmm4,xmm0,238 - add ebx,ecx - add eax,DWORD [32+esp] - pxor xmm2,xmm6 - punpcklqdq xmm4,xmm1 - xor esi,edx - mov ebp,ebx - rol ebx,5 - pxor xmm2,xmm3 - movdqa [64+esp],xmm6 - add eax,esi - xor ebp,edx - movdqa xmm6,[128+esp] - ror ecx,7 - paddd xmm5,xmm1 - add eax,ebx - pxor xmm2,xmm4 - add edi,DWORD [36+esp] - xor ebp,ecx - mov esi,eax - rol eax,5 - movdqa xmm4,xmm2 - movdqa [16+esp],xmm5 - add edi,ebp - xor esi,ecx - ror ebx,7 - add edi,eax - pslld xmm2,2 - add edx,DWORD [40+esp] - xor esi,ebx - psrld xmm4,30 - mov ebp,edi - rol edi,5 - add edx,esi - xor ebp,ebx - ror eax,7 - add edx,edi - por xmm2,xmm4 - add ecx,DWORD [44+esp] - xor ebp,eax - movdqa xmm4,[80+esp] - mov esi,edx - rol edx,5 - add ecx,ebp - xor esi,eax - ror edi,7 - pshufd xmm5,xmm1,238 - add ecx,edx - add ebx,DWORD [48+esp] - pxor xmm3,xmm7 - punpcklqdq xmm5,xmm2 - xor esi,edi - mov ebp,ecx - rol ecx,5 - pxor xmm3,xmm4 - movdqa [80+esp],xmm7 - add ebx,esi - xor ebp,edi - movdqa xmm7,xmm6 - ror edx,7 - paddd xmm6,xmm2 - add ebx,ecx - pxor xmm3,xmm5 - add eax,DWORD [52+esp] - xor ebp,edx - mov esi,ebx - rol ebx,5 - movdqa xmm5,xmm3 - movdqa [32+esp],xmm6 - add eax,ebp - xor esi,edx - ror ecx,7 - add eax,ebx - pslld xmm3,2 - add edi,DWORD [56+esp] - xor esi,ecx - psrld xmm5,30 - mov ebp,eax - rol eax,5 - add edi,esi - xor ebp,ecx - ror ebx,7 - add edi,eax - por xmm3,xmm5 - add edx,DWORD [60+esp] - xor ebp,ebx - movdqa xmm5,[96+esp] - mov esi,edi - rol edi,5 - add edx,ebp - xor esi,ebx - ror eax,7 - pshufd xmm6,xmm2,238 - add edx,edi - add ecx,DWORD [esp] - pxor xmm4,xmm0 - punpcklqdq xmm6,xmm3 - xor esi,eax - mov ebp,edx - rol edx,5 - pxor xmm4,xmm5 - movdqa [96+esp],xmm0 - add ecx,esi - xor ebp,eax - movdqa xmm0,xmm7 - ror edi,7 - paddd xmm7,xmm3 - add ecx,edx - pxor xmm4,xmm6 - add ebx,DWORD [4+esp] - 
xor ebp,edi - mov esi,ecx - rol ecx,5 - movdqa xmm6,xmm4 - movdqa [48+esp],xmm7 - add ebx,ebp - xor esi,edi - ror edx,7 - add ebx,ecx - pslld xmm4,2 - add eax,DWORD [8+esp] - xor esi,edx - psrld xmm6,30 - mov ebp,ebx - rol ebx,5 - add eax,esi - xor ebp,edx - ror ecx,7 - add eax,ebx - por xmm4,xmm6 - add edi,DWORD [12+esp] - xor ebp,ecx - movdqa xmm6,[64+esp] - mov esi,eax - rol eax,5 - add edi,ebp - xor esi,ecx - ror ebx,7 - pshufd xmm7,xmm3,238 - add edi,eax - add edx,DWORD [16+esp] - pxor xmm5,xmm1 - punpcklqdq xmm7,xmm4 - xor esi,ebx - mov ebp,edi - rol edi,5 - pxor xmm5,xmm6 - movdqa [64+esp],xmm1 - add edx,esi - xor ebp,ebx - movdqa xmm1,xmm0 - ror eax,7 - paddd xmm0,xmm4 - add edx,edi - pxor xmm5,xmm7 - add ecx,DWORD [20+esp] - xor ebp,eax - mov esi,edx - rol edx,5 - movdqa xmm7,xmm5 - movdqa [esp],xmm0 - add ecx,ebp - xor esi,eax - ror edi,7 - add ecx,edx - pslld xmm5,2 - add ebx,DWORD [24+esp] - xor esi,edi - psrld xmm7,30 - mov ebp,ecx - rol ecx,5 - add ebx,esi - xor ebp,edi - ror edx,7 - add ebx,ecx - por xmm5,xmm7 - add eax,DWORD [28+esp] - movdqa xmm7,[80+esp] - ror ecx,7 - mov esi,ebx - xor ebp,edx - rol ebx,5 - pshufd xmm0,xmm4,238 - add eax,ebp - xor esi,ecx - xor ecx,edx - add eax,ebx - add edi,DWORD [32+esp] - pxor xmm6,xmm2 - punpcklqdq xmm0,xmm5 - and esi,ecx - xor ecx,edx - ror ebx,7 - pxor xmm6,xmm7 - movdqa [80+esp],xmm2 - mov ebp,eax - xor esi,ecx - rol eax,5 - movdqa xmm2,xmm1 - add edi,esi - paddd xmm1,xmm5 - xor ebp,ebx - pxor xmm6,xmm0 - xor ebx,ecx - add edi,eax - add edx,DWORD [36+esp] - and ebp,ebx - movdqa xmm0,xmm6 - movdqa [16+esp],xmm1 - xor ebx,ecx - ror eax,7 - mov esi,edi - xor ebp,ebx - rol edi,5 - pslld xmm6,2 - add edx,ebp - xor esi,eax - psrld xmm0,30 - xor eax,ebx - add edx,edi - add ecx,DWORD [40+esp] - and esi,eax - xor eax,ebx - ror edi,7 - por xmm6,xmm0 - mov ebp,edx - xor esi,eax - movdqa xmm0,[96+esp] - rol edx,5 - add ecx,esi - xor ebp,edi - xor edi,eax - add ecx,edx - pshufd xmm1,xmm5,238 - add ebx,DWORD [44+esp] - 
and ebp,edi - xor edi,eax - ror edx,7 - mov esi,ecx - xor ebp,edi - rol ecx,5 - add ebx,ebp - xor esi,edx - xor edx,edi - add ebx,ecx - add eax,DWORD [48+esp] - pxor xmm7,xmm3 - punpcklqdq xmm1,xmm6 - and esi,edx - xor edx,edi - ror ecx,7 - pxor xmm7,xmm0 - movdqa [96+esp],xmm3 - mov ebp,ebx - xor esi,edx - rol ebx,5 - movdqa xmm3,[144+esp] - add eax,esi - paddd xmm2,xmm6 - xor ebp,ecx - pxor xmm7,xmm1 - xor ecx,edx - add eax,ebx - add edi,DWORD [52+esp] - and ebp,ecx - movdqa xmm1,xmm7 - movdqa [32+esp],xmm2 - xor ecx,edx - ror ebx,7 - mov esi,eax - xor ebp,ecx - rol eax,5 - pslld xmm7,2 - add edi,ebp - xor esi,ebx - psrld xmm1,30 - xor ebx,ecx - add edi,eax - add edx,DWORD [56+esp] - and esi,ebx - xor ebx,ecx - ror eax,7 - por xmm7,xmm1 - mov ebp,edi - xor esi,ebx - movdqa xmm1,[64+esp] - rol edi,5 - add edx,esi - xor ebp,eax - xor eax,ebx - add edx,edi - pshufd xmm2,xmm6,238 - add ecx,DWORD [60+esp] - and ebp,eax - xor eax,ebx - ror edi,7 - mov esi,edx - xor ebp,eax - rol edx,5 - add ecx,ebp - xor esi,edi - xor edi,eax - add ecx,edx - add ebx,DWORD [esp] - pxor xmm0,xmm4 - punpcklqdq xmm2,xmm7 - and esi,edi - xor edi,eax - ror edx,7 - pxor xmm0,xmm1 - movdqa [64+esp],xmm4 - mov ebp,ecx - xor esi,edi - rol ecx,5 - movdqa xmm4,xmm3 - add ebx,esi - paddd xmm3,xmm7 - xor ebp,edx - pxor xmm0,xmm2 - xor edx,edi - add ebx,ecx - add eax,DWORD [4+esp] - and ebp,edx - movdqa xmm2,xmm0 - movdqa [48+esp],xmm3 - xor edx,edi - ror ecx,7 - mov esi,ebx - xor ebp,edx - rol ebx,5 - pslld xmm0,2 - add eax,ebp - xor esi,ecx - psrld xmm2,30 - xor ecx,edx - add eax,ebx - add edi,DWORD [8+esp] - and esi,ecx - xor ecx,edx - ror ebx,7 - por xmm0,xmm2 - mov ebp,eax - xor esi,ecx - movdqa xmm2,[80+esp] - rol eax,5 - add edi,esi - xor ebp,ebx - xor ebx,ecx - add edi,eax - pshufd xmm3,xmm7,238 - add edx,DWORD [12+esp] - and ebp,ebx - xor ebx,ecx - ror eax,7 - mov esi,edi - xor ebp,ebx - rol edi,5 - add edx,ebp - xor esi,eax - xor eax,ebx - add edx,edi - add ecx,DWORD [16+esp] - pxor 
xmm1,xmm5 - punpcklqdq xmm3,xmm0 - and esi,eax - xor eax,ebx - ror edi,7 - pxor xmm1,xmm2 - movdqa [80+esp],xmm5 - mov ebp,edx - xor esi,eax - rol edx,5 - movdqa xmm5,xmm4 - add ecx,esi - paddd xmm4,xmm0 - xor ebp,edi - pxor xmm1,xmm3 - xor edi,eax - add ecx,edx - add ebx,DWORD [20+esp] - and ebp,edi - movdqa xmm3,xmm1 - movdqa [esp],xmm4 - xor edi,eax - ror edx,7 - mov esi,ecx - xor ebp,edi - rol ecx,5 - pslld xmm1,2 - add ebx,ebp - xor esi,edx - psrld xmm3,30 - xor edx,edi - add ebx,ecx - add eax,DWORD [24+esp] - and esi,edx - xor edx,edi - ror ecx,7 - por xmm1,xmm3 - mov ebp,ebx - xor esi,edx - movdqa xmm3,[96+esp] - rol ebx,5 - add eax,esi - xor ebp,ecx - xor ecx,edx - add eax,ebx - pshufd xmm4,xmm0,238 - add edi,DWORD [28+esp] - and ebp,ecx - xor ecx,edx - ror ebx,7 - mov esi,eax - xor ebp,ecx - rol eax,5 - add edi,ebp - xor esi,ebx - xor ebx,ecx - add edi,eax - add edx,DWORD [32+esp] - pxor xmm2,xmm6 - punpcklqdq xmm4,xmm1 - and esi,ebx - xor ebx,ecx - ror eax,7 - pxor xmm2,xmm3 - movdqa [96+esp],xmm6 - mov ebp,edi - xor esi,ebx - rol edi,5 - movdqa xmm6,xmm5 - add edx,esi - paddd xmm5,xmm1 - xor ebp,eax - pxor xmm2,xmm4 - xor eax,ebx - add edx,edi - add ecx,DWORD [36+esp] - and ebp,eax - movdqa xmm4,xmm2 - movdqa [16+esp],xmm5 - xor eax,ebx - ror edi,7 - mov esi,edx - xor ebp,eax - rol edx,5 - pslld xmm2,2 - add ecx,ebp - xor esi,edi - psrld xmm4,30 - xor edi,eax - add ecx,edx - add ebx,DWORD [40+esp] - and esi,edi - xor edi,eax - ror edx,7 - por xmm2,xmm4 - mov ebp,ecx - xor esi,edi - movdqa xmm4,[64+esp] - rol ecx,5 - add ebx,esi - xor ebp,edx - xor edx,edi - add ebx,ecx - pshufd xmm5,xmm1,238 - add eax,DWORD [44+esp] - and ebp,edx - xor edx,edi - ror ecx,7 - mov esi,ebx - xor ebp,edx - rol ebx,5 - add eax,ebp - xor esi,edx - add eax,ebx - add edi,DWORD [48+esp] - pxor xmm3,xmm7 - punpcklqdq xmm5,xmm2 - xor esi,ecx - mov ebp,eax - rol eax,5 - pxor xmm3,xmm4 - movdqa [64+esp],xmm7 - add edi,esi - xor ebp,ecx - movdqa xmm7,xmm6 - ror ebx,7 - paddd xmm6,xmm2 
- add edi,eax - pxor xmm3,xmm5 - add edx,DWORD [52+esp] - xor ebp,ebx - mov esi,edi - rol edi,5 - movdqa xmm5,xmm3 - movdqa [32+esp],xmm6 - add edx,ebp - xor esi,ebx - ror eax,7 - add edx,edi - pslld xmm3,2 - add ecx,DWORD [56+esp] - xor esi,eax - psrld xmm5,30 - mov ebp,edx - rol edx,5 - add ecx,esi - xor ebp,eax - ror edi,7 - add ecx,edx - por xmm3,xmm5 - add ebx,DWORD [60+esp] - xor ebp,edi - mov esi,ecx - rol ecx,5 - add ebx,ebp - xor esi,edi - ror edx,7 - add ebx,ecx - add eax,DWORD [esp] - xor esi,edx - mov ebp,ebx - rol ebx,5 - add eax,esi - xor ebp,edx - ror ecx,7 - paddd xmm7,xmm3 - add eax,ebx - add edi,DWORD [4+esp] - xor ebp,ecx - mov esi,eax - movdqa [48+esp],xmm7 - rol eax,5 - add edi,ebp - xor esi,ecx - ror ebx,7 - add edi,eax - add edx,DWORD [8+esp] - xor esi,ebx - mov ebp,edi - rol edi,5 - add edx,esi - xor ebp,ebx - ror eax,7 - add edx,edi - add ecx,DWORD [12+esp] - xor ebp,eax - mov esi,edx - rol edx,5 - add ecx,ebp - xor esi,eax - ror edi,7 - add ecx,edx - mov ebp,DWORD [196+esp] - cmp ebp,DWORD [200+esp] - je NEAR L$005done - movdqa xmm7,[160+esp] - movdqa xmm6,[176+esp] - movdqu xmm0,[ebp] - movdqu xmm1,[16+ebp] - movdqu xmm2,[32+ebp] - movdqu xmm3,[48+ebp] - add ebp,64 -db 102,15,56,0,198 - mov DWORD [196+esp],ebp - movdqa [96+esp],xmm7 - add ebx,DWORD [16+esp] - xor esi,edi - mov ebp,ecx - rol ecx,5 - add ebx,esi - xor ebp,edi - ror edx,7 -db 102,15,56,0,206 - add ebx,ecx - add eax,DWORD [20+esp] - xor ebp,edx - mov esi,ebx - paddd xmm0,xmm7 - rol ebx,5 - add eax,ebp - xor esi,edx - ror ecx,7 - movdqa [esp],xmm0 - add eax,ebx - add edi,DWORD [24+esp] - xor esi,ecx - mov ebp,eax - psubd xmm0,xmm7 - rol eax,5 - add edi,esi - xor ebp,ecx - ror ebx,7 - add edi,eax - add edx,DWORD [28+esp] - xor ebp,ebx - mov esi,edi - rol edi,5 - add edx,ebp - xor esi,ebx - ror eax,7 - add edx,edi - add ecx,DWORD [32+esp] - xor esi,eax - mov ebp,edx - rol edx,5 - add ecx,esi - xor ebp,eax - ror edi,7 -db 102,15,56,0,214 - add ecx,edx - add ebx,DWORD [36+esp] - 
xor ebp,edi - mov esi,ecx - paddd xmm1,xmm7 - rol ecx,5 - add ebx,ebp - xor esi,edi - ror edx,7 - movdqa [16+esp],xmm1 - add ebx,ecx - add eax,DWORD [40+esp] - xor esi,edx - mov ebp,ebx - psubd xmm1,xmm7 - rol ebx,5 - add eax,esi - xor ebp,edx - ror ecx,7 - add eax,ebx - add edi,DWORD [44+esp] - xor ebp,ecx - mov esi,eax - rol eax,5 - add edi,ebp - xor esi,ecx - ror ebx,7 - add edi,eax - add edx,DWORD [48+esp] - xor esi,ebx - mov ebp,edi - rol edi,5 - add edx,esi - xor ebp,ebx - ror eax,7 -db 102,15,56,0,222 - add edx,edi - add ecx,DWORD [52+esp] - xor ebp,eax - mov esi,edx - paddd xmm2,xmm7 - rol edx,5 - add ecx,ebp - xor esi,eax - ror edi,7 - movdqa [32+esp],xmm2 - add ecx,edx - add ebx,DWORD [56+esp] - xor esi,edi - mov ebp,ecx - psubd xmm2,xmm7 - rol ecx,5 - add ebx,esi - xor ebp,edi - ror edx,7 - add ebx,ecx - add eax,DWORD [60+esp] - xor ebp,edx - mov esi,ebx - rol ebx,5 - add eax,ebp - ror ecx,7 - add eax,ebx - mov ebp,DWORD [192+esp] - add eax,DWORD [ebp] - add esi,DWORD [4+ebp] - add ecx,DWORD [8+ebp] - mov DWORD [ebp],eax - add edx,DWORD [12+ebp] - mov DWORD [4+ebp],esi - add edi,DWORD [16+ebp] - mov DWORD [8+ebp],ecx - mov ebx,ecx - mov DWORD [12+ebp],edx - xor ebx,edx - mov DWORD [16+ebp],edi - mov ebp,esi - pshufd xmm4,xmm0,238 - and esi,ebx - mov ebx,ebp - jmp NEAR L$004loop -align 16 -L$005done: - add ebx,DWORD [16+esp] - xor esi,edi - mov ebp,ecx - rol ecx,5 - add ebx,esi - xor ebp,edi - ror edx,7 - add ebx,ecx - add eax,DWORD [20+esp] - xor ebp,edx - mov esi,ebx - rol ebx,5 - add eax,ebp - xor esi,edx - ror ecx,7 - add eax,ebx - add edi,DWORD [24+esp] - xor esi,ecx - mov ebp,eax - rol eax,5 - add edi,esi - xor ebp,ecx - ror ebx,7 - add edi,eax - add edx,DWORD [28+esp] - xor ebp,ebx - mov esi,edi - rol edi,5 - add edx,ebp - xor esi,ebx - ror eax,7 - add edx,edi - add ecx,DWORD [32+esp] - xor esi,eax - mov ebp,edx - rol edx,5 - add ecx,esi - xor ebp,eax - ror edi,7 - add ecx,edx - add ebx,DWORD [36+esp] - xor ebp,edi - mov esi,ecx - rol ecx,5 - add 
ebx,ebp - xor esi,edi - ror edx,7 - add ebx,ecx - add eax,DWORD [40+esp] - xor esi,edx - mov ebp,ebx - rol ebx,5 - add eax,esi - xor ebp,edx - ror ecx,7 - add eax,ebx - add edi,DWORD [44+esp] - xor ebp,ecx - mov esi,eax - rol eax,5 - add edi,ebp - xor esi,ecx - ror ebx,7 - add edi,eax - add edx,DWORD [48+esp] - xor esi,ebx - mov ebp,edi - rol edi,5 - add edx,esi - xor ebp,ebx - ror eax,7 - add edx,edi - add ecx,DWORD [52+esp] - xor ebp,eax - mov esi,edx - rol edx,5 - add ecx,ebp - xor esi,eax - ror edi,7 - add ecx,edx - add ebx,DWORD [56+esp] - xor esi,edi - mov ebp,ecx - rol ecx,5 - add ebx,esi - xor ebp,edi - ror edx,7 - add ebx,ecx - add eax,DWORD [60+esp] - xor ebp,edx - mov esi,ebx - rol ebx,5 - add eax,ebp - ror ecx,7 - add eax,ebx - mov ebp,DWORD [192+esp] - add eax,DWORD [ebp] - mov esp,DWORD [204+esp] - add esi,DWORD [4+ebp] - add ecx,DWORD [8+ebp] - mov DWORD [ebp],eax - add edx,DWORD [12+ebp] - mov DWORD [4+ebp],esi - add edi,DWORD [16+ebp] - mov DWORD [8+ebp],ecx - mov DWORD [12+ebp],edx - mov DWORD [16+ebp],edi - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -__sha1_block_data_order_avx: - push ebp - push ebx - push esi - push edi - call L$006pic_point -L$006pic_point: - pop ebp - lea ebp,[(L$K_XX_XX-L$006pic_point)+ebp] -L$avx_shortcut: - vzeroall - vmovdqa xmm7,[ebp] - vmovdqa xmm0,[16+ebp] - vmovdqa xmm1,[32+ebp] - vmovdqa xmm2,[48+ebp] - vmovdqa xmm6,[64+ebp] - mov edi,DWORD [20+esp] - mov ebp,DWORD [24+esp] - mov edx,DWORD [28+esp] - mov esi,esp - sub esp,208 - and esp,-64 - vmovdqa [112+esp],xmm0 - vmovdqa [128+esp],xmm1 - vmovdqa [144+esp],xmm2 - shl edx,6 - vmovdqa [160+esp],xmm7 - add edx,ebp - vmovdqa [176+esp],xmm6 - add ebp,64 - mov DWORD [192+esp],edi - mov DWORD [196+esp],ebp - mov DWORD [200+esp],edx - mov DWORD [204+esp],esi - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - mov edi,DWORD [16+edi] - mov esi,ebx - vmovdqu xmm0,[ebp-64] - vmovdqu xmm1,[ebp-48] - vmovdqu xmm2,[ebp-32] - 
vmovdqu xmm3,[ebp-16] - vpshufb xmm0,xmm0,xmm6 - vpshufb xmm1,xmm1,xmm6 - vpshufb xmm2,xmm2,xmm6 - vmovdqa [96+esp],xmm7 - vpshufb xmm3,xmm3,xmm6 - vpaddd xmm4,xmm0,xmm7 - vpaddd xmm5,xmm1,xmm7 - vpaddd xmm6,xmm2,xmm7 - vmovdqa [esp],xmm4 - mov ebp,ecx - vmovdqa [16+esp],xmm5 - xor ebp,edx - vmovdqa [32+esp],xmm6 - and esi,ebp - jmp NEAR L$007loop -align 16 -L$007loop: - shrd ebx,ebx,2 - xor esi,edx - vpalignr xmm4,xmm1,xmm0,8 - mov ebp,eax - add edi,DWORD [esp] - vpaddd xmm7,xmm7,xmm3 - vmovdqa [64+esp],xmm0 - xor ebx,ecx - shld eax,eax,5 - vpsrldq xmm6,xmm3,4 - add edi,esi - and ebp,ebx - vpxor xmm4,xmm4,xmm0 - xor ebx,ecx - add edi,eax - vpxor xmm6,xmm6,xmm2 - shrd eax,eax,7 - xor ebp,ecx - vmovdqa [48+esp],xmm7 - mov esi,edi - add edx,DWORD [4+esp] - vpxor xmm4,xmm4,xmm6 - xor eax,ebx - shld edi,edi,5 - add edx,ebp - and esi,eax - vpsrld xmm6,xmm4,31 - xor eax,ebx - add edx,edi - shrd edi,edi,7 - xor esi,ebx - vpslldq xmm0,xmm4,12 - vpaddd xmm4,xmm4,xmm4 - mov ebp,edx - add ecx,DWORD [8+esp] - xor edi,eax - shld edx,edx,5 - vpsrld xmm7,xmm0,30 - vpor xmm4,xmm4,xmm6 - add ecx,esi - and ebp,edi - xor edi,eax - add ecx,edx - vpslld xmm0,xmm0,2 - shrd edx,edx,7 - xor ebp,eax - vpxor xmm4,xmm4,xmm7 - mov esi,ecx - add ebx,DWORD [12+esp] - xor edx,edi - shld ecx,ecx,5 - vpxor xmm4,xmm4,xmm0 - add ebx,ebp - and esi,edx - vmovdqa xmm0,[96+esp] - xor edx,edi - add ebx,ecx - shrd ecx,ecx,7 - xor esi,edi - vpalignr xmm5,xmm2,xmm1,8 - mov ebp,ebx - add eax,DWORD [16+esp] - vpaddd xmm0,xmm0,xmm4 - vmovdqa [80+esp],xmm1 - xor ecx,edx - shld ebx,ebx,5 - vpsrldq xmm7,xmm4,4 - add eax,esi - and ebp,ecx - vpxor xmm5,xmm5,xmm1 - xor ecx,edx - add eax,ebx - vpxor xmm7,xmm7,xmm3 - shrd ebx,ebx,7 - xor ebp,edx - vmovdqa [esp],xmm0 - mov esi,eax - add edi,DWORD [20+esp] - vpxor xmm5,xmm5,xmm7 - xor ebx,ecx - shld eax,eax,5 - add edi,ebp - and esi,ebx - vpsrld xmm7,xmm5,31 - xor ebx,ecx - add edi,eax - shrd eax,eax,7 - xor esi,ecx - vpslldq xmm1,xmm5,12 - vpaddd xmm5,xmm5,xmm5 - mov 
ebp,edi - add edx,DWORD [24+esp] - xor eax,ebx - shld edi,edi,5 - vpsrld xmm0,xmm1,30 - vpor xmm5,xmm5,xmm7 - add edx,esi - and ebp,eax - xor eax,ebx - add edx,edi - vpslld xmm1,xmm1,2 - shrd edi,edi,7 - xor ebp,ebx - vpxor xmm5,xmm5,xmm0 - mov esi,edx - add ecx,DWORD [28+esp] - xor edi,eax - shld edx,edx,5 - vpxor xmm5,xmm5,xmm1 - add ecx,ebp - and esi,edi - vmovdqa xmm1,[112+esp] - xor edi,eax - add ecx,edx - shrd edx,edx,7 - xor esi,eax - vpalignr xmm6,xmm3,xmm2,8 - mov ebp,ecx - add ebx,DWORD [32+esp] - vpaddd xmm1,xmm1,xmm5 - vmovdqa [96+esp],xmm2 - xor edx,edi - shld ecx,ecx,5 - vpsrldq xmm0,xmm5,4 - add ebx,esi - and ebp,edx - vpxor xmm6,xmm6,xmm2 - xor edx,edi - add ebx,ecx - vpxor xmm0,xmm0,xmm4 - shrd ecx,ecx,7 - xor ebp,edi - vmovdqa [16+esp],xmm1 - mov esi,ebx - add eax,DWORD [36+esp] - vpxor xmm6,xmm6,xmm0 - xor ecx,edx - shld ebx,ebx,5 - add eax,ebp - and esi,ecx - vpsrld xmm0,xmm6,31 - xor ecx,edx - add eax,ebx - shrd ebx,ebx,7 - xor esi,edx - vpslldq xmm2,xmm6,12 - vpaddd xmm6,xmm6,xmm6 - mov ebp,eax - add edi,DWORD [40+esp] - xor ebx,ecx - shld eax,eax,5 - vpsrld xmm1,xmm2,30 - vpor xmm6,xmm6,xmm0 - add edi,esi - and ebp,ebx - xor ebx,ecx - add edi,eax - vpslld xmm2,xmm2,2 - vmovdqa xmm0,[64+esp] - shrd eax,eax,7 - xor ebp,ecx - vpxor xmm6,xmm6,xmm1 - mov esi,edi - add edx,DWORD [44+esp] - xor eax,ebx - shld edi,edi,5 - vpxor xmm6,xmm6,xmm2 - add edx,ebp - and esi,eax - vmovdqa xmm2,[112+esp] - xor eax,ebx - add edx,edi - shrd edi,edi,7 - xor esi,ebx - vpalignr xmm7,xmm4,xmm3,8 - mov ebp,edx - add ecx,DWORD [48+esp] - vpaddd xmm2,xmm2,xmm6 - vmovdqa [64+esp],xmm3 - xor edi,eax - shld edx,edx,5 - vpsrldq xmm1,xmm6,4 - add ecx,esi - and ebp,edi - vpxor xmm7,xmm7,xmm3 - xor edi,eax - add ecx,edx - vpxor xmm1,xmm1,xmm5 - shrd edx,edx,7 - xor ebp,eax - vmovdqa [32+esp],xmm2 - mov esi,ecx - add ebx,DWORD [52+esp] - vpxor xmm7,xmm7,xmm1 - xor edx,edi - shld ecx,ecx,5 - add ebx,ebp - and esi,edx - vpsrld xmm1,xmm7,31 - xor edx,edi - add ebx,ecx - shrd 
ecx,ecx,7 - xor esi,edi - vpslldq xmm3,xmm7,12 - vpaddd xmm7,xmm7,xmm7 - mov ebp,ebx - add eax,DWORD [56+esp] - xor ecx,edx - shld ebx,ebx,5 - vpsrld xmm2,xmm3,30 - vpor xmm7,xmm7,xmm1 - add eax,esi - and ebp,ecx - xor ecx,edx - add eax,ebx - vpslld xmm3,xmm3,2 - vmovdqa xmm1,[80+esp] - shrd ebx,ebx,7 - xor ebp,edx - vpxor xmm7,xmm7,xmm2 - mov esi,eax - add edi,DWORD [60+esp] - xor ebx,ecx - shld eax,eax,5 - vpxor xmm7,xmm7,xmm3 - add edi,ebp - and esi,ebx - vmovdqa xmm3,[112+esp] - xor ebx,ecx - add edi,eax - vpalignr xmm2,xmm7,xmm6,8 - vpxor xmm0,xmm0,xmm4 - shrd eax,eax,7 - xor esi,ecx - mov ebp,edi - add edx,DWORD [esp] - vpxor xmm0,xmm0,xmm1 - vmovdqa [80+esp],xmm4 - xor eax,ebx - shld edi,edi,5 - vmovdqa xmm4,xmm3 - vpaddd xmm3,xmm3,xmm7 - add edx,esi - and ebp,eax - vpxor xmm0,xmm0,xmm2 - xor eax,ebx - add edx,edi - shrd edi,edi,7 - xor ebp,ebx - vpsrld xmm2,xmm0,30 - vmovdqa [48+esp],xmm3 - mov esi,edx - add ecx,DWORD [4+esp] - xor edi,eax - shld edx,edx,5 - vpslld xmm0,xmm0,2 - add ecx,ebp - and esi,edi - xor edi,eax - add ecx,edx - shrd edx,edx,7 - xor esi,eax - mov ebp,ecx - add ebx,DWORD [8+esp] - vpor xmm0,xmm0,xmm2 - xor edx,edi - shld ecx,ecx,5 - vmovdqa xmm2,[96+esp] - add ebx,esi - and ebp,edx - xor edx,edi - add ebx,ecx - add eax,DWORD [12+esp] - xor ebp,edi - mov esi,ebx - shld ebx,ebx,5 - add eax,ebp - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - vpalignr xmm3,xmm0,xmm7,8 - vpxor xmm1,xmm1,xmm5 - add edi,DWORD [16+esp] - xor esi,ecx - mov ebp,eax - shld eax,eax,5 - vpxor xmm1,xmm1,xmm2 - vmovdqa [96+esp],xmm5 - add edi,esi - xor ebp,ecx - vmovdqa xmm5,xmm4 - vpaddd xmm4,xmm4,xmm0 - shrd ebx,ebx,7 - add edi,eax - vpxor xmm1,xmm1,xmm3 - add edx,DWORD [20+esp] - xor ebp,ebx - mov esi,edi - shld edi,edi,5 - vpsrld xmm3,xmm1,30 - vmovdqa [esp],xmm4 - add edx,ebp - xor esi,ebx - shrd eax,eax,7 - add edx,edi - vpslld xmm1,xmm1,2 - add ecx,DWORD [24+esp] - xor esi,eax - mov ebp,edx - shld edx,edx,5 - add ecx,esi - xor ebp,eax - shrd edi,edi,7 - add 
ecx,edx - vpor xmm1,xmm1,xmm3 - add ebx,DWORD [28+esp] - xor ebp,edi - vmovdqa xmm3,[64+esp] - mov esi,ecx - shld ecx,ecx,5 - add ebx,ebp - xor esi,edi - shrd edx,edx,7 - add ebx,ecx - vpalignr xmm4,xmm1,xmm0,8 - vpxor xmm2,xmm2,xmm6 - add eax,DWORD [32+esp] - xor esi,edx - mov ebp,ebx - shld ebx,ebx,5 - vpxor xmm2,xmm2,xmm3 - vmovdqa [64+esp],xmm6 - add eax,esi - xor ebp,edx - vmovdqa xmm6,[128+esp] - vpaddd xmm5,xmm5,xmm1 - shrd ecx,ecx,7 - add eax,ebx - vpxor xmm2,xmm2,xmm4 - add edi,DWORD [36+esp] - xor ebp,ecx - mov esi,eax - shld eax,eax,5 - vpsrld xmm4,xmm2,30 - vmovdqa [16+esp],xmm5 - add edi,ebp - xor esi,ecx - shrd ebx,ebx,7 - add edi,eax - vpslld xmm2,xmm2,2 - add edx,DWORD [40+esp] - xor esi,ebx - mov ebp,edi - shld edi,edi,5 - add edx,esi - xor ebp,ebx - shrd eax,eax,7 - add edx,edi - vpor xmm2,xmm2,xmm4 - add ecx,DWORD [44+esp] - xor ebp,eax - vmovdqa xmm4,[80+esp] - mov esi,edx - shld edx,edx,5 - add ecx,ebp - xor esi,eax - shrd edi,edi,7 - add ecx,edx - vpalignr xmm5,xmm2,xmm1,8 - vpxor xmm3,xmm3,xmm7 - add ebx,DWORD [48+esp] - xor esi,edi - mov ebp,ecx - shld ecx,ecx,5 - vpxor xmm3,xmm3,xmm4 - vmovdqa [80+esp],xmm7 - add ebx,esi - xor ebp,edi - vmovdqa xmm7,xmm6 - vpaddd xmm6,xmm6,xmm2 - shrd edx,edx,7 - add ebx,ecx - vpxor xmm3,xmm3,xmm5 - add eax,DWORD [52+esp] - xor ebp,edx - mov esi,ebx - shld ebx,ebx,5 - vpsrld xmm5,xmm3,30 - vmovdqa [32+esp],xmm6 - add eax,ebp - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - vpslld xmm3,xmm3,2 - add edi,DWORD [56+esp] - xor esi,ecx - mov ebp,eax - shld eax,eax,5 - add edi,esi - xor ebp,ecx - shrd ebx,ebx,7 - add edi,eax - vpor xmm3,xmm3,xmm5 - add edx,DWORD [60+esp] - xor ebp,ebx - vmovdqa xmm5,[96+esp] - mov esi,edi - shld edi,edi,5 - add edx,ebp - xor esi,ebx - shrd eax,eax,7 - add edx,edi - vpalignr xmm6,xmm3,xmm2,8 - vpxor xmm4,xmm4,xmm0 - add ecx,DWORD [esp] - xor esi,eax - mov ebp,edx - shld edx,edx,5 - vpxor xmm4,xmm4,xmm5 - vmovdqa [96+esp],xmm0 - add ecx,esi - xor ebp,eax - vmovdqa xmm0,xmm7 - vpaddd 
xmm7,xmm7,xmm3 - shrd edi,edi,7 - add ecx,edx - vpxor xmm4,xmm4,xmm6 - add ebx,DWORD [4+esp] - xor ebp,edi - mov esi,ecx - shld ecx,ecx,5 - vpsrld xmm6,xmm4,30 - vmovdqa [48+esp],xmm7 - add ebx,ebp - xor esi,edi - shrd edx,edx,7 - add ebx,ecx - vpslld xmm4,xmm4,2 - add eax,DWORD [8+esp] - xor esi,edx - mov ebp,ebx - shld ebx,ebx,5 - add eax,esi - xor ebp,edx - shrd ecx,ecx,7 - add eax,ebx - vpor xmm4,xmm4,xmm6 - add edi,DWORD [12+esp] - xor ebp,ecx - vmovdqa xmm6,[64+esp] - mov esi,eax - shld eax,eax,5 - add edi,ebp - xor esi,ecx - shrd ebx,ebx,7 - add edi,eax - vpalignr xmm7,xmm4,xmm3,8 - vpxor xmm5,xmm5,xmm1 - add edx,DWORD [16+esp] - xor esi,ebx - mov ebp,edi - shld edi,edi,5 - vpxor xmm5,xmm5,xmm6 - vmovdqa [64+esp],xmm1 - add edx,esi - xor ebp,ebx - vmovdqa xmm1,xmm0 - vpaddd xmm0,xmm0,xmm4 - shrd eax,eax,7 - add edx,edi - vpxor xmm5,xmm5,xmm7 - add ecx,DWORD [20+esp] - xor ebp,eax - mov esi,edx - shld edx,edx,5 - vpsrld xmm7,xmm5,30 - vmovdqa [esp],xmm0 - add ecx,ebp - xor esi,eax - shrd edi,edi,7 - add ecx,edx - vpslld xmm5,xmm5,2 - add ebx,DWORD [24+esp] - xor esi,edi - mov ebp,ecx - shld ecx,ecx,5 - add ebx,esi - xor ebp,edi - shrd edx,edx,7 - add ebx,ecx - vpor xmm5,xmm5,xmm7 - add eax,DWORD [28+esp] - vmovdqa xmm7,[80+esp] - shrd ecx,ecx,7 - mov esi,ebx - xor ebp,edx - shld ebx,ebx,5 - add eax,ebp - xor esi,ecx - xor ecx,edx - add eax,ebx - vpalignr xmm0,xmm5,xmm4,8 - vpxor xmm6,xmm6,xmm2 - add edi,DWORD [32+esp] - and esi,ecx - xor ecx,edx - shrd ebx,ebx,7 - vpxor xmm6,xmm6,xmm7 - vmovdqa [80+esp],xmm2 - mov ebp,eax - xor esi,ecx - vmovdqa xmm2,xmm1 - vpaddd xmm1,xmm1,xmm5 - shld eax,eax,5 - add edi,esi - vpxor xmm6,xmm6,xmm0 - xor ebp,ebx - xor ebx,ecx - add edi,eax - add edx,DWORD [36+esp] - vpsrld xmm0,xmm6,30 - vmovdqa [16+esp],xmm1 - and ebp,ebx - xor ebx,ecx - shrd eax,eax,7 - mov esi,edi - vpslld xmm6,xmm6,2 - xor ebp,ebx - shld edi,edi,5 - add edx,ebp - xor esi,eax - xor eax,ebx - add edx,edi - add ecx,DWORD [40+esp] - and esi,eax - vpor 
xmm6,xmm6,xmm0 - xor eax,ebx - shrd edi,edi,7 - vmovdqa xmm0,[96+esp] - mov ebp,edx - xor esi,eax - shld edx,edx,5 - add ecx,esi - xor ebp,edi - xor edi,eax - add ecx,edx - add ebx,DWORD [44+esp] - and ebp,edi - xor edi,eax - shrd edx,edx,7 - mov esi,ecx - xor ebp,edi - shld ecx,ecx,5 - add ebx,ebp - xor esi,edx - xor edx,edi - add ebx,ecx - vpalignr xmm1,xmm6,xmm5,8 - vpxor xmm7,xmm7,xmm3 - add eax,DWORD [48+esp] - and esi,edx - xor edx,edi - shrd ecx,ecx,7 - vpxor xmm7,xmm7,xmm0 - vmovdqa [96+esp],xmm3 - mov ebp,ebx - xor esi,edx - vmovdqa xmm3,[144+esp] - vpaddd xmm2,xmm2,xmm6 - shld ebx,ebx,5 - add eax,esi - vpxor xmm7,xmm7,xmm1 - xor ebp,ecx - xor ecx,edx - add eax,ebx - add edi,DWORD [52+esp] - vpsrld xmm1,xmm7,30 - vmovdqa [32+esp],xmm2 - and ebp,ecx - xor ecx,edx - shrd ebx,ebx,7 - mov esi,eax - vpslld xmm7,xmm7,2 - xor ebp,ecx - shld eax,eax,5 - add edi,ebp - xor esi,ebx - xor ebx,ecx - add edi,eax - add edx,DWORD [56+esp] - and esi,ebx - vpor xmm7,xmm7,xmm1 - xor ebx,ecx - shrd eax,eax,7 - vmovdqa xmm1,[64+esp] - mov ebp,edi - xor esi,ebx - shld edi,edi,5 - add edx,esi - xor ebp,eax - xor eax,ebx - add edx,edi - add ecx,DWORD [60+esp] - and ebp,eax - xor eax,ebx - shrd edi,edi,7 - mov esi,edx - xor ebp,eax - shld edx,edx,5 - add ecx,ebp - xor esi,edi - xor edi,eax - add ecx,edx - vpalignr xmm2,xmm7,xmm6,8 - vpxor xmm0,xmm0,xmm4 - add ebx,DWORD [esp] - and esi,edi - xor edi,eax - shrd edx,edx,7 - vpxor xmm0,xmm0,xmm1 - vmovdqa [64+esp],xmm4 - mov ebp,ecx - xor esi,edi - vmovdqa xmm4,xmm3 - vpaddd xmm3,xmm3,xmm7 - shld ecx,ecx,5 - add ebx,esi - vpxor xmm0,xmm0,xmm2 - xor ebp,edx - xor edx,edi - add ebx,ecx - add eax,DWORD [4+esp] - vpsrld xmm2,xmm0,30 - vmovdqa [48+esp],xmm3 - and ebp,edx - xor edx,edi - shrd ecx,ecx,7 - mov esi,ebx - vpslld xmm0,xmm0,2 - xor ebp,edx - shld ebx,ebx,5 - add eax,ebp - xor esi,ecx - xor ecx,edx - add eax,ebx - add edi,DWORD [8+esp] - and esi,ecx - vpor xmm0,xmm0,xmm2 - xor ecx,edx - shrd ebx,ebx,7 - vmovdqa xmm2,[80+esp] - mov 
ebp,eax - xor esi,ecx - shld eax,eax,5 - add edi,esi - xor ebp,ebx - xor ebx,ecx - add edi,eax - add edx,DWORD [12+esp] - and ebp,ebx - xor ebx,ecx - shrd eax,eax,7 - mov esi,edi - xor ebp,ebx - shld edi,edi,5 - add edx,ebp - xor esi,eax - xor eax,ebx - add edx,edi - vpalignr xmm3,xmm0,xmm7,8 - vpxor xmm1,xmm1,xmm5 - add ecx,DWORD [16+esp] - and esi,eax - xor eax,ebx - shrd edi,edi,7 - vpxor xmm1,xmm1,xmm2 - vmovdqa [80+esp],xmm5 - mov ebp,edx - xor esi,eax - vmovdqa xmm5,xmm4 - vpaddd xmm4,xmm4,xmm0 - shld edx,edx,5 - add ecx,esi - vpxor xmm1,xmm1,xmm3 - xor ebp,edi - xor edi,eax - add ecx,edx - add ebx,DWORD [20+esp] - vpsrld xmm3,xmm1,30 - vmovdqa [esp],xmm4 - and ebp,edi - xor edi,eax - shrd edx,edx,7 - mov esi,ecx - vpslld xmm1,xmm1,2 - xor ebp,edi - shld ecx,ecx,5 - add ebx,ebp - xor esi,edx - xor edx,edi - add ebx,ecx - add eax,DWORD [24+esp] - and esi,edx - vpor xmm1,xmm1,xmm3 - xor edx,edi - shrd ecx,ecx,7 - vmovdqa xmm3,[96+esp] - mov ebp,ebx - xor esi,edx - shld ebx,ebx,5 - add eax,esi - xor ebp,ecx - xor ecx,edx - add eax,ebx - add edi,DWORD [28+esp] - and ebp,ecx - xor ecx,edx - shrd ebx,ebx,7 - mov esi,eax - xor ebp,ecx - shld eax,eax,5 - add edi,ebp - xor esi,ebx - xor ebx,ecx - add edi,eax - vpalignr xmm4,xmm1,xmm0,8 - vpxor xmm2,xmm2,xmm6 - add edx,DWORD [32+esp] - and esi,ebx - xor ebx,ecx - shrd eax,eax,7 - vpxor xmm2,xmm2,xmm3 - vmovdqa [96+esp],xmm6 - mov ebp,edi - xor esi,ebx - vmovdqa xmm6,xmm5 - vpaddd xmm5,xmm5,xmm1 - shld edi,edi,5 - add edx,esi - vpxor xmm2,xmm2,xmm4 - xor ebp,eax - xor eax,ebx - add edx,edi - add ecx,DWORD [36+esp] - vpsrld xmm4,xmm2,30 - vmovdqa [16+esp],xmm5 - and ebp,eax - xor eax,ebx - shrd edi,edi,7 - mov esi,edx - vpslld xmm2,xmm2,2 - xor ebp,eax - shld edx,edx,5 - add ecx,ebp - xor esi,edi - xor edi,eax - add ecx,edx - add ebx,DWORD [40+esp] - and esi,edi - vpor xmm2,xmm2,xmm4 - xor edi,eax - shrd edx,edx,7 - vmovdqa xmm4,[64+esp] - mov ebp,ecx - xor esi,edi - shld ecx,ecx,5 - add ebx,esi - xor ebp,edx - xor 
edx,edi - add ebx,ecx - add eax,DWORD [44+esp] - and ebp,edx - xor edx,edi - shrd ecx,ecx,7 - mov esi,ebx - xor ebp,edx - shld ebx,ebx,5 - add eax,ebp - xor esi,edx - add eax,ebx - vpalignr xmm5,xmm2,xmm1,8 - vpxor xmm3,xmm3,xmm7 - add edi,DWORD [48+esp] - xor esi,ecx - mov ebp,eax - shld eax,eax,5 - vpxor xmm3,xmm3,xmm4 - vmovdqa [64+esp],xmm7 - add edi,esi - xor ebp,ecx - vmovdqa xmm7,xmm6 - vpaddd xmm6,xmm6,xmm2 - shrd ebx,ebx,7 - add edi,eax - vpxor xmm3,xmm3,xmm5 - add edx,DWORD [52+esp] - xor ebp,ebx - mov esi,edi - shld edi,edi,5 - vpsrld xmm5,xmm3,30 - vmovdqa [32+esp],xmm6 - add edx,ebp - xor esi,ebx - shrd eax,eax,7 - add edx,edi - vpslld xmm3,xmm3,2 - add ecx,DWORD [56+esp] - xor esi,eax - mov ebp,edx - shld edx,edx,5 - add ecx,esi - xor ebp,eax - shrd edi,edi,7 - add ecx,edx - vpor xmm3,xmm3,xmm5 - add ebx,DWORD [60+esp] - xor ebp,edi - mov esi,ecx - shld ecx,ecx,5 - add ebx,ebp - xor esi,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [esp] - vpaddd xmm7,xmm7,xmm3 - xor esi,edx - mov ebp,ebx - shld ebx,ebx,5 - add eax,esi - vmovdqa [48+esp],xmm7 - xor ebp,edx - shrd ecx,ecx,7 - add eax,ebx - add edi,DWORD [4+esp] - xor ebp,ecx - mov esi,eax - shld eax,eax,5 - add edi,ebp - xor esi,ecx - shrd ebx,ebx,7 - add edi,eax - add edx,DWORD [8+esp] - xor esi,ebx - mov ebp,edi - shld edi,edi,5 - add edx,esi - xor ebp,ebx - shrd eax,eax,7 - add edx,edi - add ecx,DWORD [12+esp] - xor ebp,eax - mov esi,edx - shld edx,edx,5 - add ecx,ebp - xor esi,eax - shrd edi,edi,7 - add ecx,edx - mov ebp,DWORD [196+esp] - cmp ebp,DWORD [200+esp] - je NEAR L$008done - vmovdqa xmm7,[160+esp] - vmovdqa xmm6,[176+esp] - vmovdqu xmm0,[ebp] - vmovdqu xmm1,[16+ebp] - vmovdqu xmm2,[32+ebp] - vmovdqu xmm3,[48+ebp] - add ebp,64 - vpshufb xmm0,xmm0,xmm6 - mov DWORD [196+esp],ebp - vmovdqa [96+esp],xmm7 - add ebx,DWORD [16+esp] - xor esi,edi - vpshufb xmm1,xmm1,xmm6 - mov ebp,ecx - shld ecx,ecx,5 - vpaddd xmm4,xmm0,xmm7 - add ebx,esi - xor ebp,edi - shrd edx,edx,7 - add ebx,ecx - vmovdqa 
[esp],xmm4 - add eax,DWORD [20+esp] - xor ebp,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,ebp - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - add edi,DWORD [24+esp] - xor esi,ecx - mov ebp,eax - shld eax,eax,5 - add edi,esi - xor ebp,ecx - shrd ebx,ebx,7 - add edi,eax - add edx,DWORD [28+esp] - xor ebp,ebx - mov esi,edi - shld edi,edi,5 - add edx,ebp - xor esi,ebx - shrd eax,eax,7 - add edx,edi - add ecx,DWORD [32+esp] - xor esi,eax - vpshufb xmm2,xmm2,xmm6 - mov ebp,edx - shld edx,edx,5 - vpaddd xmm5,xmm1,xmm7 - add ecx,esi - xor ebp,eax - shrd edi,edi,7 - add ecx,edx - vmovdqa [16+esp],xmm5 - add ebx,DWORD [36+esp] - xor ebp,edi - mov esi,ecx - shld ecx,ecx,5 - add ebx,ebp - xor esi,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [40+esp] - xor esi,edx - mov ebp,ebx - shld ebx,ebx,5 - add eax,esi - xor ebp,edx - shrd ecx,ecx,7 - add eax,ebx - add edi,DWORD [44+esp] - xor ebp,ecx - mov esi,eax - shld eax,eax,5 - add edi,ebp - xor esi,ecx - shrd ebx,ebx,7 - add edi,eax - add edx,DWORD [48+esp] - xor esi,ebx - vpshufb xmm3,xmm3,xmm6 - mov ebp,edi - shld edi,edi,5 - vpaddd xmm6,xmm2,xmm7 - add edx,esi - xor ebp,ebx - shrd eax,eax,7 - add edx,edi - vmovdqa [32+esp],xmm6 - add ecx,DWORD [52+esp] - xor ebp,eax - mov esi,edx - shld edx,edx,5 - add ecx,ebp - xor esi,eax - shrd edi,edi,7 - add ecx,edx - add ebx,DWORD [56+esp] - xor esi,edi - mov ebp,ecx - shld ecx,ecx,5 - add ebx,esi - xor ebp,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [60+esp] - xor ebp,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,ebp - shrd ecx,ecx,7 - add eax,ebx - mov ebp,DWORD [192+esp] - add eax,DWORD [ebp] - add esi,DWORD [4+ebp] - add ecx,DWORD [8+ebp] - mov DWORD [ebp],eax - add edx,DWORD [12+ebp] - mov DWORD [4+ebp],esi - add edi,DWORD [16+ebp] - mov ebx,ecx - mov DWORD [8+ebp],ecx - xor ebx,edx - mov DWORD [12+ebp],edx - mov DWORD [16+ebp],edi - mov ebp,esi - and esi,ebx - mov ebx,ebp - jmp NEAR L$007loop -align 16 -L$008done: - add ebx,DWORD [16+esp] - xor esi,edi - mov ebp,ecx - 
shld ecx,ecx,5 - add ebx,esi - xor ebp,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [20+esp] - xor ebp,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,ebp - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - add edi,DWORD [24+esp] - xor esi,ecx - mov ebp,eax - shld eax,eax,5 - add edi,esi - xor ebp,ecx - shrd ebx,ebx,7 - add edi,eax - add edx,DWORD [28+esp] - xor ebp,ebx - mov esi,edi - shld edi,edi,5 - add edx,ebp - xor esi,ebx - shrd eax,eax,7 - add edx,edi - add ecx,DWORD [32+esp] - xor esi,eax - mov ebp,edx - shld edx,edx,5 - add ecx,esi - xor ebp,eax - shrd edi,edi,7 - add ecx,edx - add ebx,DWORD [36+esp] - xor ebp,edi - mov esi,ecx - shld ecx,ecx,5 - add ebx,ebp - xor esi,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [40+esp] - xor esi,edx - mov ebp,ebx - shld ebx,ebx,5 - add eax,esi - xor ebp,edx - shrd ecx,ecx,7 - add eax,ebx - add edi,DWORD [44+esp] - xor ebp,ecx - mov esi,eax - shld eax,eax,5 - add edi,ebp - xor esi,ecx - shrd ebx,ebx,7 - add edi,eax - add edx,DWORD [48+esp] - xor esi,ebx - mov ebp,edi - shld edi,edi,5 - add edx,esi - xor ebp,ebx - shrd eax,eax,7 - add edx,edi - add ecx,DWORD [52+esp] - xor ebp,eax - mov esi,edx - shld edx,edx,5 - add ecx,ebp - xor esi,eax - shrd edi,edi,7 - add ecx,edx - add ebx,DWORD [56+esp] - xor esi,edi - mov ebp,ecx - shld ecx,ecx,5 - add ebx,esi - xor ebp,edi - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD [60+esp] - xor ebp,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,ebp - shrd ecx,ecx,7 - add eax,ebx - vzeroall - mov ebp,DWORD [192+esp] - add eax,DWORD [ebp] - mov esp,DWORD [204+esp] - add esi,DWORD [4+ebp] - add ecx,DWORD [8+ebp] - mov DWORD [ebp],eax - add edx,DWORD [12+ebp] - mov DWORD [4+ebp],esi - add edi,DWORD [16+ebp] - mov DWORD [8+ebp],ecx - mov DWORD [12+ebp],edx - mov DWORD [16+ebp],edi - pop edi - pop esi - pop ebx - pop ebp - ret -align 64 -L$K_XX_XX: -dd 1518500249,1518500249,1518500249,1518500249 -dd 1859775393,1859775393,1859775393,1859775393 -dd 2400959708,2400959708,2400959708,2400959708 -dd 
3395469782,3395469782,3395469782,3395469782 -dd 66051,67438087,134810123,202182159 -db 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -db 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 -db 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 -db 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 -db 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha256-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha256-586.asm deleted file mode 100644 index b5dc26ba71..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha256-586.asm +++ /dev/null @@ -1,5579 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -global _sha256_block_data_order -align 16 -_sha256_block_data_order: -L$_sha256_block_data_order_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov ebx,esp - call L$000pic_point -L$000pic_point: - pop ebp - lea ebp,[(L$001K256-L$000pic_point)+ebp] - sub esp,16 - and esp,-64 - shl eax,6 - add eax,edi - mov DWORD [esp],esi - mov DWORD [4+esp],edi - mov DWORD [8+esp],eax - mov DWORD [12+esp],ebx - lea edx,[_OPENSSL_ia32cap_P] - mov ecx,DWORD [edx] - mov ebx,DWORD [4+edx] - test ecx,1048576 - jnz NEAR L$002loop - mov edx,DWORD [8+edx] - test ecx,16777216 - jz NEAR L$003no_xmm - and ecx,1073741824 - and ebx,268435968 - or ecx,ebx - and ecx,1342177280 - cmp ecx,1342177280 - je NEAR L$004AVX - test ebx,512 - jnz NEAR L$005SSSE3 -L$003no_xmm: - sub eax,edi - cmp eax,256 - jae NEAR L$006unrolled - jmp NEAR L$002loop -align 16 -L$002loop: - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - bswap eax - mov edx,DWORD [12+edi] - bswap ebx - push eax - bswap ecx - push ebx - bswap edx - push ecx - push edx - mov eax,DWORD [16+edi] - mov ebx,DWORD [20+edi] - mov ecx,DWORD [24+edi] - bswap eax - mov edx,DWORD [28+edi] - bswap ebx - push eax - bswap ecx - push ebx - bswap edx - push ecx - push edx - mov eax,DWORD [32+edi] - mov ebx,DWORD [36+edi] - mov ecx,DWORD [40+edi] - bswap eax - mov edx,DWORD [44+edi] - bswap ebx - push eax - bswap ecx - push ebx - bswap edx - push ecx - push edx - mov eax,DWORD [48+edi] - mov ebx,DWORD [52+edi] - mov ecx,DWORD [56+edi] - bswap eax - mov edx,DWORD [60+edi] - bswap ebx - push eax - bswap ecx - push ebx - bswap edx - push ecx - push edx - add edi,64 - lea esp,[esp-36] - mov DWORD [104+esp],edi - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov 
ecx,DWORD [8+esi] - mov edi,DWORD [12+esi] - mov DWORD [8+esp],ebx - xor ebx,ecx - mov DWORD [12+esp],ecx - mov DWORD [16+esp],edi - mov DWORD [esp],ebx - mov edx,DWORD [16+esi] - mov ebx,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov edi,DWORD [28+esi] - mov DWORD [24+esp],ebx - mov DWORD [28+esp],ecx - mov DWORD [32+esp],edi -align 16 -L$00700_15: - mov ecx,edx - mov esi,DWORD [24+esp] - ror ecx,14 - mov edi,DWORD [28+esp] - xor ecx,edx - xor esi,edi - mov ebx,DWORD [96+esp] - ror ecx,5 - and esi,edx - mov DWORD [20+esp],edx - xor edx,ecx - add ebx,DWORD [32+esp] - xor esi,edi - ror edx,6 - mov ecx,eax - add ebx,esi - ror ecx,9 - add ebx,edx - mov edi,DWORD [8+esp] - xor ecx,eax - mov DWORD [4+esp],eax - lea esp,[esp-4] - ror ecx,11 - mov esi,DWORD [ebp] - xor ecx,eax - mov edx,DWORD [20+esp] - xor eax,edi - ror ecx,2 - add ebx,esi - mov DWORD [esp],eax - add edx,ebx - and eax,DWORD [4+esp] - add ebx,ecx - xor eax,edi - add ebp,4 - add eax,ebx - cmp esi,3248222580 - jne NEAR L$00700_15 - mov ecx,DWORD [156+esp] - jmp NEAR L$00816_63 -align 16 -L$00816_63: - mov ebx,ecx - mov esi,DWORD [104+esp] - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [160+esp] - shr edi,10 - add ebx,DWORD [124+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [24+esp] - ror ecx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor ecx,edx - xor esi,edi - mov DWORD [96+esp],ebx - ror ecx,5 - and esi,edx - mov DWORD [20+esp],edx - xor edx,ecx - add ebx,DWORD [32+esp] - xor esi,edi - ror edx,6 - mov ecx,eax - add ebx,esi - ror ecx,9 - add ebx,edx - mov edi,DWORD [8+esp] - xor ecx,eax - mov DWORD [4+esp],eax - lea esp,[esp-4] - ror ecx,11 - mov esi,DWORD [ebp] - xor ecx,eax - mov edx,DWORD [20+esp] - xor eax,edi - ror ecx,2 - add ebx,esi - mov DWORD [esp],eax - add edx,ebx - and eax,DWORD [4+esp] - add ebx,ecx - xor eax,edi - mov ecx,DWORD [156+esp] - add ebp,4 - add eax,ebx - cmp esi,3329325298 - jne NEAR 
L$00816_63 - mov esi,DWORD [356+esp] - mov ebx,DWORD [8+esp] - mov ecx,DWORD [16+esp] - add eax,DWORD [esi] - add ebx,DWORD [4+esi] - add edi,DWORD [8+esi] - add ecx,DWORD [12+esi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],edi - mov DWORD [12+esi],ecx - mov eax,DWORD [24+esp] - mov ebx,DWORD [28+esp] - mov ecx,DWORD [32+esp] - mov edi,DWORD [360+esp] - add edx,DWORD [16+esi] - add eax,DWORD [20+esi] - add ebx,DWORD [24+esi] - add ecx,DWORD [28+esi] - mov DWORD [16+esi],edx - mov DWORD [20+esi],eax - mov DWORD [24+esi],ebx - mov DWORD [28+esi],ecx - lea esp,[356+esp] - sub ebp,256 - cmp edi,DWORD [8+esp] - jb NEAR L$002loop - mov esp,DWORD [12+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -align 64 -L$001K256: -dd 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 -dd 66051,67438087,134810123,202182159 -db 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 -db 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -db 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -db 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -db 62,0 -align 16 -L$006unrolled: - lea esp,[esp-96] - mov eax,DWORD [esi] - mov ebp,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov ebx,DWORD [12+esi] - mov DWORD [4+esp],ebp - xor ebp,ecx - mov DWORD [8+esp],ecx - mov DWORD [12+esp],ebx - mov edx,DWORD [16+esi] - mov ebx,DWORD 
[20+esi] - mov ecx,DWORD [24+esi] - mov esi,DWORD [28+esi] - mov DWORD [20+esp],ebx - mov DWORD [24+esp],ecx - mov DWORD [28+esp],esi - jmp NEAR L$009grand_loop -align 16 -L$009grand_loop: - mov ebx,DWORD [edi] - mov ecx,DWORD [4+edi] - bswap ebx - mov esi,DWORD [8+edi] - bswap ecx - mov DWORD [32+esp],ebx - bswap esi - mov DWORD [36+esp],ecx - mov DWORD [40+esp],esi - mov ebx,DWORD [12+edi] - mov ecx,DWORD [16+edi] - bswap ebx - mov esi,DWORD [20+edi] - bswap ecx - mov DWORD [44+esp],ebx - bswap esi - mov DWORD [48+esp],ecx - mov DWORD [52+esp],esi - mov ebx,DWORD [24+edi] - mov ecx,DWORD [28+edi] - bswap ebx - mov esi,DWORD [32+edi] - bswap ecx - mov DWORD [56+esp],ebx - bswap esi - mov DWORD [60+esp],ecx - mov DWORD [64+esp],esi - mov ebx,DWORD [36+edi] - mov ecx,DWORD [40+edi] - bswap ebx - mov esi,DWORD [44+edi] - bswap ecx - mov DWORD [68+esp],ebx - bswap esi - mov DWORD [72+esp],ecx - mov DWORD [76+esp],esi - mov ebx,DWORD [48+edi] - mov ecx,DWORD [52+edi] - bswap ebx - mov esi,DWORD [56+edi] - bswap ecx - mov DWORD [80+esp],ebx - bswap esi - mov DWORD [84+esp],ecx - mov DWORD [88+esp],esi - mov ebx,DWORD [60+edi] - add edi,64 - bswap ebx - mov DWORD [100+esp],edi - mov DWORD [92+esp],ebx - mov ecx,edx - mov esi,DWORD [20+esp] - ror edx,14 - mov edi,DWORD [24+esp] - xor edx,ecx - mov ebx,DWORD [32+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1116352408+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [16+esp] - ror edx,14 - mov edi,DWORD [20+esp] - xor edx,esi - mov ebx,DWORD [36+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov 
esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1899447441+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [12+esp] - ror edx,14 - mov edi,DWORD [16+esp] - xor edx,ecx - mov ebx,DWORD [40+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3049323471+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [8+esp] - ror edx,14 - mov edi,DWORD [12+esp] - xor edx,esi - mov ebx,DWORD [44+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3921009573+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [4+esp] - ror edx,14 - mov edi,DWORD [8+esp] - xor edx,ecx - mov ebx,DWORD [48+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[961987163+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [esp] - ror edx,14 - mov edi,DWORD [4+esp] - xor edx,esi - mov ebx,DWORD [52+esp] - xor ecx,edi - ror edx,5 - and 
ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1508970993+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [28+esp] - ror edx,14 - mov edi,DWORD [esp] - xor edx,ecx - mov ebx,DWORD [56+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2453635748+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [24+esp] - ror edx,14 - mov edi,DWORD [28+esp] - xor edx,esi - mov ebx,DWORD [60+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2870763221+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [20+esp] - ror edx,14 - mov edi,DWORD [24+esp] - xor edx,ecx - mov ebx,DWORD [64+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3624381080+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [16+esp] 
- ror edx,14 - mov edi,DWORD [20+esp] - xor edx,esi - mov ebx,DWORD [68+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[310598401+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [12+esp] - ror edx,14 - mov edi,DWORD [16+esp] - xor edx,ecx - mov ebx,DWORD [72+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[607225278+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [8+esp] - ror edx,14 - mov edi,DWORD [12+esp] - xor edx,esi - mov ebx,DWORD [76+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1426881987+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [4+esp] - ror edx,14 - mov edi,DWORD [8+esp] - xor edx,ecx - mov ebx,DWORD [80+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1925078388+edx*1+ebx] - xor ecx,esi - xor 
ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [esp] - ror edx,14 - mov edi,DWORD [4+esp] - xor edx,esi - mov ebx,DWORD [84+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2162078206+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov ecx,edx - mov esi,DWORD [28+esp] - ror edx,14 - mov edi,DWORD [esp] - xor edx,ecx - mov ebx,DWORD [88+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2614888103+edx*1+ebx] - xor ecx,esi - xor ebp,edi - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov esi,edx - mov ecx,DWORD [24+esp] - ror edx,14 - mov edi,DWORD [28+esp] - xor edx,esi - mov ebx,DWORD [92+esp] - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3248222580+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [36+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [88+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [32+esp] - shr edi,10 - add ebx,DWORD [68+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[24+esp] - xor edx,ecx - mov DWORD [32+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3835390401+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [40+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [92+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [36+esp] - shr edi,10 - add ebx,DWORD [72+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [20+esp] - xor edx,esi - mov DWORD [36+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[4022224774+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [44+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [32+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [40+esp] - shr edi,10 - add ebx,DWORD [76+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [16+esp] - xor edx,ecx - mov DWORD [40+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[264347078+edx*1+ebx] - xor ecx,esi - xor ebp,edi - 
mov esi,DWORD [48+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [36+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [44+esp] - shr edi,10 - add ebx,DWORD [80+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [12+esp] - xor edx,esi - mov DWORD [44+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[604807628+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [52+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [40+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [48+esp] - shr edi,10 - add ebx,DWORD [84+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [8+esp] - xor edx,ecx - mov DWORD [48+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[770255983+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [56+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [44+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [52+esp] - shr edi,10 - add ebx,DWORD [88+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[4+esp] - xor edx,esi - mov DWORD [52+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1249150122+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [60+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [48+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [56+esp] - shr edi,10 - add ebx,DWORD [92+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [esp] - xor edx,ecx - mov DWORD [56+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1555081692+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [64+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [52+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [60+esp] - shr edi,10 - add ebx,DWORD [32+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor edx,esi - mov DWORD [60+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1996064986+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov 
ecx,DWORD [68+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [56+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [64+esp] - shr edi,10 - add ebx,DWORD [36+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [24+esp] - xor edx,ecx - mov DWORD [64+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2554220882+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [72+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [60+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [68+esp] - shr edi,10 - add ebx,DWORD [40+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [20+esp] - xor edx,esi - mov DWORD [68+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2821834349+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [76+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [64+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [72+esp] - shr edi,10 - add ebx,DWORD [44+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[16+esp] - xor edx,ecx - mov DWORD [72+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2952996808+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [80+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [68+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [76+esp] - shr edi,10 - add ebx,DWORD [48+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [12+esp] - xor edx,esi - mov DWORD [76+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3210313671+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [84+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [72+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [80+esp] - shr edi,10 - add ebx,DWORD [52+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [8+esp] - xor edx,ecx - mov DWORD [80+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3336571891+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov 
esi,DWORD [88+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [76+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [84+esp] - shr edi,10 - add ebx,DWORD [56+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [4+esp] - xor edx,esi - mov DWORD [84+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3584528711+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [92+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [80+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [88+esp] - shr edi,10 - add ebx,DWORD [60+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [esp] - xor edx,ecx - mov DWORD [88+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[113926993+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [32+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [84+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [92+esp] - shr edi,10 - add ebx,DWORD [64+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[28+esp] - xor edx,esi - mov DWORD [92+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[338241895+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [36+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [88+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [32+esp] - shr edi,10 - add ebx,DWORD [68+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [24+esp] - xor edx,ecx - mov DWORD [32+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[666307205+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [40+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [92+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [36+esp] - shr edi,10 - add ebx,DWORD [72+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [20+esp] - xor edx,esi - mov DWORD [36+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[773529912+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov 
ecx,DWORD [44+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [32+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [40+esp] - shr edi,10 - add ebx,DWORD [76+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [16+esp] - xor edx,ecx - mov DWORD [40+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1294757372+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [48+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [36+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [44+esp] - shr edi,10 - add ebx,DWORD [80+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [12+esp] - xor edx,esi - mov DWORD [44+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1396182291+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [52+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [40+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [48+esp] - shr edi,10 - add ebx,DWORD [84+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[8+esp] - xor edx,ecx - mov DWORD [48+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1695183700+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [56+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [44+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [52+esp] - shr edi,10 - add ebx,DWORD [88+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [4+esp] - xor edx,esi - mov DWORD [52+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1986661051+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [60+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [48+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [56+esp] - shr edi,10 - add ebx,DWORD [92+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [esp] - xor edx,ecx - mov DWORD [56+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2177026350+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov 
esi,DWORD [64+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [52+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [60+esp] - shr edi,10 - add ebx,DWORD [32+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor edx,esi - mov DWORD [60+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2456956037+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [68+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [56+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [64+esp] - shr edi,10 - add ebx,DWORD [36+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [24+esp] - xor edx,ecx - mov DWORD [64+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2730485921+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [72+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [60+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [68+esp] - shr edi,10 - add ebx,DWORD [40+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[20+esp] - xor edx,esi - mov DWORD [68+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2820302411+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [76+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [64+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [72+esp] - shr edi,10 - add ebx,DWORD [44+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [16+esp] - xor edx,ecx - mov DWORD [72+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3259730800+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [80+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [68+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [76+esp] - shr edi,10 - add ebx,DWORD [48+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [12+esp] - xor edx,esi - mov DWORD [76+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3345764771+edx*1+ebx] - xor esi,ecx - xor eax,edi - 
mov ecx,DWORD [84+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [72+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [80+esp] - shr edi,10 - add ebx,DWORD [52+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [8+esp] - xor edx,ecx - mov DWORD [80+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3516065817+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [88+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [76+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [84+esp] - shr edi,10 - add ebx,DWORD [56+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [4+esp] - xor edx,esi - mov DWORD [84+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3600352804+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [92+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [80+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [88+esp] - shr edi,10 - add ebx,DWORD [60+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[esp] - xor edx,ecx - mov DWORD [88+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[4094571909+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [32+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [84+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [92+esp] - shr edi,10 - add ebx,DWORD [64+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor edx,esi - mov DWORD [92+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[275423344+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [36+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [88+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [32+esp] - shr edi,10 - add ebx,DWORD [68+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [24+esp] - xor edx,ecx - mov DWORD [32+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[430227734+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov 
esi,DWORD [40+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [92+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [36+esp] - shr edi,10 - add ebx,DWORD [72+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [20+esp] - xor edx,esi - mov DWORD [36+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[506948616+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [44+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [32+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [40+esp] - shr edi,10 - add ebx,DWORD [76+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [16+esp] - xor edx,ecx - mov DWORD [40+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[659060556+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [48+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [36+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [44+esp] - shr edi,10 - add ebx,DWORD [80+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[12+esp] - xor edx,esi - mov DWORD [44+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[883997877+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [52+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [40+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [48+esp] - shr edi,10 - add ebx,DWORD [84+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [8+esp] - xor edx,ecx - mov DWORD [48+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[958139571+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [56+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [44+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [52+esp] - shr edi,10 - add ebx,DWORD [88+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [4+esp] - xor edx,esi - mov DWORD [52+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1322822218+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov 
ecx,DWORD [60+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [48+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [56+esp] - shr edi,10 - add ebx,DWORD [92+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [esp] - xor edx,ecx - mov DWORD [56+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1537002063+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [64+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [52+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [60+esp] - shr edi,10 - add ebx,DWORD [32+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor edx,esi - mov DWORD [60+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[1747873779+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [68+esp] - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,esi - mov esi,DWORD [56+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [64+esp] - shr edi,10 - add ebx,DWORD [36+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [20+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[24+esp] - xor edx,ecx - mov DWORD [64+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - add ebx,DWORD [28+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [4+esp] - xor ecx,eax - mov DWORD [esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[1955562222+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [72+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [12+esp] - add ebp,ecx - mov ecx,DWORD [60+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [68+esp] - shr edi,10 - add ebx,DWORD [40+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [16+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [20+esp] - xor edx,esi - mov DWORD [68+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [12+esp],esi - xor edx,esi - add ebx,DWORD [24+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [esp] - xor esi,ebp - mov DWORD [28+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2024104815+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [76+esp] - ror esi,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,esi - mov esi,DWORD [64+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [72+esp] - shr edi,10 - add ebx,DWORD [44+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [12+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [16+esp] - xor edx,ecx - mov DWORD [72+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - add ebx,DWORD [20+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [28+esp] - xor ecx,eax - mov DWORD [24+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2227730452+edx*1+ebx] - xor ecx,esi - xor ebp,edi - 
mov esi,DWORD [80+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [4+esp] - add ebp,ecx - mov ecx,DWORD [68+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [76+esp] - shr edi,10 - add ebx,DWORD [48+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [8+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [12+esp] - xor edx,esi - mov DWORD [76+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [4+esp],esi - xor edx,esi - add ebx,DWORD [16+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [24+esp] - xor esi,ebp - mov DWORD [20+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2361852424+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [84+esp] - ror esi,2 - add eax,edx - add edx,DWORD [esp] - add eax,esi - mov esi,DWORD [72+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [80+esp] - shr edi,10 - add ebx,DWORD [52+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [4+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [8+esp] - xor edx,ecx - mov DWORD [80+esp],ebx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - add ebx,DWORD [12+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [20+esp] - xor ecx,eax - mov DWORD [16+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[2428436474+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [88+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [28+esp] - add ebp,ecx - mov ecx,DWORD [76+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [84+esp] - shr edi,10 - add ebx,DWORD [56+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [esp] - ror edx,14 - add ebx,edi - mov edi,DWORD 
[4+esp] - xor edx,esi - mov DWORD [84+esp],ebx - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [28+esp],esi - xor edx,esi - add ebx,DWORD [8+esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [16+esp] - xor esi,ebp - mov DWORD [12+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[2756734187+edx*1+ebx] - xor esi,ecx - xor eax,edi - mov ecx,DWORD [92+esp] - ror esi,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,esi - mov esi,DWORD [80+esp] - mov ebx,ecx - ror ecx,11 - mov edi,esi - ror esi,2 - xor ecx,ebx - shr ebx,3 - ror ecx,7 - xor esi,edi - xor ebx,ecx - ror esi,17 - add ebx,DWORD [88+esp] - shr edi,10 - add ebx,DWORD [60+esp] - mov ecx,edx - xor edi,esi - mov esi,DWORD [28+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [esp] - xor edx,ecx - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - add ebx,DWORD [4+esp] - xor edi,esi - ror edx,6 - mov ecx,eax - add ebx,edi - ror ecx,9 - mov esi,eax - mov edi,DWORD [12+esp] - xor ecx,eax - mov DWORD [8+esp],eax - xor eax,edi - ror ecx,11 - and ebp,eax - lea edx,[3204031479+edx*1+ebx] - xor ecx,esi - xor ebp,edi - mov esi,DWORD [32+esp] - ror ecx,2 - add ebp,edx - add edx,DWORD [20+esp] - add ebp,ecx - mov ecx,DWORD [84+esp] - mov ebx,esi - ror esi,11 - mov edi,ecx - ror ecx,2 - xor esi,ebx - shr ebx,3 - ror esi,7 - xor ecx,edi - xor ebx,esi - ror ecx,17 - add ebx,DWORD [92+esp] - shr edi,10 - add ebx,DWORD [64+esp] - mov esi,edx - xor edi,ecx - mov ecx,DWORD [24+esp] - ror edx,14 - add ebx,edi - mov edi,DWORD [28+esp] - xor edx,esi - xor ecx,edi - ror edx,5 - and ecx,esi - mov DWORD [20+esp],esi - xor edx,esi - add ebx,DWORD [esp] - xor edi,ecx - ror edx,6 - mov esi,ebp - add ebx,edi - ror esi,9 - mov ecx,ebp - mov edi,DWORD [8+esp] - xor esi,ebp - mov DWORD [4+esp],ebp - xor ebp,edi - ror esi,11 - and eax,ebp - lea edx,[3329325298+edx*1+ebx] - xor esi,ecx - xor eax,edi - ror esi,2 - add eax,edx - add edx,DWORD [16+esp] - 
add eax,esi - mov esi,DWORD [96+esp] - xor ebp,edi - mov ecx,DWORD [12+esp] - add eax,DWORD [esi] - add ebp,DWORD [4+esi] - add edi,DWORD [8+esi] - add ecx,DWORD [12+esi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebp - mov DWORD [8+esi],edi - mov DWORD [12+esi],ecx - mov DWORD [4+esp],ebp - xor ebp,edi - mov DWORD [8+esp],edi - mov DWORD [12+esp],ecx - mov edi,DWORD [20+esp] - mov ebx,DWORD [24+esp] - mov ecx,DWORD [28+esp] - add edx,DWORD [16+esi] - add edi,DWORD [20+esi] - add ebx,DWORD [24+esi] - add ecx,DWORD [28+esi] - mov DWORD [16+esi],edx - mov DWORD [20+esi],edi - mov DWORD [24+esi],ebx - mov DWORD [28+esi],ecx - mov DWORD [20+esp],edi - mov edi,DWORD [100+esp] - mov DWORD [24+esp],ebx - mov DWORD [28+esp],ecx - cmp edi,DWORD [104+esp] - jb NEAR L$009grand_loop - mov esp,DWORD [108+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -align 32 -L$005SSSE3: - lea esp,[esp-96] - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edi,DWORD [12+esi] - mov DWORD [4+esp],ebx - xor ebx,ecx - mov DWORD [8+esp],ecx - mov DWORD [12+esp],edi - mov edx,DWORD [16+esi] - mov edi,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov esi,DWORD [28+esi] - mov DWORD [20+esp],edi - mov edi,DWORD [100+esp] - mov DWORD [24+esp],ecx - mov DWORD [28+esp],esi - movdqa xmm7,[256+ebp] - jmp NEAR L$010grand_ssse3 -align 16 -L$010grand_ssse3: - movdqu xmm0,[edi] - movdqu xmm1,[16+edi] - movdqu xmm2,[32+edi] - movdqu xmm3,[48+edi] - add edi,64 -db 102,15,56,0,199 - mov DWORD [100+esp],edi -db 102,15,56,0,207 - movdqa xmm4,[ebp] -db 102,15,56,0,215 - movdqa xmm5,[16+ebp] - paddd xmm4,xmm0 -db 102,15,56,0,223 - movdqa xmm6,[32+ebp] - paddd xmm5,xmm1 - movdqa xmm7,[48+ebp] - movdqa [32+esp],xmm4 - paddd xmm6,xmm2 - movdqa [48+esp],xmm5 - paddd xmm7,xmm3 - movdqa [64+esp],xmm6 - movdqa [80+esp],xmm7 - jmp NEAR L$011ssse3_00_47 -align 16 -L$011ssse3_00_47: - add ebp,64 - mov ecx,edx - movdqa xmm4,xmm1 - ror edx,14 - mov esi,DWORD [20+esp] - movdqa xmm7,xmm3 - xor edx,ecx - mov 
edi,DWORD [24+esp] -db 102,15,58,15,224,4 - xor esi,edi - ror edx,5 - and esi,ecx -db 102,15,58,15,250,4 - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - movdqa xmm5,xmm4 - ror edx,6 - mov ecx,eax - movdqa xmm6,xmm4 - add edx,edi - mov edi,DWORD [4+esp] - psrld xmm4,3 - mov esi,eax - ror ecx,9 - paddd xmm0,xmm7 - mov DWORD [esp],eax - xor ecx,eax - psrld xmm6,7 - xor eax,edi - add edx,DWORD [28+esp] - ror ecx,11 - and ebx,eax - pshufd xmm7,xmm3,250 - xor ecx,esi - add edx,DWORD [32+esp] - pslld xmm5,14 - xor ebx,edi - ror ecx,2 - pxor xmm4,xmm6 - add ebx,edx - add edx,DWORD [12+esp] - psrld xmm6,11 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm4,xmm5 - mov esi,DWORD [16+esp] - xor edx,ecx - pslld xmm5,11 - mov edi,DWORD [20+esp] - xor esi,edi - ror edx,5 - pxor xmm4,xmm6 - and esi,ecx - mov DWORD [12+esp],ecx - movdqa xmm6,xmm7 - xor edx,ecx - xor edi,esi - ror edx,6 - pxor xmm4,xmm5 - mov ecx,ebx - add edx,edi - psrld xmm7,10 - mov edi,DWORD [esp] - mov esi,ebx - ror ecx,9 - paddd xmm0,xmm4 - mov DWORD [28+esp],ebx - xor ecx,ebx - psrlq xmm6,17 - xor ebx,edi - add edx,DWORD [24+esp] - ror ecx,11 - pxor xmm7,xmm6 - and eax,ebx - xor ecx,esi - psrlq xmm6,2 - add edx,DWORD [36+esp] - xor eax,edi - ror ecx,2 - pxor xmm7,xmm6 - add eax,edx - add edx,DWORD [8+esp] - pshufd xmm7,xmm7,128 - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - ror edx,5 - and esi,ecx - psrldq xmm7,8 - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - paddd xmm0,xmm7 - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - ror ecx,9 - mov DWORD [24+esp],eax - pshufd xmm7,xmm0,80 - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - movdqa xmm6,xmm7 - ror ecx,11 - psrld xmm7,10 - and ebx,eax - psrlq xmm6,17 - xor ecx,esi - add edx,DWORD [40+esp] - xor ebx,edi - ror ecx,2 - pxor xmm7,xmm6 - add ebx,edx - add edx,DWORD [4+esp] - psrlq xmm6,2 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor 
xmm7,xmm6 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - pshufd xmm7,xmm7,8 - xor esi,edi - ror edx,5 - movdqa xmm6,[ebp] - and esi,ecx - mov DWORD [4+esp],ecx - pslldq xmm7,8 - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - ror ecx,9 - paddd xmm0,xmm7 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - paddd xmm6,xmm0 - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [44+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - movdqa [32+esp],xmm6 - mov ecx,edx - movdqa xmm4,xmm2 - ror edx,14 - mov esi,DWORD [4+esp] - movdqa xmm7,xmm0 - xor edx,ecx - mov edi,DWORD [8+esp] -db 102,15,58,15,225,4 - xor esi,edi - ror edx,5 - and esi,ecx -db 102,15,58,15,251,4 - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - movdqa xmm5,xmm4 - ror edx,6 - mov ecx,eax - movdqa xmm6,xmm4 - add edx,edi - mov edi,DWORD [20+esp] - psrld xmm4,3 - mov esi,eax - ror ecx,9 - paddd xmm1,xmm7 - mov DWORD [16+esp],eax - xor ecx,eax - psrld xmm6,7 - xor eax,edi - add edx,DWORD [12+esp] - ror ecx,11 - and ebx,eax - pshufd xmm7,xmm0,250 - xor ecx,esi - add edx,DWORD [48+esp] - pslld xmm5,14 - xor ebx,edi - ror ecx,2 - pxor xmm4,xmm6 - add ebx,edx - add edx,DWORD [28+esp] - psrld xmm6,11 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm4,xmm5 - mov esi,DWORD [esp] - xor edx,ecx - pslld xmm5,11 - mov edi,DWORD [4+esp] - xor esi,edi - ror edx,5 - pxor xmm4,xmm6 - and esi,ecx - mov DWORD [28+esp],ecx - movdqa xmm6,xmm7 - xor edx,ecx - xor edi,esi - ror edx,6 - pxor xmm4,xmm5 - mov ecx,ebx - add edx,edi - psrld xmm7,10 - mov edi,DWORD [16+esp] - mov esi,ebx - ror ecx,9 - paddd xmm1,xmm4 - mov DWORD [12+esp],ebx - xor ecx,ebx - psrlq xmm6,17 - xor ebx,edi - add edx,DWORD [8+esp] - ror ecx,11 - pxor xmm7,xmm6 - and eax,ebx - xor ecx,esi - psrlq xmm6,2 - add edx,DWORD [52+esp] - xor eax,edi - ror ecx,2 - pxor xmm7,xmm6 - add eax,edx - add edx,DWORD [24+esp] - pshufd 
xmm7,xmm7,128 - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - ror edx,5 - and esi,ecx - psrldq xmm7,8 - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - paddd xmm1,xmm7 - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - ror ecx,9 - mov DWORD [8+esp],eax - pshufd xmm7,xmm1,80 - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - movdqa xmm6,xmm7 - ror ecx,11 - psrld xmm7,10 - and ebx,eax - psrlq xmm6,17 - xor ecx,esi - add edx,DWORD [56+esp] - xor ebx,edi - ror ecx,2 - pxor xmm7,xmm6 - add ebx,edx - add edx,DWORD [20+esp] - psrlq xmm6,2 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm7,xmm6 - mov esi,DWORD [24+esp] - xor edx,ecx - mov edi,DWORD [28+esp] - pshufd xmm7,xmm7,8 - xor esi,edi - ror edx,5 - movdqa xmm6,[16+ebp] - and esi,ecx - mov DWORD [20+esp],ecx - pslldq xmm7,8 - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - ror ecx,9 - paddd xmm1,xmm7 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - paddd xmm6,xmm1 - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [60+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - movdqa [48+esp],xmm6 - mov ecx,edx - movdqa xmm4,xmm3 - ror edx,14 - mov esi,DWORD [20+esp] - movdqa xmm7,xmm1 - xor edx,ecx - mov edi,DWORD [24+esp] -db 102,15,58,15,226,4 - xor esi,edi - ror edx,5 - and esi,ecx -db 102,15,58,15,248,4 - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - movdqa xmm5,xmm4 - ror edx,6 - mov ecx,eax - movdqa xmm6,xmm4 - add edx,edi - mov edi,DWORD [4+esp] - psrld xmm4,3 - mov esi,eax - ror ecx,9 - paddd xmm2,xmm7 - mov DWORD [esp],eax - xor ecx,eax - psrld xmm6,7 - xor eax,edi - add edx,DWORD [28+esp] - ror ecx,11 - and ebx,eax - pshufd xmm7,xmm1,250 - xor ecx,esi - add edx,DWORD [64+esp] - pslld xmm5,14 - xor ebx,edi - ror ecx,2 - pxor xmm4,xmm6 - add ebx,edx - add edx,DWORD [12+esp] - 
psrld xmm6,11 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm4,xmm5 - mov esi,DWORD [16+esp] - xor edx,ecx - pslld xmm5,11 - mov edi,DWORD [20+esp] - xor esi,edi - ror edx,5 - pxor xmm4,xmm6 - and esi,ecx - mov DWORD [12+esp],ecx - movdqa xmm6,xmm7 - xor edx,ecx - xor edi,esi - ror edx,6 - pxor xmm4,xmm5 - mov ecx,ebx - add edx,edi - psrld xmm7,10 - mov edi,DWORD [esp] - mov esi,ebx - ror ecx,9 - paddd xmm2,xmm4 - mov DWORD [28+esp],ebx - xor ecx,ebx - psrlq xmm6,17 - xor ebx,edi - add edx,DWORD [24+esp] - ror ecx,11 - pxor xmm7,xmm6 - and eax,ebx - xor ecx,esi - psrlq xmm6,2 - add edx,DWORD [68+esp] - xor eax,edi - ror ecx,2 - pxor xmm7,xmm6 - add eax,edx - add edx,DWORD [8+esp] - pshufd xmm7,xmm7,128 - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - ror edx,5 - and esi,ecx - psrldq xmm7,8 - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - paddd xmm2,xmm7 - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - ror ecx,9 - mov DWORD [24+esp],eax - pshufd xmm7,xmm2,80 - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - movdqa xmm6,xmm7 - ror ecx,11 - psrld xmm7,10 - and ebx,eax - psrlq xmm6,17 - xor ecx,esi - add edx,DWORD [72+esp] - xor ebx,edi - ror ecx,2 - pxor xmm7,xmm6 - add ebx,edx - add edx,DWORD [4+esp] - psrlq xmm6,2 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm7,xmm6 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - pshufd xmm7,xmm7,8 - xor esi,edi - ror edx,5 - movdqa xmm6,[32+ebp] - and esi,ecx - mov DWORD [4+esp],ecx - pslldq xmm7,8 - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - ror ecx,9 - paddd xmm2,xmm7 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - paddd xmm6,xmm2 - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [76+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - movdqa [64+esp],xmm6 - mov 
ecx,edx - movdqa xmm4,xmm0 - ror edx,14 - mov esi,DWORD [4+esp] - movdqa xmm7,xmm2 - xor edx,ecx - mov edi,DWORD [8+esp] -db 102,15,58,15,227,4 - xor esi,edi - ror edx,5 - and esi,ecx -db 102,15,58,15,249,4 - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - movdqa xmm5,xmm4 - ror edx,6 - mov ecx,eax - movdqa xmm6,xmm4 - add edx,edi - mov edi,DWORD [20+esp] - psrld xmm4,3 - mov esi,eax - ror ecx,9 - paddd xmm3,xmm7 - mov DWORD [16+esp],eax - xor ecx,eax - psrld xmm6,7 - xor eax,edi - add edx,DWORD [12+esp] - ror ecx,11 - and ebx,eax - pshufd xmm7,xmm2,250 - xor ecx,esi - add edx,DWORD [80+esp] - pslld xmm5,14 - xor ebx,edi - ror ecx,2 - pxor xmm4,xmm6 - add ebx,edx - add edx,DWORD [28+esp] - psrld xmm6,11 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm4,xmm5 - mov esi,DWORD [esp] - xor edx,ecx - pslld xmm5,11 - mov edi,DWORD [4+esp] - xor esi,edi - ror edx,5 - pxor xmm4,xmm6 - and esi,ecx - mov DWORD [28+esp],ecx - movdqa xmm6,xmm7 - xor edx,ecx - xor edi,esi - ror edx,6 - pxor xmm4,xmm5 - mov ecx,ebx - add edx,edi - psrld xmm7,10 - mov edi,DWORD [16+esp] - mov esi,ebx - ror ecx,9 - paddd xmm3,xmm4 - mov DWORD [12+esp],ebx - xor ecx,ebx - psrlq xmm6,17 - xor ebx,edi - add edx,DWORD [8+esp] - ror ecx,11 - pxor xmm7,xmm6 - and eax,ebx - xor ecx,esi - psrlq xmm6,2 - add edx,DWORD [84+esp] - xor eax,edi - ror ecx,2 - pxor xmm7,xmm6 - add eax,edx - add edx,DWORD [24+esp] - pshufd xmm7,xmm7,128 - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - ror edx,5 - and esi,ecx - psrldq xmm7,8 - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - paddd xmm3,xmm7 - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - ror ecx,9 - mov DWORD [8+esp],eax - pshufd xmm7,xmm3,80 - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - movdqa xmm6,xmm7 - ror ecx,11 - psrld xmm7,10 - and ebx,eax - psrlq xmm6,17 - xor ecx,esi - add edx,DWORD [88+esp] - xor ebx,edi - ror ecx,2 - pxor xmm7,xmm6 - add 
ebx,edx - add edx,DWORD [20+esp] - psrlq xmm6,2 - add ebx,ecx - mov ecx,edx - ror edx,14 - pxor xmm7,xmm6 - mov esi,DWORD [24+esp] - xor edx,ecx - mov edi,DWORD [28+esp] - pshufd xmm7,xmm7,8 - xor esi,edi - ror edx,5 - movdqa xmm6,[48+ebp] - and esi,ecx - mov DWORD [20+esp],ecx - pslldq xmm7,8 - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - ror ecx,9 - paddd xmm3,xmm7 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - paddd xmm6,xmm3 - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [92+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - movdqa [80+esp],xmm6 - cmp DWORD [64+ebp],66051 - jne NEAR L$011ssse3_00_47 - mov ecx,edx - ror edx,14 - mov esi,DWORD [20+esp] - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - mov esi,eax - ror ecx,9 - mov DWORD [esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [32+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [16+esp] - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - mov esi,ebx - ror ecx,9 - mov DWORD [28+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [36+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - 
ror ecx,9 - mov DWORD [24+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [40+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [4+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [44+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [4+esp] - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - mov esi,eax - ror ecx,9 - mov DWORD [16+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [48+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [esp] - xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [12+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [52+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - ror ecx,9 - mov DWORD 
[8+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [56+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [24+esp] - xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [60+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [20+esp] - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - mov esi,eax - ror ecx,9 - mov DWORD [esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [64+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [16+esp] - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - mov esi,ebx - ror ecx,9 - mov DWORD [28+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [68+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - ror ecx,9 - mov DWORD [24+esp],eax - xor 
ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [72+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [4+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [76+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [4+esp] - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - mov esi,eax - ror ecx,9 - mov DWORD [16+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [80+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [esp] - xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [12+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [84+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - ror ecx,9 - mov DWORD [8+esp],eax - xor ecx,eax - xor eax,edi - add 
edx,DWORD [4+esp] - ror ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [88+esp] - xor ebx,edi - ror ecx,2 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - mov ecx,edx - ror edx,14 - mov esi,DWORD [24+esp] - xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - ror edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - ror edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - ror ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - ror ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [92+esp] - xor eax,edi - ror ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - mov esi,DWORD [96+esp] - xor ebx,edi - mov ecx,DWORD [12+esp] - add eax,DWORD [esi] - add ebx,DWORD [4+esi] - add edi,DWORD [8+esi] - add ecx,DWORD [12+esi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],edi - mov DWORD [12+esi],ecx - mov DWORD [4+esp],ebx - xor ebx,edi - mov DWORD [8+esp],edi - mov DWORD [12+esp],ecx - mov edi,DWORD [20+esp] - mov ecx,DWORD [24+esp] - add edx,DWORD [16+esi] - add edi,DWORD [20+esi] - add ecx,DWORD [24+esi] - mov DWORD [16+esi],edx - mov DWORD [20+esi],edi - mov DWORD [20+esp],edi - mov edi,DWORD [28+esp] - mov DWORD [24+esi],ecx - add edi,DWORD [28+esi] - mov DWORD [24+esp],ecx - mov DWORD [28+esi],edi - mov DWORD [28+esp],edi - mov edi,DWORD [100+esp] - movdqa xmm7,[64+ebp] - sub ebp,192 - cmp edi,DWORD [104+esp] - jb NEAR L$010grand_ssse3 - mov esp,DWORD [108+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -align 32 -L$004AVX: - lea esp,[esp-96] - vzeroall - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edi,DWORD [12+esi] - mov DWORD [4+esp],ebx - xor ebx,ecx - mov DWORD [8+esp],ecx - mov DWORD [12+esp],edi - mov edx,DWORD [16+esi] - mov edi,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov esi,DWORD [28+esi] - mov DWORD [20+esp],edi - mov edi,DWORD [100+esp] - mov DWORD [24+esp],ecx - mov DWORD [28+esp],esi - vmovdqa xmm7,[256+ebp] - jmp 
NEAR L$012grand_avx -align 32 -L$012grand_avx: - vmovdqu xmm0,[edi] - vmovdqu xmm1,[16+edi] - vmovdqu xmm2,[32+edi] - vmovdqu xmm3,[48+edi] - add edi,64 - vpshufb xmm0,xmm0,xmm7 - mov DWORD [100+esp],edi - vpshufb xmm1,xmm1,xmm7 - vpshufb xmm2,xmm2,xmm7 - vpaddd xmm4,xmm0,[ebp] - vpshufb xmm3,xmm3,xmm7 - vpaddd xmm5,xmm1,[16+ebp] - vpaddd xmm6,xmm2,[32+ebp] - vpaddd xmm7,xmm3,[48+ebp] - vmovdqa [32+esp],xmm4 - vmovdqa [48+esp],xmm5 - vmovdqa [64+esp],xmm6 - vmovdqa [80+esp],xmm7 - jmp NEAR L$013avx_00_47 -align 16 -L$013avx_00_47: - add ebp,64 - vpalignr xmm4,xmm1,xmm0,4 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [20+esp] - vpalignr xmm7,xmm3,xmm2,4 - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - vpsrld xmm6,xmm4,7 - shrd edx,edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - vpaddd xmm0,xmm0,xmm7 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrld xmm7,xmm4,3 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - vpslld xmm5,xmm4,14 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [esp],eax - vpxor xmm4,xmm7,xmm6 - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - vpshufd xmm7,xmm3,250 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpsrld xmm6,xmm6,11 - add edx,DWORD [32+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpxor xmm4,xmm4,xmm5 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - vpslld xmm5,xmm5,11 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [16+esp] - vpxor xmm4,xmm4,xmm6 - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - vpsrld xmm6,xmm7,10 - shrd edx,edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - vpxor xmm4,xmm4,xmm5 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - vpaddd xmm0,xmm0,xmm4 - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [28+esp],ebx - vpxor xmm6,xmm6,xmm5 - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - vpsrlq xmm7,xmm7,19 - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - vpxor xmm6,xmm6,xmm7 - add edx,DWORD [36+esp] - xor eax,edi - shrd ecx,ecx,2 - vpshufd 
xmm7,xmm6,132 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - vpsrldq xmm7,xmm7,8 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [12+esp] - vpaddd xmm0,xmm0,xmm7 - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - vpshufd xmm7,xmm0,80 - shrd edx,edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - vpsrld xmm6,xmm7,10 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - vpxor xmm6,xmm6,xmm5 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [24+esp],eax - vpsrlq xmm7,xmm7,19 - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - vpxor xmm6,xmm6,xmm7 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpshufd xmm7,xmm6,232 - add edx,DWORD [40+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpslldq xmm7,xmm7,8 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - vpaddd xmm0,xmm0,xmm7 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [8+esp] - vpaddd xmm6,xmm0,[ebp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [4+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [44+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - vmovdqa [32+esp],xmm6 - vpalignr xmm4,xmm2,xmm1,4 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [4+esp] - vpalignr xmm7,xmm0,xmm3,4 - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - vpsrld xmm6,xmm4,7 - shrd edx,edx,5 - and esi,ecx - mov DWORD [esp],ecx - vpaddd xmm1,xmm1,xmm7 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrld xmm7,xmm4,3 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - vpslld xmm5,xmm4,14 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [16+esp],eax - vpxor xmm4,xmm7,xmm6 - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - vpshufd xmm7,xmm0,250 - shrd ecx,ecx,11 - and 
ebx,eax - xor ecx,esi - vpsrld xmm6,xmm6,11 - add edx,DWORD [48+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpxor xmm4,xmm4,xmm5 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - vpslld xmm5,xmm5,11 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [esp] - vpxor xmm4,xmm4,xmm6 - xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - vpsrld xmm6,xmm7,10 - shrd edx,edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - vpxor xmm4,xmm4,xmm5 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - vpaddd xmm1,xmm1,xmm4 - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [12+esp],ebx - vpxor xmm6,xmm6,xmm5 - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - vpsrlq xmm7,xmm7,19 - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - vpxor xmm6,xmm6,xmm7 - add edx,DWORD [52+esp] - xor eax,edi - shrd ecx,ecx,2 - vpshufd xmm7,xmm6,132 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - vpsrldq xmm7,xmm7,8 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [28+esp] - vpaddd xmm1,xmm1,xmm7 - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - vpshufd xmm7,xmm1,80 - shrd edx,edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - vpsrld xmm6,xmm7,10 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - vpxor xmm6,xmm6,xmm5 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [8+esp],eax - vpsrlq xmm7,xmm7,19 - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - vpxor xmm6,xmm6,xmm7 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpshufd xmm7,xmm6,232 - add edx,DWORD [56+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpslldq xmm7,xmm7,8 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - vpaddd xmm1,xmm1,xmm7 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [24+esp] - vpaddd xmm6,xmm1,[16+ebp] - xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov 
esi,ebx - shrd ecx,ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [60+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - vmovdqa [48+esp],xmm6 - vpalignr xmm4,xmm3,xmm2,4 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [20+esp] - vpalignr xmm7,xmm1,xmm0,4 - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - vpsrld xmm6,xmm4,7 - shrd edx,edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - vpaddd xmm2,xmm2,xmm7 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrld xmm7,xmm4,3 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - vpslld xmm5,xmm4,14 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [esp],eax - vpxor xmm4,xmm7,xmm6 - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - vpshufd xmm7,xmm1,250 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpsrld xmm6,xmm6,11 - add edx,DWORD [64+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpxor xmm4,xmm4,xmm5 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - vpslld xmm5,xmm5,11 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [16+esp] - vpxor xmm4,xmm4,xmm6 - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - vpsrld xmm6,xmm7,10 - shrd edx,edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - vpxor xmm4,xmm4,xmm5 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - vpaddd xmm2,xmm2,xmm4 - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [28+esp],ebx - vpxor xmm6,xmm6,xmm5 - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - vpsrlq xmm7,xmm7,19 - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - vpxor xmm6,xmm6,xmm7 - add edx,DWORD [68+esp] - xor eax,edi - shrd ecx,ecx,2 - vpshufd xmm7,xmm6,132 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - vpsrldq xmm7,xmm7,8 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [12+esp] - vpaddd xmm2,xmm2,xmm7 - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - vpshufd xmm7,xmm2,80 - shrd edx,edx,5 - and esi,ecx 
- mov DWORD [8+esp],ecx - vpsrld xmm6,xmm7,10 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - vpxor xmm6,xmm6,xmm5 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [24+esp],eax - vpsrlq xmm7,xmm7,19 - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - vpxor xmm6,xmm6,xmm7 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpshufd xmm7,xmm6,232 - add edx,DWORD [72+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpslldq xmm7,xmm7,8 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - vpaddd xmm2,xmm2,xmm7 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [8+esp] - vpaddd xmm6,xmm2,[32+ebp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [4+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [76+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - vmovdqa [64+esp],xmm6 - vpalignr xmm4,xmm0,xmm3,4 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [4+esp] - vpalignr xmm7,xmm2,xmm1,4 - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - vpsrld xmm6,xmm4,7 - shrd edx,edx,5 - and esi,ecx - mov DWORD [esp],ecx - vpaddd xmm3,xmm3,xmm7 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrld xmm7,xmm4,3 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - vpslld xmm5,xmm4,14 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [16+esp],eax - vpxor xmm4,xmm7,xmm6 - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - vpshufd xmm7,xmm2,250 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpsrld xmm6,xmm6,11 - add edx,DWORD [80+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpxor xmm4,xmm4,xmm5 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - vpslld xmm5,xmm5,11 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [esp] - vpxor xmm4,xmm4,xmm6 
- xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - vpsrld xmm6,xmm7,10 - shrd edx,edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - vpxor xmm4,xmm4,xmm5 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - vpaddd xmm3,xmm3,xmm4 - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [12+esp],ebx - vpxor xmm6,xmm6,xmm5 - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - vpsrlq xmm7,xmm7,19 - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - vpxor xmm6,xmm6,xmm7 - add edx,DWORD [84+esp] - xor eax,edi - shrd ecx,ecx,2 - vpshufd xmm7,xmm6,132 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - vpsrldq xmm7,xmm7,8 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [28+esp] - vpaddd xmm3,xmm3,xmm7 - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - vpshufd xmm7,xmm3,80 - shrd edx,edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - vpsrld xmm6,xmm7,10 - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - vpsrlq xmm5,xmm7,17 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - vpxor xmm6,xmm6,xmm5 - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [8+esp],eax - vpsrlq xmm7,xmm7,19 - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - vpxor xmm6,xmm6,xmm7 - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - vpshufd xmm7,xmm6,232 - add edx,DWORD [88+esp] - xor ebx,edi - shrd ecx,ecx,2 - vpslldq xmm7,xmm7,8 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - vpaddd xmm3,xmm3,xmm7 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [24+esp] - vpaddd xmm6,xmm3,[48+ebp] - xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [92+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - vmovdqa 
[80+esp],xmm6 - cmp DWORD [64+ebp],66051 - jne NEAR L$013avx_00_47 - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [20+esp] - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [32+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [16+esp] - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [28+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [36+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [24+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [40+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [4+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add 
edx,DWORD [16+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [44+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [4+esp] - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [16+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [48+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [esp] - xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [12+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [52+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [8+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [56+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [24+esp] - xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD 
[8+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [60+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [20+esp] - xor edx,ecx - mov edi,DWORD [24+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [16+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [4+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [28+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [64+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [12+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [16+esp] - xor edx,ecx - mov edi,DWORD [20+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [12+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [28+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [24+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [68+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [8+esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [12+esp] - xor edx,ecx - mov edi,DWORD [16+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [8+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [28+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [24+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [20+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [72+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [4+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [8+esp] - xor edx,ecx - mov edi,DWORD [12+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD 
[4+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [24+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [20+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [16+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [76+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [4+esp] - xor edx,ecx - mov edi,DWORD [8+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [20+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [16+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [12+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [80+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [28+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [esp] - xor edx,ecx - mov edi,DWORD [4+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [28+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [16+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [12+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [8+esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [84+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [24+esp] - add eax,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [28+esp] - xor edx,ecx - mov edi,DWORD [esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [24+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,eax - add edx,edi - mov edi,DWORD [12+esp] - mov esi,eax - shrd ecx,ecx,9 - mov DWORD [8+esp],eax - xor ecx,eax - xor eax,edi - add edx,DWORD [4+esp] - shrd ecx,ecx,11 - and ebx,eax - xor ecx,esi - add edx,DWORD [88+esp] - xor ebx,edi - shrd ecx,ecx,2 - add ebx,edx - add edx,DWORD [20+esp] - add ebx,ecx - mov ecx,edx - shrd edx,edx,14 - mov esi,DWORD [24+esp] - 
xor edx,ecx - mov edi,DWORD [28+esp] - xor esi,edi - shrd edx,edx,5 - and esi,ecx - mov DWORD [20+esp],ecx - xor edx,ecx - xor edi,esi - shrd edx,edx,6 - mov ecx,ebx - add edx,edi - mov edi,DWORD [8+esp] - mov esi,ebx - shrd ecx,ecx,9 - mov DWORD [4+esp],ebx - xor ecx,ebx - xor ebx,edi - add edx,DWORD [esp] - shrd ecx,ecx,11 - and eax,ebx - xor ecx,esi - add edx,DWORD [92+esp] - xor eax,edi - shrd ecx,ecx,2 - add eax,edx - add edx,DWORD [16+esp] - add eax,ecx - mov esi,DWORD [96+esp] - xor ebx,edi - mov ecx,DWORD [12+esp] - add eax,DWORD [esi] - add ebx,DWORD [4+esi] - add edi,DWORD [8+esi] - add ecx,DWORD [12+esi] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - mov DWORD [8+esi],edi - mov DWORD [12+esi],ecx - mov DWORD [4+esp],ebx - xor ebx,edi - mov DWORD [8+esp],edi - mov DWORD [12+esp],ecx - mov edi,DWORD [20+esp] - mov ecx,DWORD [24+esp] - add edx,DWORD [16+esi] - add edi,DWORD [20+esi] - add ecx,DWORD [24+esi] - mov DWORD [16+esi],edx - mov DWORD [20+esi],edi - mov DWORD [20+esp],edi - mov edi,DWORD [28+esp] - mov DWORD [24+esi],ecx - add edi,DWORD [28+esi] - mov DWORD [24+esp],ecx - mov DWORD [28+esi],edi - mov DWORD [28+esp],edi - mov edi,DWORD [100+esp] - vmovdqa xmm7,[64+ebp] - sub ebp,192 - cmp edi,DWORD [104+esp] - jb NEAR L$012grand_avx - mov esp,DWORD [108+esp] - vzeroall - pop edi - pop esi - pop ebx - pop ebp - ret -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha512-586.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha512-586.asm deleted file mode 100644 index 3e6b0680bc..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/sha512-586.asm +++ /dev/null @@ -1,2849 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -global _sha512_block_data_order -align 16 -_sha512_block_data_order: -L$_sha512_block_data_order_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov ebx,esp - call L$000pic_point -L$000pic_point: - pop ebp - lea ebp,[(L$001K512-L$000pic_point)+ebp] - sub esp,16 - and esp,-64 - shl eax,7 - add eax,edi - mov DWORD [esp],esi - mov DWORD [4+esp],edi - mov DWORD [8+esp],eax - mov DWORD [12+esp],ebx - lea edx,[_OPENSSL_ia32cap_P] - mov ecx,DWORD [edx] - test ecx,67108864 - jz NEAR L$002loop_x86 - mov edx,DWORD [4+edx] - movq mm0,[esi] - and ecx,16777216 - movq mm1,[8+esi] - and edx,512 - movq mm2,[16+esi] - or ecx,edx - movq mm3,[24+esi] - movq mm4,[32+esi] - movq mm5,[40+esi] - movq mm6,[48+esi] - movq mm7,[56+esi] - cmp ecx,16777728 - je NEAR L$003SSSE3 - sub esp,80 - jmp NEAR L$004loop_sse2 -align 16 -L$004loop_sse2: - movq [8+esp],mm1 - movq [16+esp],mm2 - movq [24+esp],mm3 - movq [40+esp],mm5 - movq [48+esp],mm6 - pxor mm2,mm1 - movq [56+esp],mm7 - movq mm3,mm0 - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - add edi,8 - mov edx,15 - bswap eax - bswap ebx - jmp NEAR L$00500_14_sse2 -align 16 -L$00500_14_sse2: - movd mm1,eax - mov eax,DWORD [edi] - movd mm7,ebx - mov ebx,DWORD [4+edi] - add edi,8 - bswap eax - bswap ebx - punpckldq mm7,mm1 - movq mm1,mm4 - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - 
psllq mm4,23 - movq mm0,mm3 - movq [72+esp],mm7 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - paddq mm7,[ebp] - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - sub esp,8 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[40+esp] - paddq mm3,mm2 - movq mm2,mm0 - add ebp,8 - paddq mm3,mm6 - movq mm6,[48+esp] - dec edx - jnz NEAR L$00500_14_sse2 - movd mm1,eax - movd mm7,ebx - punpckldq mm7,mm1 - movq mm1,mm4 - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - movq mm0,mm3 - movq [72+esp],mm7 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - paddq mm7,[ebp] - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - sub esp,8 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm7,[192+esp] - paddq mm3,mm2 - movq mm2,mm0 - add ebp,8 - paddq mm3,mm6 - pxor mm0,mm0 - mov edx,32 - jmp NEAR L$00616_79_sse2 -align 16 -L$00616_79_sse2: - movq mm5,[88+esp] - movq mm1,mm7 - psrlq mm7,1 - movq mm6,mm5 - psrlq mm5,6 - psllq mm1,56 - paddq mm0,mm3 - movq mm3,mm7 - psrlq mm7,6 - pxor mm3,mm1 - psllq mm1,7 - pxor mm3,mm7 - psrlq mm7,1 - pxor mm3,mm1 - movq mm1,mm5 - psrlq mm5,13 - pxor mm7,mm3 - psllq mm6,3 - pxor mm1,mm5 - paddq mm7,[200+esp] - pxor mm1,mm6 - psrlq 
mm5,42 - paddq mm7,[128+esp] - pxor mm1,mm5 - psllq mm6,42 - movq mm5,[40+esp] - pxor mm1,mm6 - movq mm6,[48+esp] - paddq mm7,mm1 - movq mm1,mm4 - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - movq [72+esp],mm7 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - paddq mm7,[ebp] - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - sub esp,8 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm7,[192+esp] - paddq mm2,mm6 - add ebp,8 - movq mm5,[88+esp] - movq mm1,mm7 - psrlq mm7,1 - movq mm6,mm5 - psrlq mm5,6 - psllq mm1,56 - paddq mm2,mm3 - movq mm3,mm7 - psrlq mm7,6 - pxor mm3,mm1 - psllq mm1,7 - pxor mm3,mm7 - psrlq mm7,1 - pxor mm3,mm1 - movq mm1,mm5 - psrlq mm5,13 - pxor mm7,mm3 - psllq mm6,3 - pxor mm1,mm5 - paddq mm7,[200+esp] - pxor mm1,mm6 - psrlq mm5,42 - paddq mm7,[128+esp] - pxor mm1,mm5 - psllq mm6,42 - movq mm5,[40+esp] - pxor mm1,mm6 - movq mm6,[48+esp] - paddq mm7,mm1 - movq mm1,mm4 - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - movq [72+esp],mm7 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - paddq mm7,[ebp] - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - sub esp,8 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor 
mm6,mm7 - movq mm7,[192+esp] - paddq mm0,mm6 - add ebp,8 - dec edx - jnz NEAR L$00616_79_sse2 - paddq mm0,mm3 - movq mm1,[8+esp] - movq mm3,[24+esp] - movq mm5,[40+esp] - movq mm6,[48+esp] - movq mm7,[56+esp] - pxor mm2,mm1 - paddq mm0,[esi] - paddq mm1,[8+esi] - paddq mm2,[16+esi] - paddq mm3,[24+esi] - paddq mm4,[32+esi] - paddq mm5,[40+esi] - paddq mm6,[48+esi] - paddq mm7,[56+esi] - mov eax,640 - movq [esi],mm0 - movq [8+esi],mm1 - movq [16+esi],mm2 - movq [24+esi],mm3 - movq [32+esi],mm4 - movq [40+esi],mm5 - movq [48+esi],mm6 - movq [56+esi],mm7 - lea esp,[eax*1+esp] - sub ebp,eax - cmp edi,DWORD [88+esp] - jb NEAR L$004loop_sse2 - mov esp,DWORD [92+esp] - emms - pop edi - pop esi - pop ebx - pop ebp - ret -align 32 -L$003SSSE3: - lea edx,[esp-64] - sub esp,256 - movdqa xmm1,[640+ebp] - movdqu xmm0,[edi] -db 102,15,56,0,193 - movdqa xmm3,[ebp] - movdqa xmm2,xmm1 - movdqu xmm1,[16+edi] - paddq xmm3,xmm0 -db 102,15,56,0,202 - movdqa [edx-128],xmm3 - movdqa xmm4,[16+ebp] - movdqa xmm3,xmm2 - movdqu xmm2,[32+edi] - paddq xmm4,xmm1 -db 102,15,56,0,211 - movdqa [edx-112],xmm4 - movdqa xmm5,[32+ebp] - movdqa xmm4,xmm3 - movdqu xmm3,[48+edi] - paddq xmm5,xmm2 -db 102,15,56,0,220 - movdqa [edx-96],xmm5 - movdqa xmm6,[48+ebp] - movdqa xmm5,xmm4 - movdqu xmm4,[64+edi] - paddq xmm6,xmm3 -db 102,15,56,0,229 - movdqa [edx-80],xmm6 - movdqa xmm7,[64+ebp] - movdqa xmm6,xmm5 - movdqu xmm5,[80+edi] - paddq xmm7,xmm4 -db 102,15,56,0,238 - movdqa [edx-64],xmm7 - movdqa [edx],xmm0 - movdqa xmm0,[80+ebp] - movdqa xmm7,xmm6 - movdqu xmm6,[96+edi] - paddq xmm0,xmm5 -db 102,15,56,0,247 - movdqa [edx-48],xmm0 - movdqa [16+edx],xmm1 - movdqa xmm1,[96+ebp] - movdqa xmm0,xmm7 - movdqu xmm7,[112+edi] - paddq xmm1,xmm6 -db 102,15,56,0,248 - movdqa [edx-32],xmm1 - movdqa [32+edx],xmm2 - movdqa xmm2,[112+ebp] - movdqa xmm0,[edx] - paddq xmm2,xmm7 - movdqa [edx-16],xmm2 - nop -align 32 -L$007loop_ssse3: - movdqa xmm2,[16+edx] - movdqa [48+edx],xmm3 - lea ebp,[128+ebp] - movq [8+esp],mm1 - mov 
ebx,edi - movq [16+esp],mm2 - lea edi,[128+edi] - movq [24+esp],mm3 - cmp edi,eax - movq [40+esp],mm5 - cmovb ebx,edi - movq [48+esp],mm6 - mov ecx,4 - pxor mm2,mm1 - movq [56+esp],mm7 - pxor mm3,mm3 - jmp NEAR L$00800_47_ssse3 -align 32 -L$00800_47_ssse3: - movdqa xmm3,xmm5 - movdqa xmm1,xmm2 -db 102,15,58,15,208,8 - movdqa [edx],xmm4 -db 102,15,58,15,220,8 - movdqa xmm4,xmm2 - psrlq xmm2,7 - paddq xmm0,xmm3 - movdqa xmm3,xmm4 - psrlq xmm4,1 - psllq xmm3,56 - pxor xmm2,xmm4 - psrlq xmm4,7 - pxor xmm2,xmm3 - psllq xmm3,7 - pxor xmm2,xmm4 - movdqa xmm4,xmm7 - pxor xmm2,xmm3 - movdqa xmm3,xmm7 - psrlq xmm4,6 - paddq xmm0,xmm2 - movdqa xmm2,xmm7 - psrlq xmm3,19 - psllq xmm2,3 - pxor xmm4,xmm3 - psrlq xmm3,42 - pxor xmm4,xmm2 - psllq xmm2,42 - pxor xmm4,xmm3 - movdqa xmm3,[32+edx] - pxor xmm4,xmm2 - movdqa xmm2,[ebp] - movq mm1,mm4 - paddq xmm0,xmm4 - movq mm7,[edx-128] - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - paddq xmm2,xmm0 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[32+esp] - paddq mm2,mm6 - movq mm6,[40+esp] - movq mm1,mm4 - movq mm7,[edx-120] - pxor mm5,mm6 - psrlq mm1,14 - movq [24+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [56+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[48+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[16+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq 
mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[24+esp] - paddq mm0,mm6 - movq mm6,[32+esp] - movdqa [edx-128],xmm2 - movdqa xmm4,xmm6 - movdqa xmm2,xmm3 -db 102,15,58,15,217,8 - movdqa [16+edx],xmm5 -db 102,15,58,15,229,8 - movdqa xmm5,xmm3 - psrlq xmm3,7 - paddq xmm1,xmm4 - movdqa xmm4,xmm5 - psrlq xmm5,1 - psllq xmm4,56 - pxor xmm3,xmm5 - psrlq xmm5,7 - pxor xmm3,xmm4 - psllq xmm4,7 - pxor xmm3,xmm5 - movdqa xmm5,xmm0 - pxor xmm3,xmm4 - movdqa xmm4,xmm0 - psrlq xmm5,6 - paddq xmm1,xmm3 - movdqa xmm3,xmm0 - psrlq xmm4,19 - psllq xmm3,3 - pxor xmm5,xmm4 - psrlq xmm4,42 - pxor xmm5,xmm3 - psllq xmm3,42 - pxor xmm5,xmm4 - movdqa xmm4,[48+edx] - pxor xmm5,xmm3 - movdqa xmm3,[16+ebp] - movq mm1,mm4 - paddq xmm1,xmm5 - movq mm7,[edx-112] - pxor mm5,mm6 - psrlq mm1,14 - movq [16+esp],mm4 - paddq xmm3,xmm1 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [48+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[40+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[8+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[56+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[16+esp] - paddq mm2,mm6 - movq mm6,[24+esp] - movq mm1,mm4 - movq mm7,[edx-104] - pxor mm5,mm6 - psrlq mm1,14 - movq [8+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [40+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[32+esp] - pxor 
mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[48+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[8+esp] - paddq mm0,mm6 - movq mm6,[16+esp] - movdqa [edx-112],xmm3 - movdqa xmm5,xmm7 - movdqa xmm3,xmm4 -db 102,15,58,15,226,8 - movdqa [32+edx],xmm6 -db 102,15,58,15,238,8 - movdqa xmm6,xmm4 - psrlq xmm4,7 - paddq xmm2,xmm5 - movdqa xmm5,xmm6 - psrlq xmm6,1 - psllq xmm5,56 - pxor xmm4,xmm6 - psrlq xmm6,7 - pxor xmm4,xmm5 - psllq xmm5,7 - pxor xmm4,xmm6 - movdqa xmm6,xmm1 - pxor xmm4,xmm5 - movdqa xmm5,xmm1 - psrlq xmm6,6 - paddq xmm2,xmm4 - movdqa xmm4,xmm1 - psrlq xmm5,19 - psllq xmm4,3 - pxor xmm6,xmm5 - psrlq xmm5,42 - pxor xmm6,xmm4 - psllq xmm4,42 - pxor xmm6,xmm5 - movdqa xmm5,[edx] - pxor xmm6,xmm4 - movdqa xmm4,[32+ebp] - movq mm1,mm4 - paddq xmm2,xmm6 - movq mm7,[edx-96] - pxor mm5,mm6 - psrlq mm1,14 - movq [esp],mm4 - paddq xmm4,xmm2 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [32+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[24+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[56+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[40+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[esp] - paddq mm2,mm6 - movq mm6,[8+esp] - movq mm1,mm4 - movq mm7,[edx-88] - pxor mm5,mm6 - psrlq mm1,14 - movq [56+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq 
[24+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[16+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[48+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[32+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[56+esp] - paddq mm0,mm6 - movq mm6,[esp] - movdqa [edx-96],xmm4 - movdqa xmm6,xmm0 - movdqa xmm4,xmm5 -db 102,15,58,15,235,8 - movdqa [48+edx],xmm7 -db 102,15,58,15,247,8 - movdqa xmm7,xmm5 - psrlq xmm5,7 - paddq xmm3,xmm6 - movdqa xmm6,xmm7 - psrlq xmm7,1 - psllq xmm6,56 - pxor xmm5,xmm7 - psrlq xmm7,7 - pxor xmm5,xmm6 - psllq xmm6,7 - pxor xmm5,xmm7 - movdqa xmm7,xmm2 - pxor xmm5,xmm6 - movdqa xmm6,xmm2 - psrlq xmm7,6 - paddq xmm3,xmm5 - movdqa xmm5,xmm2 - psrlq xmm6,19 - psllq xmm5,3 - pxor xmm7,xmm6 - psrlq xmm6,42 - pxor xmm7,xmm5 - psllq xmm5,42 - pxor xmm7,xmm6 - movdqa xmm6,[16+edx] - pxor xmm7,xmm5 - movdqa xmm5,[48+ebp] - movq mm1,mm4 - paddq xmm3,xmm7 - movq mm7,[edx-80] - pxor mm5,mm6 - psrlq mm1,14 - movq [48+esp],mm4 - paddq xmm5,xmm3 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [16+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[8+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[40+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[24+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[48+esp] - paddq mm2,mm6 - movq mm6,[56+esp] - movq mm1,mm4 - movq mm7,[edx-72] - pxor mm5,mm6 - psrlq mm1,14 - movq [40+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq 
mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [8+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[32+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[16+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[40+esp] - paddq mm0,mm6 - movq mm6,[48+esp] - movdqa [edx-80],xmm5 - movdqa xmm7,xmm1 - movdqa xmm5,xmm6 -db 102,15,58,15,244,8 - movdqa [edx],xmm0 -db 102,15,58,15,248,8 - movdqa xmm0,xmm6 - psrlq xmm6,7 - paddq xmm4,xmm7 - movdqa xmm7,xmm0 - psrlq xmm0,1 - psllq xmm7,56 - pxor xmm6,xmm0 - psrlq xmm0,7 - pxor xmm6,xmm7 - psllq xmm7,7 - pxor xmm6,xmm0 - movdqa xmm0,xmm3 - pxor xmm6,xmm7 - movdqa xmm7,xmm3 - psrlq xmm0,6 - paddq xmm4,xmm6 - movdqa xmm6,xmm3 - psrlq xmm7,19 - psllq xmm6,3 - pxor xmm0,xmm7 - psrlq xmm7,42 - pxor xmm0,xmm6 - psllq xmm6,42 - pxor xmm0,xmm7 - movdqa xmm7,[32+edx] - pxor xmm0,xmm6 - movdqa xmm6,[64+ebp] - movq mm1,mm4 - paddq xmm4,xmm0 - movq mm7,[edx-64] - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - paddq xmm6,xmm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[32+esp] - paddq mm2,mm6 - movq mm6,[40+esp] - movq mm1,mm4 - movq mm7,[edx-56] - pxor mm5,mm6 - 
psrlq mm1,14 - movq [24+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [56+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[48+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[16+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[24+esp] - paddq mm0,mm6 - movq mm6,[32+esp] - movdqa [edx-64],xmm6 - movdqa xmm0,xmm2 - movdqa xmm6,xmm7 -db 102,15,58,15,253,8 - movdqa [16+edx],xmm1 -db 102,15,58,15,193,8 - movdqa xmm1,xmm7 - psrlq xmm7,7 - paddq xmm5,xmm0 - movdqa xmm0,xmm1 - psrlq xmm1,1 - psllq xmm0,56 - pxor xmm7,xmm1 - psrlq xmm1,7 - pxor xmm7,xmm0 - psllq xmm0,7 - pxor xmm7,xmm1 - movdqa xmm1,xmm4 - pxor xmm7,xmm0 - movdqa xmm0,xmm4 - psrlq xmm1,6 - paddq xmm5,xmm7 - movdqa xmm7,xmm4 - psrlq xmm0,19 - psllq xmm7,3 - pxor xmm1,xmm0 - psrlq xmm0,42 - pxor xmm1,xmm7 - psllq xmm7,42 - pxor xmm1,xmm0 - movdqa xmm0,[48+edx] - pxor xmm1,xmm7 - movdqa xmm7,[80+ebp] - movq mm1,mm4 - paddq xmm5,xmm1 - movq mm7,[edx-48] - pxor mm5,mm6 - psrlq mm1,14 - movq [16+esp],mm4 - paddq xmm7,xmm5 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [48+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[40+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[8+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[56+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq 
mm5,[16+esp] - paddq mm2,mm6 - movq mm6,[24+esp] - movq mm1,mm4 - movq mm7,[edx-40] - pxor mm5,mm6 - psrlq mm1,14 - movq [8+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [40+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[32+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[48+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[8+esp] - paddq mm0,mm6 - movq mm6,[16+esp] - movdqa [edx-48],xmm7 - movdqa xmm1,xmm3 - movdqa xmm7,xmm0 -db 102,15,58,15,198,8 - movdqa [32+edx],xmm2 -db 102,15,58,15,202,8 - movdqa xmm2,xmm0 - psrlq xmm0,7 - paddq xmm6,xmm1 - movdqa xmm1,xmm2 - psrlq xmm2,1 - psllq xmm1,56 - pxor xmm0,xmm2 - psrlq xmm2,7 - pxor xmm0,xmm1 - psllq xmm1,7 - pxor xmm0,xmm2 - movdqa xmm2,xmm5 - pxor xmm0,xmm1 - movdqa xmm1,xmm5 - psrlq xmm2,6 - paddq xmm6,xmm0 - movdqa xmm0,xmm5 - psrlq xmm1,19 - psllq xmm0,3 - pxor xmm2,xmm1 - psrlq xmm1,42 - pxor xmm2,xmm0 - psllq xmm0,42 - pxor xmm2,xmm1 - movdqa xmm1,[edx] - pxor xmm2,xmm0 - movdqa xmm0,[96+ebp] - movq mm1,mm4 - paddq xmm6,xmm2 - movq mm7,[edx-32] - pxor mm5,mm6 - psrlq mm1,14 - movq [esp],mm4 - paddq xmm0,xmm6 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [32+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[24+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[56+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[40+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor 
mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[esp] - paddq mm2,mm6 - movq mm6,[8+esp] - movq mm1,mm4 - movq mm7,[edx-24] - pxor mm5,mm6 - psrlq mm1,14 - movq [56+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [24+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[16+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[48+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[32+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[56+esp] - paddq mm0,mm6 - movq mm6,[esp] - movdqa [edx-32],xmm0 - movdqa xmm2,xmm4 - movdqa xmm0,xmm1 -db 102,15,58,15,207,8 - movdqa [48+edx],xmm3 -db 102,15,58,15,211,8 - movdqa xmm3,xmm1 - psrlq xmm1,7 - paddq xmm7,xmm2 - movdqa xmm2,xmm3 - psrlq xmm3,1 - psllq xmm2,56 - pxor xmm1,xmm3 - psrlq xmm3,7 - pxor xmm1,xmm2 - psllq xmm2,7 - pxor xmm1,xmm3 - movdqa xmm3,xmm6 - pxor xmm1,xmm2 - movdqa xmm2,xmm6 - psrlq xmm3,6 - paddq xmm7,xmm1 - movdqa xmm1,xmm6 - psrlq xmm2,19 - psllq xmm1,3 - pxor xmm3,xmm2 - psrlq xmm2,42 - pxor xmm3,xmm1 - psllq xmm1,42 - pxor xmm3,xmm2 - movdqa xmm2,[16+edx] - pxor xmm3,xmm1 - movdqa xmm1,[112+ebp] - movq mm1,mm4 - paddq xmm7,xmm3 - movq mm7,[edx-16] - pxor mm5,mm6 - psrlq mm1,14 - movq [48+esp],mm4 - paddq xmm1,xmm7 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [16+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[8+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[40+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[24+esp] - psrlq 
mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[48+esp] - paddq mm2,mm6 - movq mm6,[56+esp] - movq mm1,mm4 - movq mm7,[edx-8] - pxor mm5,mm6 - psrlq mm1,14 - movq [40+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [8+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[32+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[16+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[40+esp] - paddq mm0,mm6 - movq mm6,[48+esp] - movdqa [edx-16],xmm1 - lea ebp,[128+ebp] - dec ecx - jnz NEAR L$00800_47_ssse3 - movdqa xmm1,[ebp] - lea ebp,[ebp-640] - movdqu xmm0,[ebx] -db 102,15,56,0,193 - movdqa xmm3,[ebp] - movdqa xmm2,xmm1 - movdqu xmm1,[16+ebx] - paddq xmm3,xmm0 -db 102,15,56,0,202 - movq mm1,mm4 - movq mm7,[edx-128] - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[32+esp] - paddq mm2,mm6 - movq mm6,[40+esp] - movq mm1,mm4 - movq mm7,[edx-120] - pxor mm5,mm6 - psrlq 
mm1,14 - movq [24+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [56+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[48+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[16+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[24+esp] - paddq mm0,mm6 - movq mm6,[32+esp] - movdqa [edx-128],xmm3 - movdqa xmm4,[16+ebp] - movdqa xmm3,xmm2 - movdqu xmm2,[32+ebx] - paddq xmm4,xmm1 -db 102,15,56,0,211 - movq mm1,mm4 - movq mm7,[edx-112] - pxor mm5,mm6 - psrlq mm1,14 - movq [16+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [48+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[40+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[8+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[56+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[16+esp] - paddq mm2,mm6 - movq mm6,[24+esp] - movq mm1,mm4 - movq mm7,[edx-104] - pxor mm5,mm6 - psrlq mm1,14 - movq [8+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [40+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[32+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq 
mm6,25 - movq mm1,[48+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[8+esp] - paddq mm0,mm6 - movq mm6,[16+esp] - movdqa [edx-112],xmm4 - movdqa xmm5,[32+ebp] - movdqa xmm4,xmm3 - movdqu xmm3,[48+ebx] - paddq xmm5,xmm2 -db 102,15,56,0,220 - movq mm1,mm4 - movq mm7,[edx-96] - pxor mm5,mm6 - psrlq mm1,14 - movq [esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [32+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[24+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[56+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[40+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[esp] - paddq mm2,mm6 - movq mm6,[8+esp] - movq mm1,mm4 - movq mm7,[edx-88] - pxor mm5,mm6 - psrlq mm1,14 - movq [56+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [24+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[16+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[48+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[32+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[56+esp] - paddq mm0,mm6 - movq mm6,[esp] - movdqa [edx-96],xmm5 - movdqa xmm6,[48+ebp] - movdqa xmm5,xmm4 - movdqu xmm4,[64+ebx] - paddq xmm6,xmm3 -db 102,15,56,0,229 - movq mm1,mm4 - movq mm7,[edx-80] - 
pxor mm5,mm6 - psrlq mm1,14 - movq [48+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [16+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[8+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[40+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[24+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[48+esp] - paddq mm2,mm6 - movq mm6,[56+esp] - movq mm1,mm4 - movq mm7,[edx-72] - pxor mm5,mm6 - psrlq mm1,14 - movq [40+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [8+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[32+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[16+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[40+esp] - paddq mm0,mm6 - movq mm6,[48+esp] - movdqa [edx-80],xmm6 - movdqa xmm7,[64+ebp] - movdqa xmm6,xmm5 - movdqu xmm5,[80+ebx] - paddq xmm7,xmm4 -db 102,15,56,0,238 - movq mm1,mm4 - movq mm7,[edx-64] - pxor mm5,mm6 - psrlq mm1,14 - movq [32+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[56+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[24+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq 
mm7,mm5 - psllq mm6,25 - movq mm1,[8+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[32+esp] - paddq mm2,mm6 - movq mm6,[40+esp] - movq mm1,mm4 - movq mm7,[edx-56] - pxor mm5,mm6 - psrlq mm1,14 - movq [24+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [56+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[48+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[16+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[24+esp] - paddq mm0,mm6 - movq mm6,[32+esp] - movdqa [edx-64],xmm7 - movdqa [edx],xmm0 - movdqa xmm0,[80+ebp] - movdqa xmm7,xmm6 - movdqu xmm6,[96+ebx] - paddq xmm0,xmm5 -db 102,15,56,0,247 - movq mm1,mm4 - movq mm7,[edx-48] - pxor mm5,mm6 - psrlq mm1,14 - movq [16+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [48+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[40+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[8+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[56+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[16+esp] - paddq mm2,mm6 - movq mm6,[24+esp] - movq mm1,mm4 - movq mm7,[edx-40] - pxor mm5,mm6 - psrlq mm1,14 - movq [8+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq 
mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [40+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[32+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[48+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[8+esp] - paddq mm0,mm6 - movq mm6,[16+esp] - movdqa [edx-48],xmm0 - movdqa [16+edx],xmm1 - movdqa xmm1,[96+ebp] - movdqa xmm0,xmm7 - movdqu xmm7,[112+ebx] - paddq xmm1,xmm6 -db 102,15,56,0,248 - movq mm1,mm4 - movq mm7,[edx-32] - pxor mm5,mm6 - psrlq mm1,14 - movq [esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [32+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[24+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[56+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[40+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[esp] - paddq mm2,mm6 - movq mm6,[8+esp] - movq mm1,mm4 - movq mm7,[edx-24] - pxor mm5,mm6 - psrlq mm1,14 - movq [56+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [24+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[16+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[48+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[32+esp] - psrlq mm5,6 - pxor 
mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[56+esp] - paddq mm0,mm6 - movq mm6,[esp] - movdqa [edx-32],xmm1 - movdqa [32+edx],xmm2 - movdqa xmm2,[112+ebp] - movdqa xmm0,[edx] - paddq xmm2,xmm7 - movq mm1,mm4 - movq mm7,[edx-16] - pxor mm5,mm6 - psrlq mm1,14 - movq [48+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm0,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [16+esp],mm0 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[8+esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[40+esp] - paddq mm3,mm7 - movq mm5,mm0 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm0 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[24+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm0,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm2,mm0 - psllq mm6,6 - pxor mm7,mm5 - pxor mm2,mm1 - pxor mm6,mm7 - movq mm5,[48+esp] - paddq mm2,mm6 - movq mm6,[56+esp] - movq mm1,mm4 - movq mm7,[edx-8] - pxor mm5,mm6 - psrlq mm1,14 - movq [40+esp],mm4 - pand mm5,mm4 - psllq mm4,23 - paddq mm2,mm3 - movq mm3,mm1 - psrlq mm1,4 - pxor mm5,mm6 - pxor mm3,mm4 - psllq mm4,23 - pxor mm3,mm1 - movq [8+esp],mm2 - paddq mm7,mm5 - pxor mm3,mm4 - psrlq mm1,23 - paddq mm7,[esp] - pxor mm3,mm1 - psllq mm4,4 - pxor mm3,mm4 - movq mm4,[32+esp] - paddq mm3,mm7 - movq mm5,mm2 - psrlq mm5,28 - paddq mm4,mm3 - movq mm6,mm2 - movq mm7,mm5 - psllq mm6,25 - movq mm1,[16+esp] - psrlq mm5,6 - pxor mm7,mm6 - psllq mm6,5 - pxor mm7,mm5 - pxor mm2,mm1 - psrlq mm5,5 - pxor mm7,mm6 - pand mm0,mm2 - psllq mm6,6 - pxor mm7,mm5 - pxor mm0,mm1 - pxor mm6,mm7 - movq mm5,[40+esp] - paddq mm0,mm6 - movq mm6,[48+esp] - movdqa [edx-16],xmm2 - movq mm1,[8+esp] - paddq mm0,mm3 - movq mm3,[24+esp] - movq mm7,[56+esp] - pxor mm2,mm1 - paddq mm0,[esi] - paddq mm1,[8+esi] - paddq mm2,[16+esi] - paddq mm3,[24+esi] - paddq mm4,[32+esi] - paddq 
mm5,[40+esi] - paddq mm6,[48+esi] - paddq mm7,[56+esi] - movq [esi],mm0 - movq [8+esi],mm1 - movq [16+esi],mm2 - movq [24+esi],mm3 - movq [32+esi],mm4 - movq [40+esi],mm5 - movq [48+esi],mm6 - movq [56+esi],mm7 - cmp edi,eax - jb NEAR L$007loop_ssse3 - mov esp,DWORD [76+edx] - emms - pop edi - pop esi - pop ebx - pop ebp - ret -align 16 -L$002loop_x86: - mov eax,DWORD [edi] - mov ebx,DWORD [4+edi] - mov ecx,DWORD [8+edi] - mov edx,DWORD [12+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [16+edi] - mov ebx,DWORD [20+edi] - mov ecx,DWORD [24+edi] - mov edx,DWORD [28+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [32+edi] - mov ebx,DWORD [36+edi] - mov ecx,DWORD [40+edi] - mov edx,DWORD [44+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [48+edi] - mov ebx,DWORD [52+edi] - mov ecx,DWORD [56+edi] - mov edx,DWORD [60+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [64+edi] - mov ebx,DWORD [68+edi] - mov ecx,DWORD [72+edi] - mov edx,DWORD [76+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [80+edi] - mov ebx,DWORD [84+edi] - mov ecx,DWORD [88+edi] - mov edx,DWORD [92+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [96+edi] - mov ebx,DWORD [100+edi] - mov ecx,DWORD [104+edi] - mov edx,DWORD [108+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - mov eax,DWORD [112+edi] - mov ebx,DWORD [116+edi] - mov ecx,DWORD [120+edi] - mov edx,DWORD [124+edi] - bswap eax - bswap ebx - bswap ecx - bswap edx - push eax - push ebx - push ecx - push edx - add edi,128 - sub esp,72 - mov DWORD [204+esp],edi - lea edi,[8+esp] - mov ecx,16 -dd 2784229001 -align 
16 -L$00900_15_x86: - mov ecx,DWORD [40+esp] - mov edx,DWORD [44+esp] - mov esi,ecx - shr ecx,9 - mov edi,edx - shr edx,9 - mov ebx,ecx - shl esi,14 - mov eax,edx - shl edi,14 - xor ebx,esi - shr ecx,5 - xor eax,edi - shr edx,5 - xor eax,ecx - shl esi,4 - xor ebx,edx - shl edi,4 - xor ebx,esi - shr ecx,4 - xor eax,edi - shr edx,4 - xor eax,ecx - shl esi,5 - xor ebx,edx - shl edi,5 - xor eax,esi - xor ebx,edi - mov ecx,DWORD [48+esp] - mov edx,DWORD [52+esp] - mov esi,DWORD [56+esp] - mov edi,DWORD [60+esp] - add eax,DWORD [64+esp] - adc ebx,DWORD [68+esp] - xor ecx,esi - xor edx,edi - and ecx,DWORD [40+esp] - and edx,DWORD [44+esp] - add eax,DWORD [192+esp] - adc ebx,DWORD [196+esp] - xor ecx,esi - xor edx,edi - mov esi,DWORD [ebp] - mov edi,DWORD [4+ebp] - add eax,ecx - adc ebx,edx - mov ecx,DWORD [32+esp] - mov edx,DWORD [36+esp] - add eax,esi - adc ebx,edi - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - add eax,ecx - adc ebx,edx - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - mov DWORD [32+esp],eax - mov DWORD [36+esp],ebx - mov esi,ecx - shr ecx,2 - mov edi,edx - shr edx,2 - mov ebx,ecx - shl esi,4 - mov eax,edx - shl edi,4 - xor ebx,esi - shr ecx,5 - xor eax,edi - shr edx,5 - xor ebx,ecx - shl esi,21 - xor eax,edx - shl edi,21 - xor eax,esi - shr ecx,21 - xor ebx,edi - shr edx,21 - xor eax,ecx - shl esi,5 - xor ebx,edx - shl edi,5 - xor eax,esi - xor ebx,edi - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - mov esi,DWORD [16+esp] - mov edi,DWORD [20+esp] - add eax,DWORD [esp] - adc ebx,DWORD [4+esp] - or ecx,esi - or edx,edi - and ecx,DWORD [24+esp] - and edx,DWORD [28+esp] - and esi,DWORD [8+esp] - and edi,DWORD [12+esp] - or ecx,esi - or edx,edi - add eax,ecx - adc ebx,edx - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - mov dl,BYTE [ebp] - sub esp,8 - lea ebp,[8+ebp] - cmp dl,148 - jne NEAR L$00900_15_x86 -align 16 -L$01016_79_x86: - mov ecx,DWORD [312+esp] - mov edx,DWORD [316+esp] - mov esi,ecx - shr ecx,1 - mov edi,edx - shr edx,1 - mov eax,ecx - shl 
esi,24 - mov ebx,edx - shl edi,24 - xor ebx,esi - shr ecx,6 - xor eax,edi - shr edx,6 - xor eax,ecx - shl esi,7 - xor ebx,edx - shl edi,1 - xor ebx,esi - shr ecx,1 - xor eax,edi - shr edx,1 - xor eax,ecx - shl edi,6 - xor ebx,edx - xor eax,edi - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - mov ecx,DWORD [208+esp] - mov edx,DWORD [212+esp] - mov esi,ecx - shr ecx,6 - mov edi,edx - shr edx,6 - mov eax,ecx - shl esi,3 - mov ebx,edx - shl edi,3 - xor eax,esi - shr ecx,13 - xor ebx,edi - shr edx,13 - xor eax,ecx - shl esi,10 - xor ebx,edx - shl edi,10 - xor ebx,esi - shr ecx,10 - xor eax,edi - shr edx,10 - xor ebx,ecx - shl edi,13 - xor eax,edx - xor eax,edi - mov ecx,DWORD [320+esp] - mov edx,DWORD [324+esp] - add eax,DWORD [esp] - adc ebx,DWORD [4+esp] - mov esi,DWORD [248+esp] - mov edi,DWORD [252+esp] - add eax,ecx - adc ebx,edx - add eax,esi - adc ebx,edi - mov DWORD [192+esp],eax - mov DWORD [196+esp],ebx - mov ecx,DWORD [40+esp] - mov edx,DWORD [44+esp] - mov esi,ecx - shr ecx,9 - mov edi,edx - shr edx,9 - mov ebx,ecx - shl esi,14 - mov eax,edx - shl edi,14 - xor ebx,esi - shr ecx,5 - xor eax,edi - shr edx,5 - xor eax,ecx - shl esi,4 - xor ebx,edx - shl edi,4 - xor ebx,esi - shr ecx,4 - xor eax,edi - shr edx,4 - xor eax,ecx - shl esi,5 - xor ebx,edx - shl edi,5 - xor eax,esi - xor ebx,edi - mov ecx,DWORD [48+esp] - mov edx,DWORD [52+esp] - mov esi,DWORD [56+esp] - mov edi,DWORD [60+esp] - add eax,DWORD [64+esp] - adc ebx,DWORD [68+esp] - xor ecx,esi - xor edx,edi - and ecx,DWORD [40+esp] - and edx,DWORD [44+esp] - add eax,DWORD [192+esp] - adc ebx,DWORD [196+esp] - xor ecx,esi - xor edx,edi - mov esi,DWORD [ebp] - mov edi,DWORD [4+ebp] - add eax,ecx - adc ebx,edx - mov ecx,DWORD [32+esp] - mov edx,DWORD [36+esp] - add eax,esi - adc ebx,edi - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - add eax,ecx - adc ebx,edx - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - mov DWORD [32+esp],eax - mov DWORD [36+esp],ebx - mov esi,ecx - shr ecx,2 - mov edi,edx - shr edx,2 - 
mov ebx,ecx - shl esi,4 - mov eax,edx - shl edi,4 - xor ebx,esi - shr ecx,5 - xor eax,edi - shr edx,5 - xor ebx,ecx - shl esi,21 - xor eax,edx - shl edi,21 - xor eax,esi - shr ecx,21 - xor ebx,edi - shr edx,21 - xor eax,ecx - shl esi,5 - xor ebx,edx - shl edi,5 - xor eax,esi - xor ebx,edi - mov ecx,DWORD [8+esp] - mov edx,DWORD [12+esp] - mov esi,DWORD [16+esp] - mov edi,DWORD [20+esp] - add eax,DWORD [esp] - adc ebx,DWORD [4+esp] - or ecx,esi - or edx,edi - and ecx,DWORD [24+esp] - and edx,DWORD [28+esp] - and esi,DWORD [8+esp] - and edi,DWORD [12+esp] - or ecx,esi - or edx,edi - add eax,ecx - adc ebx,edx - mov DWORD [esp],eax - mov DWORD [4+esp],ebx - mov dl,BYTE [ebp] - sub esp,8 - lea ebp,[8+ebp] - cmp dl,23 - jne NEAR L$01016_79_x86 - mov esi,DWORD [840+esp] - mov edi,DWORD [844+esp] - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov edx,DWORD [12+esi] - add eax,DWORD [8+esp] - adc ebx,DWORD [12+esp] - mov DWORD [esi],eax - mov DWORD [4+esi],ebx - add ecx,DWORD [16+esp] - adc edx,DWORD [20+esp] - mov DWORD [8+esi],ecx - mov DWORD [12+esi],edx - mov eax,DWORD [16+esi] - mov ebx,DWORD [20+esi] - mov ecx,DWORD [24+esi] - mov edx,DWORD [28+esi] - add eax,DWORD [24+esp] - adc ebx,DWORD [28+esp] - mov DWORD [16+esi],eax - mov DWORD [20+esi],ebx - add ecx,DWORD [32+esp] - adc edx,DWORD [36+esp] - mov DWORD [24+esi],ecx - mov DWORD [28+esi],edx - mov eax,DWORD [32+esi] - mov ebx,DWORD [36+esi] - mov ecx,DWORD [40+esi] - mov edx,DWORD [44+esi] - add eax,DWORD [40+esp] - adc ebx,DWORD [44+esp] - mov DWORD [32+esi],eax - mov DWORD [36+esi],ebx - add ecx,DWORD [48+esp] - adc edx,DWORD [52+esp] - mov DWORD [40+esi],ecx - mov DWORD [44+esi],edx - mov eax,DWORD [48+esi] - mov ebx,DWORD [52+esi] - mov ecx,DWORD [56+esi] - mov edx,DWORD [60+esi] - add eax,DWORD [56+esp] - adc ebx,DWORD [60+esp] - mov DWORD [48+esi],eax - mov DWORD [52+esi],ebx - add ecx,DWORD [64+esp] - adc edx,DWORD [68+esp] - mov DWORD [56+esi],ecx - mov DWORD [60+esi],edx - add 
esp,840 - sub ebp,640 - cmp edi,DWORD [8+esp] - jb NEAR L$002loop_x86 - mov esp,DWORD [12+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -align 64 -L$001K512: -dd 3609767458,1116352408 -dd 602891725,1899447441 -dd 3964484399,3049323471 -dd 2173295548,3921009573 -dd 4081628472,961987163 -dd 3053834265,1508970993 -dd 2937671579,2453635748 -dd 3664609560,2870763221 -dd 2734883394,3624381080 -dd 1164996542,310598401 -dd 1323610764,607225278 -dd 3590304994,1426881987 -dd 4068182383,1925078388 -dd 991336113,2162078206 -dd 633803317,2614888103 -dd 3479774868,3248222580 -dd 2666613458,3835390401 -dd 944711139,4022224774 -dd 2341262773,264347078 -dd 2007800933,604807628 -dd 1495990901,770255983 -dd 1856431235,1249150122 -dd 3175218132,1555081692 -dd 2198950837,1996064986 -dd 3999719339,2554220882 -dd 766784016,2821834349 -dd 2566594879,2952996808 -dd 3203337956,3210313671 -dd 1034457026,3336571891 -dd 2466948901,3584528711 -dd 3758326383,113926993 -dd 168717936,338241895 -dd 1188179964,666307205 -dd 1546045734,773529912 -dd 1522805485,1294757372 -dd 2643833823,1396182291 -dd 2343527390,1695183700 -dd 1014477480,1986661051 -dd 1206759142,2177026350 -dd 344077627,2456956037 -dd 1290863460,2730485921 -dd 3158454273,2820302411 -dd 3505952657,3259730800 -dd 106217008,3345764771 -dd 3606008344,3516065817 -dd 1432725776,3600352804 -dd 1467031594,4094571909 -dd 851169720,275423344 -dd 3100823752,430227734 -dd 1363258195,506948616 -dd 3750685593,659060556 -dd 3785050280,883997877 -dd 3318307427,958139571 -dd 3812723403,1322822218 -dd 2003034995,1537002063 -dd 3602036899,1747873779 -dd 1575990012,1955562222 -dd 1125592928,2024104815 -dd 2716904306,2227730452 -dd 442776044,2361852424 -dd 593698344,2428436474 -dd 3733110249,2756734187 -dd 2999351573,3204031479 -dd 3815920427,3329325298 -dd 3928383900,3391569614 -dd 566280711,3515267271 -dd 3454069534,3940187606 -dd 4000239992,4118630271 -dd 1914138554,116418474 -dd 2731055270,174292421 -dd 3203993006,289380356 -dd 
320620315,460393269 -dd 587496836,685471733 -dd 1086792851,852142971 -dd 365543100,1017036298 -dd 2618297676,1126000580 -dd 3409855158,1288033470 -dd 4234509866,1501505948 -dd 987167468,1607167915 -dd 1246189591,1816402316 -dd 67438087,66051 -dd 202182159,134810123 -db 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 -db 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 -db 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -db 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -db 62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/vpaes-x86.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/vpaes-x86.asm deleted file mode 100644 index 81b8b8330f..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/vpaes-x86.asm +++ /dev/null @@ -1,682 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -%ifdef BORINGSSL_DISPATCH_TEST -extern _BORINGSSL_function_hit -%endif -align 64 -L$_vpaes_consts: -dd 218628480,235210255,168496130,67568393 -dd 252381056,17041926,33884169,51187212 -dd 252645135,252645135,252645135,252645135 -dd 1512730624,3266504856,1377990664,3401244816 -dd 830229760,1275146365,2969422977,3447763452 -dd 3411033600,2979783055,338359620,2782886510 -dd 4209124096,907596821,221174255,1006095553 -dd 191964160,3799684038,3164090317,1589111125 -dd 182528256,1777043520,2877432650,3265356744 -dd 1874708224,3503451415,3305285752,363511674 -dd 1606117888,3487855781,1093350906,2384367825 -dd 197121,67569157,134941193,202313229 -dd 67569157,134941193,202313229,197121 -dd 134941193,202313229,197121,67569157 -dd 202313229,197121,67569157,134941193 -dd 33619971,100992007,168364043,235736079 -dd 235736079,33619971,100992007,168364043 -dd 168364043,235736079,33619971,100992007 -dd 100992007,168364043,235736079,33619971 -dd 50462976,117835012,185207048,252579084 -dd 252314880,51251460,117574920,184942860 -dd 184682752,252054788,50987272,118359308 -dd 118099200,185467140,251790600,50727180 -dd 2946363062,528716217,1300004225,1881839624 -dd 1532713819,1532713819,1532713819,1532713819 -dd 3602276352,4288629033,3737020424,4153884961 -dd 1354558464,32357713,2958822624,3775749553 -dd 1201988352,132424512,1572796698,503232858 -dd 2213177600,1597421020,4103937655,675398315 -dd 2749646592,4273543773,1511898873,121693092 -dd 3040248576,1103263732,2871565598,1608280554 -dd 2236667136,2588920351,482954393,64377734 -dd 3069987328,291237287,2117370568,3650299247 -dd 533321216,3573750986,2572112006,1401264716 -dd 1339849704,2721158661,548607111,3445553514 -dd 2128193280,3054596040,2183486460,1257083700 -dd 655635200,1165381986,3923443150,2344132524 -dd 190078720,256924420,290342170,357187870 -dd 
1610966272,2263057382,4103205268,309794674 -dd 2592527872,2233205587,1335446729,3402964816 -dd 3973531904,3225098121,3002836325,1918774430 -dd 3870401024,2102906079,2284471353,4117666579 -dd 617007872,1021508343,366931923,691083277 -dd 2528395776,3491914898,2968704004,1613121270 -dd 3445188352,3247741094,844474987,4093578302 -dd 651481088,1190302358,1689581232,574775300 -dd 4289380608,206939853,2555985458,2489840491 -dd 2130264064,327674451,3566485037,3349835193 -dd 2470714624,316102159,3636825756,3393945945 -db 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 -db 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 -db 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 -db 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 -db 118,101,114,115,105,116,121,41,0 -align 64 -align 16 -__vpaes_preheat: - add ebp,DWORD [esp] - movdqa xmm7,[ebp-48] - movdqa xmm6,[ebp-16] - ret -align 16 -__vpaes_encrypt_core: - mov ecx,16 - mov eax,DWORD [240+edx] - movdqa xmm1,xmm6 - movdqa xmm2,[ebp] - pandn xmm1,xmm0 - pand xmm0,xmm6 - movdqu xmm5,[edx] -db 102,15,56,0,208 - movdqa xmm0,[16+ebp] - pxor xmm2,xmm5 - psrld xmm1,4 - add edx,16 -db 102,15,56,0,193 - lea ebx,[192+ebp] - pxor xmm0,xmm2 - jmp NEAR L$000enc_entry -align 16 -L$001enc_loop: - movdqa xmm4,[32+ebp] - movdqa xmm0,[48+ebp] -db 102,15,56,0,226 -db 102,15,56,0,195 - pxor xmm4,xmm5 - movdqa xmm5,[64+ebp] - pxor xmm0,xmm4 - movdqa xmm1,[ecx*1+ebx-64] -db 102,15,56,0,234 - movdqa xmm2,[80+ebp] - movdqa xmm4,[ecx*1+ebx] -db 102,15,56,0,211 - movdqa xmm3,xmm0 - pxor xmm2,xmm5 -db 102,15,56,0,193 - add edx,16 - pxor xmm0,xmm2 -db 102,15,56,0,220 - add ecx,16 - pxor xmm3,xmm0 -db 102,15,56,0,193 - and ecx,48 - sub eax,1 - pxor xmm0,xmm3 -L$000enc_entry: - movdqa xmm1,xmm6 - movdqa xmm5,[ebp-32] - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm6 -db 102,15,56,0,232 - movdqa xmm3,xmm7 - pxor xmm0,xmm1 -db 102,15,56,0,217 - movdqa xmm4,xmm7 - pxor xmm3,xmm5 -db 102,15,56,0,224 - movdqa xmm2,xmm7 - pxor 
xmm4,xmm5 -db 102,15,56,0,211 - movdqa xmm3,xmm7 - pxor xmm2,xmm0 -db 102,15,56,0,220 - movdqu xmm5,[edx] - pxor xmm3,xmm1 - jnz NEAR L$001enc_loop - movdqa xmm4,[96+ebp] - movdqa xmm0,[112+ebp] -db 102,15,56,0,226 - pxor xmm4,xmm5 -db 102,15,56,0,195 - movdqa xmm1,[64+ecx*1+ebx] - pxor xmm0,xmm4 -db 102,15,56,0,193 - ret -align 16 -__vpaes_decrypt_core: - lea ebx,[608+ebp] - mov eax,DWORD [240+edx] - movdqa xmm1,xmm6 - movdqa xmm2,[ebx-64] - pandn xmm1,xmm0 - mov ecx,eax - psrld xmm1,4 - movdqu xmm5,[edx] - shl ecx,4 - pand xmm0,xmm6 -db 102,15,56,0,208 - movdqa xmm0,[ebx-48] - xor ecx,48 -db 102,15,56,0,193 - and ecx,48 - pxor xmm2,xmm5 - movdqa xmm5,[176+ebp] - pxor xmm0,xmm2 - add edx,16 - lea ecx,[ecx*1+ebx-352] - jmp NEAR L$002dec_entry -align 16 -L$003dec_loop: - movdqa xmm4,[ebx-32] - movdqa xmm1,[ebx-16] -db 102,15,56,0,226 -db 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,[ebx] - pxor xmm0,xmm1 - movdqa xmm1,[16+ebx] -db 102,15,56,0,226 -db 102,15,56,0,197 -db 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,[32+ebx] - pxor xmm0,xmm1 - movdqa xmm1,[48+ebx] -db 102,15,56,0,226 -db 102,15,56,0,197 -db 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,[64+ebx] - pxor xmm0,xmm1 - movdqa xmm1,[80+ebx] -db 102,15,56,0,226 -db 102,15,56,0,197 -db 102,15,56,0,203 - pxor xmm0,xmm4 - add edx,16 -db 102,15,58,15,237,12 - pxor xmm0,xmm1 - sub eax,1 -L$002dec_entry: - movdqa xmm1,xmm6 - movdqa xmm2,[ebp-32] - pandn xmm1,xmm0 - pand xmm0,xmm6 - psrld xmm1,4 -db 102,15,56,0,208 - movdqa xmm3,xmm7 - pxor xmm0,xmm1 -db 102,15,56,0,217 - movdqa xmm4,xmm7 - pxor xmm3,xmm2 -db 102,15,56,0,224 - pxor xmm4,xmm2 - movdqa xmm2,xmm7 -db 102,15,56,0,211 - movdqa xmm3,xmm7 - pxor xmm2,xmm0 -db 102,15,56,0,220 - movdqu xmm0,[edx] - pxor xmm3,xmm1 - jnz NEAR L$003dec_loop - movdqa xmm4,[96+ebx] -db 102,15,56,0,226 - pxor xmm4,xmm0 - movdqa xmm0,[112+ebx] - movdqa xmm2,[ecx] -db 102,15,56,0,195 - pxor xmm0,xmm4 -db 102,15,56,0,194 - ret -align 16 -__vpaes_schedule_core: - add ebp,DWORD 
[esp] - movdqu xmm0,[esi] - movdqa xmm2,[320+ebp] - movdqa xmm3,xmm0 - lea ebx,[ebp] - movdqa [4+esp],xmm2 - call __vpaes_schedule_transform - movdqa xmm7,xmm0 - test edi,edi - jnz NEAR L$004schedule_am_decrypting - movdqu [edx],xmm0 - jmp NEAR L$005schedule_go -L$004schedule_am_decrypting: - movdqa xmm1,[256+ecx*1+ebp] -db 102,15,56,0,217 - movdqu [edx],xmm3 - xor ecx,48 -L$005schedule_go: - cmp eax,192 - ja NEAR L$006schedule_256 - je NEAR L$007schedule_192 -L$008schedule_128: - mov eax,10 -L$009loop_schedule_128: - call __vpaes_schedule_round - dec eax - jz NEAR L$010schedule_mangle_last - call __vpaes_schedule_mangle - jmp NEAR L$009loop_schedule_128 -align 16 -L$007schedule_192: - movdqu xmm0,[8+esi] - call __vpaes_schedule_transform - movdqa xmm6,xmm0 - pxor xmm4,xmm4 - movhlps xmm6,xmm4 - mov eax,4 -L$011loop_schedule_192: - call __vpaes_schedule_round -db 102,15,58,15,198,8 - call __vpaes_schedule_mangle - call __vpaes_schedule_192_smear - call __vpaes_schedule_mangle - call __vpaes_schedule_round - dec eax - jz NEAR L$010schedule_mangle_last - call __vpaes_schedule_mangle - call __vpaes_schedule_192_smear - jmp NEAR L$011loop_schedule_192 -align 16 -L$006schedule_256: - movdqu xmm0,[16+esi] - call __vpaes_schedule_transform - mov eax,7 -L$012loop_schedule_256: - call __vpaes_schedule_mangle - movdqa xmm6,xmm0 - call __vpaes_schedule_round - dec eax - jz NEAR L$010schedule_mangle_last - call __vpaes_schedule_mangle - pshufd xmm0,xmm0,255 - movdqa [20+esp],xmm7 - movdqa xmm7,xmm6 - call L$_vpaes_schedule_low_round - movdqa xmm7,[20+esp] - jmp NEAR L$012loop_schedule_256 -align 16 -L$010schedule_mangle_last: - lea ebx,[384+ebp] - test edi,edi - jnz NEAR L$013schedule_mangle_last_dec - movdqa xmm1,[256+ecx*1+ebp] -db 102,15,56,0,193 - lea ebx,[352+ebp] - add edx,32 -L$013schedule_mangle_last_dec: - add edx,-16 - pxor xmm0,[336+ebp] - call __vpaes_schedule_transform - movdqu [edx],xmm0 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor 
xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - ret -align 16 -__vpaes_schedule_192_smear: - pshufd xmm1,xmm6,128 - pshufd xmm0,xmm7,254 - pxor xmm6,xmm1 - pxor xmm1,xmm1 - pxor xmm6,xmm0 - movdqa xmm0,xmm6 - movhlps xmm6,xmm1 - ret -align 16 -__vpaes_schedule_round: - movdqa xmm2,[8+esp] - pxor xmm1,xmm1 -db 102,15,58,15,202,15 -db 102,15,58,15,210,15 - pxor xmm7,xmm1 - pshufd xmm0,xmm0,255 -db 102,15,58,15,192,1 - movdqa [8+esp],xmm2 -L$_vpaes_schedule_low_round: - movdqa xmm1,xmm7 - pslldq xmm7,4 - pxor xmm7,xmm1 - movdqa xmm1,xmm7 - pslldq xmm7,8 - pxor xmm7,xmm1 - pxor xmm7,[336+ebp] - movdqa xmm4,[ebp-16] - movdqa xmm5,[ebp-48] - movdqa xmm1,xmm4 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm4 - movdqa xmm2,[ebp-32] -db 102,15,56,0,208 - pxor xmm0,xmm1 - movdqa xmm3,xmm5 -db 102,15,56,0,217 - pxor xmm3,xmm2 - movdqa xmm4,xmm5 -db 102,15,56,0,224 - pxor xmm4,xmm2 - movdqa xmm2,xmm5 -db 102,15,56,0,211 - pxor xmm2,xmm0 - movdqa xmm3,xmm5 -db 102,15,56,0,220 - pxor xmm3,xmm1 - movdqa xmm4,[32+ebp] -db 102,15,56,0,226 - movdqa xmm0,[48+ebp] -db 102,15,56,0,195 - pxor xmm0,xmm4 - pxor xmm0,xmm7 - movdqa xmm7,xmm0 - ret -align 16 -__vpaes_schedule_transform: - movdqa xmm2,[ebp-16] - movdqa xmm1,xmm2 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm2 - movdqa xmm2,[ebx] -db 102,15,56,0,208 - movdqa xmm0,[16+ebx] -db 102,15,56,0,193 - pxor xmm0,xmm2 - ret -align 16 -__vpaes_schedule_mangle: - movdqa xmm4,xmm0 - movdqa xmm5,[128+ebp] - test edi,edi - jnz NEAR L$014schedule_mangle_dec - add edx,16 - pxor xmm4,[336+ebp] -db 102,15,56,0,229 - movdqa xmm3,xmm4 -db 102,15,56,0,229 - pxor xmm3,xmm4 -db 102,15,56,0,229 - pxor xmm3,xmm4 - jmp NEAR L$015schedule_mangle_both -align 16 -L$014schedule_mangle_dec: - movdqa xmm2,[ebp-16] - lea esi,[416+ebp] - movdqa xmm1,xmm2 - pandn xmm1,xmm4 - psrld xmm1,4 - pand xmm4,xmm2 - movdqa xmm2,[esi] -db 102,15,56,0,212 - movdqa xmm3,[16+esi] -db 102,15,56,0,217 - pxor xmm3,xmm2 -db 102,15,56,0,221 - movdqa xmm2,[32+esi] 
-db 102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,[48+esi] -db 102,15,56,0,217 - pxor xmm3,xmm2 -db 102,15,56,0,221 - movdqa xmm2,[64+esi] -db 102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,[80+esi] -db 102,15,56,0,217 - pxor xmm3,xmm2 -db 102,15,56,0,221 - movdqa xmm2,[96+esi] -db 102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,[112+esi] -db 102,15,56,0,217 - pxor xmm3,xmm2 - add edx,-16 -L$015schedule_mangle_both: - movdqa xmm1,[256+ecx*1+ebp] -db 102,15,56,0,217 - add ecx,-16 - and ecx,48 - movdqu [edx],xmm3 - ret -global _vpaes_set_encrypt_key -align 16 -_vpaes_set_encrypt_key: -L$_vpaes_set_encrypt_key_begin: - push ebp - push ebx - push esi - push edi -%ifdef BORINGSSL_DISPATCH_TEST - push ebx - push edx - call L$016pic -L$016pic: - pop ebx - lea ebx,[(_BORINGSSL_function_hit+5-L$016pic)+ebx] - mov edx,1 - mov BYTE [ebx],dl - pop edx - pop ebx -%endif - mov esi,DWORD [20+esp] - lea ebx,[esp-56] - mov eax,DWORD [24+esp] - and ebx,-16 - mov edx,DWORD [28+esp] - xchg ebx,esp - mov DWORD [48+esp],ebx - mov ebx,eax - shr ebx,5 - add ebx,5 - mov DWORD [240+edx],ebx - mov ecx,48 - mov edi,0 - lea ebp,[(L$_vpaes_consts+0x30-L$017pic_point)] - call __vpaes_schedule_core -L$017pic_point: - mov esp,DWORD [48+esp] - xor eax,eax - pop edi - pop esi - pop ebx - pop ebp - ret -global _vpaes_set_decrypt_key -align 16 -_vpaes_set_decrypt_key: -L$_vpaes_set_decrypt_key_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - lea ebx,[esp-56] - mov eax,DWORD [24+esp] - and ebx,-16 - mov edx,DWORD [28+esp] - xchg ebx,esp - mov DWORD [48+esp],ebx - mov ebx,eax - shr ebx,5 - add ebx,5 - mov DWORD [240+edx],ebx - shl ebx,4 - lea edx,[16+ebx*1+edx] - mov edi,1 - mov ecx,eax - shr ecx,1 - and ecx,32 - xor ecx,32 - lea ebp,[(L$_vpaes_consts+0x30-L$018pic_point)] - call __vpaes_schedule_core -L$018pic_point: - mov esp,DWORD [48+esp] - xor eax,eax - pop edi - pop esi - pop ebx - pop ebp - ret -global _vpaes_encrypt -align 16 -_vpaes_encrypt: -L$_vpaes_encrypt_begin: 
- push ebp - push ebx - push esi - push edi -%ifdef BORINGSSL_DISPATCH_TEST - push ebx - push edx - call L$019pic -L$019pic: - pop ebx - lea ebx,[(_BORINGSSL_function_hit+4-L$019pic)+ebx] - mov edx,1 - mov BYTE [ebx],dl - pop edx - pop ebx -%endif - lea ebp,[(L$_vpaes_consts+0x30-L$020pic_point)] - call __vpaes_preheat -L$020pic_point: - mov esi,DWORD [20+esp] - lea ebx,[esp-56] - mov edi,DWORD [24+esp] - and ebx,-16 - mov edx,DWORD [28+esp] - xchg ebx,esp - mov DWORD [48+esp],ebx - movdqu xmm0,[esi] - call __vpaes_encrypt_core - movdqu [edi],xmm0 - mov esp,DWORD [48+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -global _vpaes_decrypt -align 16 -_vpaes_decrypt: -L$_vpaes_decrypt_begin: - push ebp - push ebx - push esi - push edi - lea ebp,[(L$_vpaes_consts+0x30-L$021pic_point)] - call __vpaes_preheat -L$021pic_point: - mov esi,DWORD [20+esp] - lea ebx,[esp-56] - mov edi,DWORD [24+esp] - and ebx,-16 - mov edx,DWORD [28+esp] - xchg ebx,esp - mov DWORD [48+esp],ebx - movdqu xmm0,[esi] - call __vpaes_decrypt_core - movdqu [edi],xmm0 - mov esp,DWORD [48+esp] - pop edi - pop esi - pop ebx - pop ebp - ret -global _vpaes_cbc_encrypt -align 16 -_vpaes_cbc_encrypt: -L$_vpaes_cbc_encrypt_begin: - push ebp - push ebx - push esi - push edi - mov esi,DWORD [20+esp] - mov edi,DWORD [24+esp] - mov eax,DWORD [28+esp] - mov edx,DWORD [32+esp] - sub eax,16 - jc NEAR L$022cbc_abort - lea ebx,[esp-56] - mov ebp,DWORD [36+esp] - and ebx,-16 - mov ecx,DWORD [40+esp] - xchg ebx,esp - movdqu xmm1,[ebp] - sub edi,esi - mov DWORD [48+esp],ebx - mov DWORD [esp],edi - mov DWORD [4+esp],edx - mov DWORD [8+esp],ebp - mov edi,eax - lea ebp,[(L$_vpaes_consts+0x30-L$023pic_point)] - call __vpaes_preheat -L$023pic_point: - cmp ecx,0 - je NEAR L$024cbc_dec_loop - jmp NEAR L$025cbc_enc_loop -align 16 -L$025cbc_enc_loop: - movdqu xmm0,[esi] - pxor xmm0,xmm1 - call __vpaes_encrypt_core - mov ebx,DWORD [esp] - mov edx,DWORD [4+esp] - movdqa xmm1,xmm0 - movdqu [esi*1+ebx],xmm0 - lea esi,[16+esi] - 
sub edi,16 - jnc NEAR L$025cbc_enc_loop - jmp NEAR L$026cbc_done -align 16 -L$024cbc_dec_loop: - movdqu xmm0,[esi] - movdqa [16+esp],xmm1 - movdqa [32+esp],xmm0 - call __vpaes_decrypt_core - mov ebx,DWORD [esp] - mov edx,DWORD [4+esp] - pxor xmm0,[16+esp] - movdqa xmm1,[32+esp] - movdqu [esi*1+ebx],xmm0 - lea esi,[16+esi] - sub edi,16 - jnc NEAR L$024cbc_dec_loop -L$026cbc_done: - mov ebx,DWORD [8+esp] - mov esp,DWORD [48+esp] - movdqu [ebx],xmm1 -L$022cbc_abort: - pop edi - pop esi - pop ebx - pop ebp - ret diff --git a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/x86-mont.asm b/packager/third_party/boringssl/win-x86/crypto/fipsmodule/x86-mont.asm deleted file mode 100644 index 6a15ed944b..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/fipsmodule/x86-mont.asm +++ /dev/null @@ -1,493 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. -%endif -; Yasm automatically includes .00 and complains about redefining it. 
-; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -;extern _OPENSSL_ia32cap_P -global _bn_mul_mont -align 16 -_bn_mul_mont: -L$_bn_mul_mont_begin: - push ebp - push ebx - push esi - push edi - xor eax,eax - mov edi,DWORD [40+esp] - cmp edi,4 - jl NEAR L$000just_leave - lea esi,[20+esp] - lea edx,[24+esp] - add edi,2 - neg edi - lea ebp,[edi*4+esp-32] - neg edi - mov eax,ebp - sub eax,edx - and eax,2047 - sub ebp,eax - xor edx,ebp - and edx,2048 - xor edx,2048 - sub ebp,edx - and ebp,-64 - mov eax,esp - sub eax,ebp - and eax,-4096 - mov edx,esp - lea esp,[eax*1+ebp] - mov eax,DWORD [esp] - cmp esp,ebp - ja NEAR L$001page_walk - jmp NEAR L$002page_walk_done -align 16 -L$001page_walk: - lea esp,[esp-4096] - mov eax,DWORD [esp] - cmp esp,ebp - ja NEAR L$001page_walk -L$002page_walk_done: - mov eax,DWORD [esi] - mov ebx,DWORD [4+esi] - mov ecx,DWORD [8+esi] - mov ebp,DWORD [12+esi] - mov esi,DWORD [16+esi] - mov esi,DWORD [esi] - mov DWORD [4+esp],eax - mov DWORD [8+esp],ebx - mov DWORD [12+esp],ecx - mov DWORD [16+esp],ebp - mov DWORD [20+esp],esi - lea ebx,[edi-3] - mov DWORD [24+esp],edx - lea eax,[_OPENSSL_ia32cap_P] - bt DWORD [eax],26 - jnc NEAR L$003non_sse2 - mov eax,-1 - movd mm7,eax - mov esi,DWORD [8+esp] - mov edi,DWORD [12+esp] - mov ebp,DWORD [16+esp] - xor edx,edx - xor ecx,ecx - movd mm4,DWORD [edi] - movd mm5,DWORD [esi] - movd mm3,DWORD [ebp] - pmuludq mm5,mm4 - movq mm2,mm5 - movq mm0,mm5 - pand mm0,mm7 - pmuludq mm5,[20+esp] - pmuludq mm3,mm5 - paddq mm3,mm0 - movd mm1,DWORD [4+ebp] - movd mm0,DWORD [4+esi] - psrlq mm2,32 - psrlq mm3,32 - inc ecx -align 16 -L$0041st: - pmuludq mm0,mm4 - pmuludq mm1,mm5 - paddq mm2,mm0 - paddq mm3,mm1 - movq mm0,mm2 - pand mm0,mm7 - movd mm1,DWORD [4+ecx*4+ebp] - paddq mm3,mm0 - movd mm0,DWORD [4+ecx*4+esi] - psrlq mm2,32 - movd DWORD [28+ecx*4+esp],mm3 - psrlq mm3,32 - lea ecx,[1+ecx] - cmp 
ecx,ebx - jl NEAR L$0041st - pmuludq mm0,mm4 - pmuludq mm1,mm5 - paddq mm2,mm0 - paddq mm3,mm1 - movq mm0,mm2 - pand mm0,mm7 - paddq mm3,mm0 - movd DWORD [28+ecx*4+esp],mm3 - psrlq mm2,32 - psrlq mm3,32 - paddq mm3,mm2 - movq [32+ebx*4+esp],mm3 - inc edx -L$005outer: - xor ecx,ecx - movd mm4,DWORD [edx*4+edi] - movd mm5,DWORD [esi] - movd mm6,DWORD [32+esp] - movd mm3,DWORD [ebp] - pmuludq mm5,mm4 - paddq mm5,mm6 - movq mm0,mm5 - movq mm2,mm5 - pand mm0,mm7 - pmuludq mm5,[20+esp] - pmuludq mm3,mm5 - paddq mm3,mm0 - movd mm6,DWORD [36+esp] - movd mm1,DWORD [4+ebp] - movd mm0,DWORD [4+esi] - psrlq mm2,32 - psrlq mm3,32 - paddq mm2,mm6 - inc ecx - dec ebx -L$006inner: - pmuludq mm0,mm4 - pmuludq mm1,mm5 - paddq mm2,mm0 - paddq mm3,mm1 - movq mm0,mm2 - movd mm6,DWORD [36+ecx*4+esp] - pand mm0,mm7 - movd mm1,DWORD [4+ecx*4+ebp] - paddq mm3,mm0 - movd mm0,DWORD [4+ecx*4+esi] - psrlq mm2,32 - movd DWORD [28+ecx*4+esp],mm3 - psrlq mm3,32 - paddq mm2,mm6 - dec ebx - lea ecx,[1+ecx] - jnz NEAR L$006inner - mov ebx,ecx - pmuludq mm0,mm4 - pmuludq mm1,mm5 - paddq mm2,mm0 - paddq mm3,mm1 - movq mm0,mm2 - pand mm0,mm7 - paddq mm3,mm0 - movd DWORD [28+ecx*4+esp],mm3 - psrlq mm2,32 - psrlq mm3,32 - movd mm6,DWORD [36+ebx*4+esp] - paddq mm3,mm2 - paddq mm3,mm6 - movq [32+ebx*4+esp],mm3 - lea edx,[1+edx] - cmp edx,ebx - jle NEAR L$005outer - emms - jmp NEAR L$007common_tail -align 16 -L$003non_sse2: - mov esi,DWORD [8+esp] - lea ebp,[1+ebx] - mov edi,DWORD [12+esp] - xor ecx,ecx - mov edx,esi - and ebp,1 - sub edx,edi - lea eax,[4+ebx*4+edi] - or ebp,edx - mov edi,DWORD [edi] - jz NEAR L$008bn_sqr_mont - mov DWORD [28+esp],eax - mov eax,DWORD [esi] - xor edx,edx -align 16 -L$009mull: - mov ebp,edx - mul edi - add ebp,eax - lea ecx,[1+ecx] - adc edx,0 - mov eax,DWORD [ecx*4+esi] - cmp ecx,ebx - mov DWORD [28+ecx*4+esp],ebp - jl NEAR L$009mull - mov ebp,edx - mul edi - mov edi,DWORD [20+esp] - add eax,ebp - mov esi,DWORD [16+esp] - adc edx,0 - imul edi,DWORD [32+esp] - mov DWORD 
[32+ebx*4+esp],eax - xor ecx,ecx - mov DWORD [36+ebx*4+esp],edx - mov DWORD [40+ebx*4+esp],ecx - mov eax,DWORD [esi] - mul edi - add eax,DWORD [32+esp] - mov eax,DWORD [4+esi] - adc edx,0 - inc ecx - jmp NEAR L$0102ndmadd -align 16 -L$0111stmadd: - mov ebp,edx - mul edi - add ebp,DWORD [32+ecx*4+esp] - lea ecx,[1+ecx] - adc edx,0 - add ebp,eax - mov eax,DWORD [ecx*4+esi] - adc edx,0 - cmp ecx,ebx - mov DWORD [28+ecx*4+esp],ebp - jl NEAR L$0111stmadd - mov ebp,edx - mul edi - add eax,DWORD [32+ebx*4+esp] - mov edi,DWORD [20+esp] - adc edx,0 - mov esi,DWORD [16+esp] - add ebp,eax - adc edx,0 - imul edi,DWORD [32+esp] - xor ecx,ecx - add edx,DWORD [36+ebx*4+esp] - mov DWORD [32+ebx*4+esp],ebp - adc ecx,0 - mov eax,DWORD [esi] - mov DWORD [36+ebx*4+esp],edx - mov DWORD [40+ebx*4+esp],ecx - mul edi - add eax,DWORD [32+esp] - mov eax,DWORD [4+esi] - adc edx,0 - mov ecx,1 -align 16 -L$0102ndmadd: - mov ebp,edx - mul edi - add ebp,DWORD [32+ecx*4+esp] - lea ecx,[1+ecx] - adc edx,0 - add ebp,eax - mov eax,DWORD [ecx*4+esi] - adc edx,0 - cmp ecx,ebx - mov DWORD [24+ecx*4+esp],ebp - jl NEAR L$0102ndmadd - mov ebp,edx - mul edi - add ebp,DWORD [32+ebx*4+esp] - adc edx,0 - add ebp,eax - adc edx,0 - mov DWORD [28+ebx*4+esp],ebp - xor eax,eax - mov ecx,DWORD [12+esp] - add edx,DWORD [36+ebx*4+esp] - adc eax,DWORD [40+ebx*4+esp] - lea ecx,[4+ecx] - mov DWORD [32+ebx*4+esp],edx - cmp ecx,DWORD [28+esp] - mov DWORD [36+ebx*4+esp],eax - je NEAR L$007common_tail - mov edi,DWORD [ecx] - mov esi,DWORD [8+esp] - mov DWORD [12+esp],ecx - xor ecx,ecx - xor edx,edx - mov eax,DWORD [esi] - jmp NEAR L$0111stmadd -align 16 -L$008bn_sqr_mont: - mov DWORD [esp],ebx - mov DWORD [12+esp],ecx - mov eax,edi - mul edi - mov DWORD [32+esp],eax - mov ebx,edx - shr edx,1 - and ebx,1 - inc ecx -align 16 -L$012sqr: - mov eax,DWORD [ecx*4+esi] - mov ebp,edx - mul edi - add eax,ebp - lea ecx,[1+ecx] - adc edx,0 - lea ebp,[eax*2+ebx] - shr eax,31 - cmp ecx,DWORD [esp] - mov ebx,eax - mov DWORD 
[28+ecx*4+esp],ebp - jl NEAR L$012sqr - mov eax,DWORD [ecx*4+esi] - mov ebp,edx - mul edi - add eax,ebp - mov edi,DWORD [20+esp] - adc edx,0 - mov esi,DWORD [16+esp] - lea ebp,[eax*2+ebx] - imul edi,DWORD [32+esp] - shr eax,31 - mov DWORD [32+ecx*4+esp],ebp - lea ebp,[edx*2+eax] - mov eax,DWORD [esi] - shr edx,31 - mov DWORD [36+ecx*4+esp],ebp - mov DWORD [40+ecx*4+esp],edx - mul edi - add eax,DWORD [32+esp] - mov ebx,ecx - adc edx,0 - mov eax,DWORD [4+esi] - mov ecx,1 -align 16 -L$0133rdmadd: - mov ebp,edx - mul edi - add ebp,DWORD [32+ecx*4+esp] - adc edx,0 - add ebp,eax - mov eax,DWORD [4+ecx*4+esi] - adc edx,0 - mov DWORD [28+ecx*4+esp],ebp - mov ebp,edx - mul edi - add ebp,DWORD [36+ecx*4+esp] - lea ecx,[2+ecx] - adc edx,0 - add ebp,eax - mov eax,DWORD [ecx*4+esi] - adc edx,0 - cmp ecx,ebx - mov DWORD [24+ecx*4+esp],ebp - jl NEAR L$0133rdmadd - mov ebp,edx - mul edi - add ebp,DWORD [32+ebx*4+esp] - adc edx,0 - add ebp,eax - adc edx,0 - mov DWORD [28+ebx*4+esp],ebp - mov ecx,DWORD [12+esp] - xor eax,eax - mov esi,DWORD [8+esp] - add edx,DWORD [36+ebx*4+esp] - adc eax,DWORD [40+ebx*4+esp] - mov DWORD [32+ebx*4+esp],edx - cmp ecx,ebx - mov DWORD [36+ebx*4+esp],eax - je NEAR L$007common_tail - mov edi,DWORD [4+ecx*4+esi] - lea ecx,[1+ecx] - mov eax,edi - mov DWORD [12+esp],ecx - mul edi - add eax,DWORD [32+ecx*4+esp] - adc edx,0 - mov DWORD [32+ecx*4+esp],eax - xor ebp,ebp - cmp ecx,ebx - lea ecx,[1+ecx] - je NEAR L$014sqrlast - mov ebx,edx - shr edx,1 - and ebx,1 -align 16 -L$015sqradd: - mov eax,DWORD [ecx*4+esi] - mov ebp,edx - mul edi - add eax,ebp - lea ebp,[eax*1+eax] - adc edx,0 - shr eax,31 - add ebp,DWORD [32+ecx*4+esp] - lea ecx,[1+ecx] - adc eax,0 - add ebp,ebx - adc eax,0 - cmp ecx,DWORD [esp] - mov DWORD [28+ecx*4+esp],ebp - mov ebx,eax - jle NEAR L$015sqradd - mov ebp,edx - add edx,edx - shr ebp,31 - add edx,ebx - adc ebp,0 -L$014sqrlast: - mov edi,DWORD [20+esp] - mov esi,DWORD [16+esp] - imul edi,DWORD [32+esp] - add edx,DWORD [32+ecx*4+esp] - mov 
eax,DWORD [esi] - adc ebp,0 - mov DWORD [32+ecx*4+esp],edx - mov DWORD [36+ecx*4+esp],ebp - mul edi - add eax,DWORD [32+esp] - lea ebx,[ecx-1] - adc edx,0 - mov ecx,1 - mov eax,DWORD [4+esi] - jmp NEAR L$0133rdmadd -align 16 -L$007common_tail: - mov ebp,DWORD [16+esp] - mov edi,DWORD [4+esp] - lea esi,[32+esp] - mov eax,DWORD [esi] - mov ecx,ebx - xor edx,edx -align 16 -L$016sub: - sbb eax,DWORD [edx*4+ebp] - mov DWORD [edx*4+edi],eax - dec ecx - mov eax,DWORD [4+edx*4+esi] - lea edx,[1+edx] - jge NEAR L$016sub - sbb eax,0 - mov edx,-1 - xor edx,eax - jmp NEAR L$017copy -align 16 -L$017copy: - mov esi,DWORD [32+ebx*4+esp] - mov ebp,DWORD [ebx*4+edi] - mov DWORD [32+ebx*4+esp],ecx - and esi,eax - and ebp,edx - or ebp,esi - mov DWORD [ebx*4+edi],ebp - dec ebx - jge NEAR L$017copy - mov esp,DWORD [24+esp] - mov eax,1 -L$000just_leave: - pop edi - pop esi - pop ebx - pop ebp - ret -db 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 -db 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 -db 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 -db 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 -db 111,114,103,62,0 -segment .bss -common _OPENSSL_ia32cap_P 16 diff --git a/packager/third_party/boringssl/win-x86/crypto/test/trampoline-x86.asm b/packager/third_party/boringssl/win-x86/crypto/test/trampoline-x86.asm deleted file mode 100644 index e5c7d3f7fa..0000000000 --- a/packager/third_party/boringssl/win-x86/crypto/test/trampoline-x86.asm +++ /dev/null @@ -1,164 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -%ifidn __OUTPUT_FORMAT__,obj -section code use32 class=code align=64 -%elifidn __OUTPUT_FORMAT__,win32 -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01010000h -%error yasm version 1.1.0 or later needed. 
-%endif -; Yasm automatically includes .00 and complains about redefining it. -; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html -%else -$@feat.00 equ 1 -%endif -section .text code align=64 -%else -section .text code -%endif -global _abi_test_trampoline -align 16 -_abi_test_trampoline: -L$_abi_test_trampoline_begin: - push ebp - push ebx - push esi - push edi - mov ecx,DWORD [24+esp] - mov esi,DWORD [ecx] - mov edi,DWORD [4+ecx] - mov ebx,DWORD [8+ecx] - mov ebp,DWORD [12+ecx] - sub esp,44 - mov eax,DWORD [72+esp] - xor ecx,ecx -L$000loop: - cmp ecx,DWORD [76+esp] - jae NEAR L$001loop_done - mov edx,DWORD [ecx*4+eax] - mov DWORD [ecx*4+esp],edx - add ecx,1 - jmp NEAR L$000loop -L$001loop_done: - call DWORD [64+esp] - add esp,44 - mov ecx,DWORD [24+esp] - mov DWORD [ecx],esi - mov DWORD [4+ecx],edi - mov DWORD [8+ecx],ebx - mov DWORD [12+ecx],ebp - pop edi - pop esi - pop ebx - pop ebp - ret -global _abi_test_get_and_clear_direction_flag -align 16 -_abi_test_get_and_clear_direction_flag: -L$_abi_test_get_and_clear_direction_flag_begin: - pushfd - pop eax - and eax,1024 - shr eax,10 - cld - ret -global _abi_test_set_direction_flag -align 16 -_abi_test_set_direction_flag: -L$_abi_test_set_direction_flag_begin: - std - ret -global _abi_test_clobber_eax -align 16 -_abi_test_clobber_eax: -L$_abi_test_clobber_eax_begin: - xor eax,eax - ret -global _abi_test_clobber_ebx -align 16 -_abi_test_clobber_ebx: -L$_abi_test_clobber_ebx_begin: - xor ebx,ebx - ret -global _abi_test_clobber_ecx -align 16 -_abi_test_clobber_ecx: -L$_abi_test_clobber_ecx_begin: - xor ecx,ecx - ret -global _abi_test_clobber_edx -align 16 -_abi_test_clobber_edx: -L$_abi_test_clobber_edx_begin: - xor edx,edx - ret -global _abi_test_clobber_edi -align 16 -_abi_test_clobber_edi: -L$_abi_test_clobber_edi_begin: - xor edi,edi - ret -global _abi_test_clobber_esi -align 16 -_abi_test_clobber_esi: -L$_abi_test_clobber_esi_begin: - xor esi,esi - ret -global _abi_test_clobber_ebp -align 
16 -_abi_test_clobber_ebp: -L$_abi_test_clobber_ebp_begin: - xor ebp,ebp - ret -global _abi_test_clobber_xmm0 -align 16 -_abi_test_clobber_xmm0: -L$_abi_test_clobber_xmm0_begin: - pxor xmm0,xmm0 - ret -global _abi_test_clobber_xmm1 -align 16 -_abi_test_clobber_xmm1: -L$_abi_test_clobber_xmm1_begin: - pxor xmm1,xmm1 - ret -global _abi_test_clobber_xmm2 -align 16 -_abi_test_clobber_xmm2: -L$_abi_test_clobber_xmm2_begin: - pxor xmm2,xmm2 - ret -global _abi_test_clobber_xmm3 -align 16 -_abi_test_clobber_xmm3: -L$_abi_test_clobber_xmm3_begin: - pxor xmm3,xmm3 - ret -global _abi_test_clobber_xmm4 -align 16 -_abi_test_clobber_xmm4: -L$_abi_test_clobber_xmm4_begin: - pxor xmm4,xmm4 - ret -global _abi_test_clobber_xmm5 -align 16 -_abi_test_clobber_xmm5: -L$_abi_test_clobber_xmm5_begin: - pxor xmm5,xmm5 - ret -global _abi_test_clobber_xmm6 -align 16 -_abi_test_clobber_xmm6: -L$_abi_test_clobber_xmm6_begin: - pxor xmm6,xmm6 - ret -global _abi_test_clobber_xmm7 -align 16 -_abi_test_clobber_xmm7: -L$_abi_test_clobber_xmm7_begin: - pxor xmm7,xmm7 - ret diff --git a/packager/third_party/boringssl/win-x86_64/crypto/chacha/chacha-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/chacha/chacha-x86_64.asm deleted file mode 100644 index a3c29381e3..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/chacha/chacha-x86_64.asm +++ /dev/null @@ -1,1926 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -EXTERN OPENSSL_ia32cap_P - -ALIGN 64 -$L$zero: - DD 0,0,0,0 -$L$one: - DD 1,0,0,0 -$L$inc: - DD 0,1,2,3 -$L$four: - DD 4,4,4,4 -$L$incy: - DD 0,2,4,6,1,3,5,7 -$L$eight: - DD 8,8,8,8,8,8,8,8 -$L$rot16: -DB 0x2,0x3,0x0,0x1,0x6,0x7,0x4,0x5,0xa,0xb,0x8,0x9,0xe,0xf,0xc,0xd -$L$rot24: -DB 0x3,0x0,0x1,0x2,0x7,0x4,0x5,0x6,0xb,0x8,0x9,0xa,0xf,0xc,0xd,0xe -$L$sigma: -DB 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107 -DB 0 -ALIGN 64 -$L$zeroz: - DD 0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0 -$L$fourz: - DD 4,0,0,0,4,0,0,0,4,0,0,0,4,0,0,0 -$L$incz: - DD 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 -$L$sixteen: - DD 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 -DB 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 -DB 95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32 -DB 98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115 -DB 108,46,111,114,103,62,0 -global ChaCha20_ctr32 - -ALIGN 64 -ChaCha20_ctr32: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ChaCha20_ctr32: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - cmp rdx,0 - je NEAR $L$no_data - mov r10,QWORD[((OPENSSL_ia32cap_P+4))] - test r10d,512 - jnz NEAR $L$ChaCha20_ssse3 - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,64+24 - -$L$ctr32_body: - - - movdqu xmm1,XMMWORD[rcx] - movdqu xmm2,XMMWORD[16+rcx] - movdqu xmm3,XMMWORD[r8] - movdqa xmm4,XMMWORD[$L$one] - - - movdqa XMMWORD[16+rsp],xmm1 - movdqa XMMWORD[32+rsp],xmm2 - movdqa XMMWORD[48+rsp],xmm3 - mov rbp,rdx - jmp NEAR $L$oop_outer - -ALIGN 32 -$L$oop_outer: - mov eax,0x61707865 - mov ebx,0x3320646e - mov ecx,0x79622d32 - mov edx,0x6b206574 - mov r8d,DWORD[16+rsp] - mov r9d,DWORD[20+rsp] - mov r10d,DWORD[24+rsp] - mov r11d,DWORD[28+rsp] - movd r12d,xmm3 - mov 
r13d,DWORD[52+rsp] - mov r14d,DWORD[56+rsp] - mov r15d,DWORD[60+rsp] - - mov QWORD[((64+0))+rsp],rbp - mov ebp,10 - mov QWORD[((64+8))+rsp],rsi -DB 102,72,15,126,214 - mov QWORD[((64+16))+rsp],rdi - mov rdi,rsi - shr rdi,32 - jmp NEAR $L$oop - -ALIGN 32 -$L$oop: - add eax,r8d - xor r12d,eax - rol r12d,16 - add ebx,r9d - xor r13d,ebx - rol r13d,16 - add esi,r12d - xor r8d,esi - rol r8d,12 - add edi,r13d - xor r9d,edi - rol r9d,12 - add eax,r8d - xor r12d,eax - rol r12d,8 - add ebx,r9d - xor r13d,ebx - rol r13d,8 - add esi,r12d - xor r8d,esi - rol r8d,7 - add edi,r13d - xor r9d,edi - rol r9d,7 - mov DWORD[32+rsp],esi - mov DWORD[36+rsp],edi - mov esi,DWORD[40+rsp] - mov edi,DWORD[44+rsp] - add ecx,r10d - xor r14d,ecx - rol r14d,16 - add edx,r11d - xor r15d,edx - rol r15d,16 - add esi,r14d - xor r10d,esi - rol r10d,12 - add edi,r15d - xor r11d,edi - rol r11d,12 - add ecx,r10d - xor r14d,ecx - rol r14d,8 - add edx,r11d - xor r15d,edx - rol r15d,8 - add esi,r14d - xor r10d,esi - rol r10d,7 - add edi,r15d - xor r11d,edi - rol r11d,7 - add eax,r9d - xor r15d,eax - rol r15d,16 - add ebx,r10d - xor r12d,ebx - rol r12d,16 - add esi,r15d - xor r9d,esi - rol r9d,12 - add edi,r12d - xor r10d,edi - rol r10d,12 - add eax,r9d - xor r15d,eax - rol r15d,8 - add ebx,r10d - xor r12d,ebx - rol r12d,8 - add esi,r15d - xor r9d,esi - rol r9d,7 - add edi,r12d - xor r10d,edi - rol r10d,7 - mov DWORD[40+rsp],esi - mov DWORD[44+rsp],edi - mov esi,DWORD[32+rsp] - mov edi,DWORD[36+rsp] - add ecx,r11d - xor r13d,ecx - rol r13d,16 - add edx,r8d - xor r14d,edx - rol r14d,16 - add esi,r13d - xor r11d,esi - rol r11d,12 - add edi,r14d - xor r8d,edi - rol r8d,12 - add ecx,r11d - xor r13d,ecx - rol r13d,8 - add edx,r8d - xor r14d,edx - rol r14d,8 - add esi,r13d - xor r11d,esi - rol r11d,7 - add edi,r14d - xor r8d,edi - rol r8d,7 - dec ebp - jnz NEAR $L$oop - mov DWORD[36+rsp],edi - mov DWORD[32+rsp],esi - mov rbp,QWORD[64+rsp] - movdqa xmm1,xmm2 - mov rsi,QWORD[((64+8))+rsp] - paddd xmm3,xmm4 - mov 
rdi,QWORD[((64+16))+rsp] - - add eax,0x61707865 - add ebx,0x3320646e - add ecx,0x79622d32 - add edx,0x6b206574 - add r8d,DWORD[16+rsp] - add r9d,DWORD[20+rsp] - add r10d,DWORD[24+rsp] - add r11d,DWORD[28+rsp] - add r12d,DWORD[48+rsp] - add r13d,DWORD[52+rsp] - add r14d,DWORD[56+rsp] - add r15d,DWORD[60+rsp] - paddd xmm1,XMMWORD[32+rsp] - - cmp rbp,64 - jb NEAR $L$tail - - xor eax,DWORD[rsi] - xor ebx,DWORD[4+rsi] - xor ecx,DWORD[8+rsi] - xor edx,DWORD[12+rsi] - xor r8d,DWORD[16+rsi] - xor r9d,DWORD[20+rsi] - xor r10d,DWORD[24+rsi] - xor r11d,DWORD[28+rsi] - movdqu xmm0,XMMWORD[32+rsi] - xor r12d,DWORD[48+rsi] - xor r13d,DWORD[52+rsi] - xor r14d,DWORD[56+rsi] - xor r15d,DWORD[60+rsi] - lea rsi,[64+rsi] - pxor xmm0,xmm1 - - movdqa XMMWORD[32+rsp],xmm2 - movd DWORD[48+rsp],xmm3 - - mov DWORD[rdi],eax - mov DWORD[4+rdi],ebx - mov DWORD[8+rdi],ecx - mov DWORD[12+rdi],edx - mov DWORD[16+rdi],r8d - mov DWORD[20+rdi],r9d - mov DWORD[24+rdi],r10d - mov DWORD[28+rdi],r11d - movdqu XMMWORD[32+rdi],xmm0 - mov DWORD[48+rdi],r12d - mov DWORD[52+rdi],r13d - mov DWORD[56+rdi],r14d - mov DWORD[60+rdi],r15d - lea rdi,[64+rdi] - - sub rbp,64 - jnz NEAR $L$oop_outer - - jmp NEAR $L$done - -ALIGN 16 -$L$tail: - mov DWORD[rsp],eax - mov DWORD[4+rsp],ebx - xor rbx,rbx - mov DWORD[8+rsp],ecx - mov DWORD[12+rsp],edx - mov DWORD[16+rsp],r8d - mov DWORD[20+rsp],r9d - mov DWORD[24+rsp],r10d - mov DWORD[28+rsp],r11d - movdqa XMMWORD[32+rsp],xmm1 - mov DWORD[48+rsp],r12d - mov DWORD[52+rsp],r13d - mov DWORD[56+rsp],r14d - mov DWORD[60+rsp],r15d - -$L$oop_tail: - movzx eax,BYTE[rbx*1+rsi] - movzx edx,BYTE[rbx*1+rsp] - lea rbx,[1+rbx] - xor eax,edx - mov BYTE[((-1))+rbx*1+rdi],al - dec rbp - jnz NEAR $L$oop_tail - -$L$done: - lea rsi,[((64+24+48))+rsp] - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$no_data: - mov rdi,QWORD[8+rsp] ;WIN64 
epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ChaCha20_ctr32: - -ALIGN 32 -ChaCha20_ssse3: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ChaCha20_ssse3: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - -$L$ChaCha20_ssse3: - - mov r9,rsp - - cmp rdx,128 - ja NEAR $L$ChaCha20_4x - -$L$do_sse3_after_all: - sub rsp,64+40 - movaps XMMWORD[(-40)+r9],xmm6 - movaps XMMWORD[(-24)+r9],xmm7 -$L$ssse3_body: - movdqa xmm0,XMMWORD[$L$sigma] - movdqu xmm1,XMMWORD[rcx] - movdqu xmm2,XMMWORD[16+rcx] - movdqu xmm3,XMMWORD[r8] - movdqa xmm6,XMMWORD[$L$rot16] - movdqa xmm7,XMMWORD[$L$rot24] - - movdqa XMMWORD[rsp],xmm0 - movdqa XMMWORD[16+rsp],xmm1 - movdqa XMMWORD[32+rsp],xmm2 - movdqa XMMWORD[48+rsp],xmm3 - mov r8,10 - jmp NEAR $L$oop_ssse3 - -ALIGN 32 -$L$oop_outer_ssse3: - movdqa xmm3,XMMWORD[$L$one] - movdqa xmm0,XMMWORD[rsp] - movdqa xmm1,XMMWORD[16+rsp] - movdqa xmm2,XMMWORD[32+rsp] - paddd xmm3,XMMWORD[48+rsp] - mov r8,10 - movdqa XMMWORD[48+rsp],xmm3 - jmp NEAR $L$oop_ssse3 - -ALIGN 32 -$L$oop_ssse3: - paddd xmm0,xmm1 - pxor xmm3,xmm0 -DB 102,15,56,0,222 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,20 - pslld xmm4,12 - por xmm1,xmm4 - paddd xmm0,xmm1 - pxor xmm3,xmm0 -DB 102,15,56,0,223 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,25 - pslld xmm4,7 - por xmm1,xmm4 - pshufd xmm2,xmm2,78 - pshufd xmm1,xmm1,57 - pshufd xmm3,xmm3,147 - nop - paddd xmm0,xmm1 - pxor xmm3,xmm0 -DB 102,15,56,0,222 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,20 - pslld xmm4,12 - por xmm1,xmm4 - paddd xmm0,xmm1 - pxor xmm3,xmm0 -DB 102,15,56,0,223 - paddd xmm2,xmm3 - pxor xmm1,xmm2 - movdqa xmm4,xmm1 - psrld xmm1,25 - pslld xmm4,7 - por xmm1,xmm4 - pshufd xmm2,xmm2,78 - pshufd xmm1,xmm1,147 - pshufd xmm3,xmm3,57 - dec r8 - jnz NEAR $L$oop_ssse3 - paddd xmm0,XMMWORD[rsp] - paddd xmm1,XMMWORD[16+rsp] - paddd xmm2,XMMWORD[32+rsp] - paddd 
xmm3,XMMWORD[48+rsp] - - cmp rdx,64 - jb NEAR $L$tail_ssse3 - - movdqu xmm4,XMMWORD[rsi] - movdqu xmm5,XMMWORD[16+rsi] - pxor xmm0,xmm4 - movdqu xmm4,XMMWORD[32+rsi] - pxor xmm1,xmm5 - movdqu xmm5,XMMWORD[48+rsi] - lea rsi,[64+rsi] - pxor xmm2,xmm4 - pxor xmm3,xmm5 - - movdqu XMMWORD[rdi],xmm0 - movdqu XMMWORD[16+rdi],xmm1 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm3 - lea rdi,[64+rdi] - - sub rdx,64 - jnz NEAR $L$oop_outer_ssse3 - - jmp NEAR $L$done_ssse3 - -ALIGN 16 -$L$tail_ssse3: - movdqa XMMWORD[rsp],xmm0 - movdqa XMMWORD[16+rsp],xmm1 - movdqa XMMWORD[32+rsp],xmm2 - movdqa XMMWORD[48+rsp],xmm3 - xor r8,r8 - -$L$oop_tail_ssse3: - movzx eax,BYTE[r8*1+rsi] - movzx ecx,BYTE[r8*1+rsp] - lea r8,[1+r8] - xor eax,ecx - mov BYTE[((-1))+r8*1+rdi],al - dec rdx - jnz NEAR $L$oop_tail_ssse3 - -$L$done_ssse3: - movaps xmm6,XMMWORD[((-40))+r9] - movaps xmm7,XMMWORD[((-24))+r9] - lea rsp,[r9] - -$L$ssse3_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ChaCha20_ssse3: - -ALIGN 32 -ChaCha20_4x: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ChaCha20_4x: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - -$L$ChaCha20_4x: - - mov r9,rsp - - mov r11,r10 - shr r10,32 - test r10,32 - jnz NEAR $L$ChaCha20_8x - cmp rdx,192 - ja NEAR $L$proceed4x - - and r11,71303168 - cmp r11,4194304 - je NEAR $L$do_sse3_after_all - -$L$proceed4x: - sub rsp,0x140+168 - movaps XMMWORD[(-168)+r9],xmm6 - movaps XMMWORD[(-152)+r9],xmm7 - movaps XMMWORD[(-136)+r9],xmm8 - movaps XMMWORD[(-120)+r9],xmm9 - movaps XMMWORD[(-104)+r9],xmm10 - movaps XMMWORD[(-88)+r9],xmm11 - movaps XMMWORD[(-72)+r9],xmm12 - movaps XMMWORD[(-56)+r9],xmm13 - movaps XMMWORD[(-40)+r9],xmm14 - movaps XMMWORD[(-24)+r9],xmm15 -$L$4x_body: - movdqa xmm11,XMMWORD[$L$sigma] - movdqu xmm15,XMMWORD[rcx] - movdqu xmm7,XMMWORD[16+rcx] - movdqu xmm3,XMMWORD[r8] - lea rcx,[256+rsp] - lea 
r10,[$L$rot16] - lea r11,[$L$rot24] - - pshufd xmm8,xmm11,0x00 - pshufd xmm9,xmm11,0x55 - movdqa XMMWORD[64+rsp],xmm8 - pshufd xmm10,xmm11,0xaa - movdqa XMMWORD[80+rsp],xmm9 - pshufd xmm11,xmm11,0xff - movdqa XMMWORD[96+rsp],xmm10 - movdqa XMMWORD[112+rsp],xmm11 - - pshufd xmm12,xmm15,0x00 - pshufd xmm13,xmm15,0x55 - movdqa XMMWORD[(128-256)+rcx],xmm12 - pshufd xmm14,xmm15,0xaa - movdqa XMMWORD[(144-256)+rcx],xmm13 - pshufd xmm15,xmm15,0xff - movdqa XMMWORD[(160-256)+rcx],xmm14 - movdqa XMMWORD[(176-256)+rcx],xmm15 - - pshufd xmm4,xmm7,0x00 - pshufd xmm5,xmm7,0x55 - movdqa XMMWORD[(192-256)+rcx],xmm4 - pshufd xmm6,xmm7,0xaa - movdqa XMMWORD[(208-256)+rcx],xmm5 - pshufd xmm7,xmm7,0xff - movdqa XMMWORD[(224-256)+rcx],xmm6 - movdqa XMMWORD[(240-256)+rcx],xmm7 - - pshufd xmm0,xmm3,0x00 - pshufd xmm1,xmm3,0x55 - paddd xmm0,XMMWORD[$L$inc] - pshufd xmm2,xmm3,0xaa - movdqa XMMWORD[(272-256)+rcx],xmm1 - pshufd xmm3,xmm3,0xff - movdqa XMMWORD[(288-256)+rcx],xmm2 - movdqa XMMWORD[(304-256)+rcx],xmm3 - - jmp NEAR $L$oop_enter4x - -ALIGN 32 -$L$oop_outer4x: - movdqa xmm8,XMMWORD[64+rsp] - movdqa xmm9,XMMWORD[80+rsp] - movdqa xmm10,XMMWORD[96+rsp] - movdqa xmm11,XMMWORD[112+rsp] - movdqa xmm12,XMMWORD[((128-256))+rcx] - movdqa xmm13,XMMWORD[((144-256))+rcx] - movdqa xmm14,XMMWORD[((160-256))+rcx] - movdqa xmm15,XMMWORD[((176-256))+rcx] - movdqa xmm4,XMMWORD[((192-256))+rcx] - movdqa xmm5,XMMWORD[((208-256))+rcx] - movdqa xmm6,XMMWORD[((224-256))+rcx] - movdqa xmm7,XMMWORD[((240-256))+rcx] - movdqa xmm0,XMMWORD[((256-256))+rcx] - movdqa xmm1,XMMWORD[((272-256))+rcx] - movdqa xmm2,XMMWORD[((288-256))+rcx] - movdqa xmm3,XMMWORD[((304-256))+rcx] - paddd xmm0,XMMWORD[$L$four] - -$L$oop_enter4x: - movdqa XMMWORD[32+rsp],xmm6 - movdqa XMMWORD[48+rsp],xmm7 - movdqa xmm7,XMMWORD[r10] - mov eax,10 - movdqa XMMWORD[(256-256)+rcx],xmm0 - jmp NEAR $L$oop4x - -ALIGN 32 -$L$oop4x: - paddd xmm8,xmm12 - paddd xmm9,xmm13 - pxor xmm0,xmm8 - pxor xmm1,xmm9 -DB 102,15,56,0,199 -DB 102,15,56,0,207 - 
paddd xmm4,xmm0 - paddd xmm5,xmm1 - pxor xmm12,xmm4 - pxor xmm13,xmm5 - movdqa xmm6,xmm12 - pslld xmm12,12 - psrld xmm6,20 - movdqa xmm7,xmm13 - pslld xmm13,12 - por xmm12,xmm6 - psrld xmm7,20 - movdqa xmm6,XMMWORD[r11] - por xmm13,xmm7 - paddd xmm8,xmm12 - paddd xmm9,xmm13 - pxor xmm0,xmm8 - pxor xmm1,xmm9 -DB 102,15,56,0,198 -DB 102,15,56,0,206 - paddd xmm4,xmm0 - paddd xmm5,xmm1 - pxor xmm12,xmm4 - pxor xmm13,xmm5 - movdqa xmm7,xmm12 - pslld xmm12,7 - psrld xmm7,25 - movdqa xmm6,xmm13 - pslld xmm13,7 - por xmm12,xmm7 - psrld xmm6,25 - movdqa xmm7,XMMWORD[r10] - por xmm13,xmm6 - movdqa XMMWORD[rsp],xmm4 - movdqa XMMWORD[16+rsp],xmm5 - movdqa xmm4,XMMWORD[32+rsp] - movdqa xmm5,XMMWORD[48+rsp] - paddd xmm10,xmm14 - paddd xmm11,xmm15 - pxor xmm2,xmm10 - pxor xmm3,xmm11 -DB 102,15,56,0,215 -DB 102,15,56,0,223 - paddd xmm4,xmm2 - paddd xmm5,xmm3 - pxor xmm14,xmm4 - pxor xmm15,xmm5 - movdqa xmm6,xmm14 - pslld xmm14,12 - psrld xmm6,20 - movdqa xmm7,xmm15 - pslld xmm15,12 - por xmm14,xmm6 - psrld xmm7,20 - movdqa xmm6,XMMWORD[r11] - por xmm15,xmm7 - paddd xmm10,xmm14 - paddd xmm11,xmm15 - pxor xmm2,xmm10 - pxor xmm3,xmm11 -DB 102,15,56,0,214 -DB 102,15,56,0,222 - paddd xmm4,xmm2 - paddd xmm5,xmm3 - pxor xmm14,xmm4 - pxor xmm15,xmm5 - movdqa xmm7,xmm14 - pslld xmm14,7 - psrld xmm7,25 - movdqa xmm6,xmm15 - pslld xmm15,7 - por xmm14,xmm7 - psrld xmm6,25 - movdqa xmm7,XMMWORD[r10] - por xmm15,xmm6 - paddd xmm8,xmm13 - paddd xmm9,xmm14 - pxor xmm3,xmm8 - pxor xmm0,xmm9 -DB 102,15,56,0,223 -DB 102,15,56,0,199 - paddd xmm4,xmm3 - paddd xmm5,xmm0 - pxor xmm13,xmm4 - pxor xmm14,xmm5 - movdqa xmm6,xmm13 - pslld xmm13,12 - psrld xmm6,20 - movdqa xmm7,xmm14 - pslld xmm14,12 - por xmm13,xmm6 - psrld xmm7,20 - movdqa xmm6,XMMWORD[r11] - por xmm14,xmm7 - paddd xmm8,xmm13 - paddd xmm9,xmm14 - pxor xmm3,xmm8 - pxor xmm0,xmm9 -DB 102,15,56,0,222 -DB 102,15,56,0,198 - paddd xmm4,xmm3 - paddd xmm5,xmm0 - pxor xmm13,xmm4 - pxor xmm14,xmm5 - movdqa xmm7,xmm13 - pslld xmm13,7 - psrld xmm7,25 - 
movdqa xmm6,xmm14 - pslld xmm14,7 - por xmm13,xmm7 - psrld xmm6,25 - movdqa xmm7,XMMWORD[r10] - por xmm14,xmm6 - movdqa XMMWORD[32+rsp],xmm4 - movdqa XMMWORD[48+rsp],xmm5 - movdqa xmm4,XMMWORD[rsp] - movdqa xmm5,XMMWORD[16+rsp] - paddd xmm10,xmm15 - paddd xmm11,xmm12 - pxor xmm1,xmm10 - pxor xmm2,xmm11 -DB 102,15,56,0,207 -DB 102,15,56,0,215 - paddd xmm4,xmm1 - paddd xmm5,xmm2 - pxor xmm15,xmm4 - pxor xmm12,xmm5 - movdqa xmm6,xmm15 - pslld xmm15,12 - psrld xmm6,20 - movdqa xmm7,xmm12 - pslld xmm12,12 - por xmm15,xmm6 - psrld xmm7,20 - movdqa xmm6,XMMWORD[r11] - por xmm12,xmm7 - paddd xmm10,xmm15 - paddd xmm11,xmm12 - pxor xmm1,xmm10 - pxor xmm2,xmm11 -DB 102,15,56,0,206 -DB 102,15,56,0,214 - paddd xmm4,xmm1 - paddd xmm5,xmm2 - pxor xmm15,xmm4 - pxor xmm12,xmm5 - movdqa xmm7,xmm15 - pslld xmm15,7 - psrld xmm7,25 - movdqa xmm6,xmm12 - pslld xmm12,7 - por xmm15,xmm7 - psrld xmm6,25 - movdqa xmm7,XMMWORD[r10] - por xmm12,xmm6 - dec eax - jnz NEAR $L$oop4x - - paddd xmm8,XMMWORD[64+rsp] - paddd xmm9,XMMWORD[80+rsp] - paddd xmm10,XMMWORD[96+rsp] - paddd xmm11,XMMWORD[112+rsp] - - movdqa xmm6,xmm8 - punpckldq xmm8,xmm9 - movdqa xmm7,xmm10 - punpckldq xmm10,xmm11 - punpckhdq xmm6,xmm9 - punpckhdq xmm7,xmm11 - movdqa xmm9,xmm8 - punpcklqdq xmm8,xmm10 - movdqa xmm11,xmm6 - punpcklqdq xmm6,xmm7 - punpckhqdq xmm9,xmm10 - punpckhqdq xmm11,xmm7 - paddd xmm12,XMMWORD[((128-256))+rcx] - paddd xmm13,XMMWORD[((144-256))+rcx] - paddd xmm14,XMMWORD[((160-256))+rcx] - paddd xmm15,XMMWORD[((176-256))+rcx] - - movdqa XMMWORD[rsp],xmm8 - movdqa XMMWORD[16+rsp],xmm9 - movdqa xmm8,XMMWORD[32+rsp] - movdqa xmm9,XMMWORD[48+rsp] - - movdqa xmm10,xmm12 - punpckldq xmm12,xmm13 - movdqa xmm7,xmm14 - punpckldq xmm14,xmm15 - punpckhdq xmm10,xmm13 - punpckhdq xmm7,xmm15 - movdqa xmm13,xmm12 - punpcklqdq xmm12,xmm14 - movdqa xmm15,xmm10 - punpcklqdq xmm10,xmm7 - punpckhqdq xmm13,xmm14 - punpckhqdq xmm15,xmm7 - paddd xmm4,XMMWORD[((192-256))+rcx] - paddd xmm5,XMMWORD[((208-256))+rcx] - paddd 
xmm8,XMMWORD[((224-256))+rcx] - paddd xmm9,XMMWORD[((240-256))+rcx] - - movdqa XMMWORD[32+rsp],xmm6 - movdqa XMMWORD[48+rsp],xmm11 - - movdqa xmm14,xmm4 - punpckldq xmm4,xmm5 - movdqa xmm7,xmm8 - punpckldq xmm8,xmm9 - punpckhdq xmm14,xmm5 - punpckhdq xmm7,xmm9 - movdqa xmm5,xmm4 - punpcklqdq xmm4,xmm8 - movdqa xmm9,xmm14 - punpcklqdq xmm14,xmm7 - punpckhqdq xmm5,xmm8 - punpckhqdq xmm9,xmm7 - paddd xmm0,XMMWORD[((256-256))+rcx] - paddd xmm1,XMMWORD[((272-256))+rcx] - paddd xmm2,XMMWORD[((288-256))+rcx] - paddd xmm3,XMMWORD[((304-256))+rcx] - - movdqa xmm8,xmm0 - punpckldq xmm0,xmm1 - movdqa xmm7,xmm2 - punpckldq xmm2,xmm3 - punpckhdq xmm8,xmm1 - punpckhdq xmm7,xmm3 - movdqa xmm1,xmm0 - punpcklqdq xmm0,xmm2 - movdqa xmm3,xmm8 - punpcklqdq xmm8,xmm7 - punpckhqdq xmm1,xmm2 - punpckhqdq xmm3,xmm7 - cmp rdx,64*4 - jb NEAR $L$tail4x - - movdqu xmm6,XMMWORD[rsi] - movdqu xmm11,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[rsp] - pxor xmm11,xmm12 - pxor xmm2,xmm4 - pxor xmm7,xmm0 - - movdqu XMMWORD[rdi],xmm6 - movdqu xmm6,XMMWORD[64+rsi] - movdqu XMMWORD[16+rdi],xmm11 - movdqu xmm11,XMMWORD[80+rsi] - movdqu XMMWORD[32+rdi],xmm2 - movdqu xmm2,XMMWORD[96+rsi] - movdqu XMMWORD[48+rdi],xmm7 - movdqu xmm7,XMMWORD[112+rsi] - lea rsi,[128+rsi] - pxor xmm6,XMMWORD[16+rsp] - pxor xmm11,xmm13 - pxor xmm2,xmm5 - pxor xmm7,xmm1 - - movdqu XMMWORD[64+rdi],xmm6 - movdqu xmm6,XMMWORD[rsi] - movdqu XMMWORD[80+rdi],xmm11 - movdqu xmm11,XMMWORD[16+rsi] - movdqu XMMWORD[96+rdi],xmm2 - movdqu xmm2,XMMWORD[32+rsi] - movdqu XMMWORD[112+rdi],xmm7 - lea rdi,[128+rdi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[32+rsp] - pxor xmm11,xmm10 - pxor xmm2,xmm14 - pxor xmm7,xmm8 - - movdqu XMMWORD[rdi],xmm6 - movdqu xmm6,XMMWORD[64+rsi] - movdqu XMMWORD[16+rdi],xmm11 - movdqu xmm11,XMMWORD[80+rsi] - movdqu XMMWORD[32+rdi],xmm2 - movdqu xmm2,XMMWORD[96+rsi] - movdqu XMMWORD[48+rdi],xmm7 - movdqu xmm7,XMMWORD[112+rsi] - lea rsi,[128+rsi] - pxor 
xmm6,XMMWORD[48+rsp] - pxor xmm11,xmm15 - pxor xmm2,xmm9 - pxor xmm7,xmm3 - movdqu XMMWORD[64+rdi],xmm6 - movdqu XMMWORD[80+rdi],xmm11 - movdqu XMMWORD[96+rdi],xmm2 - movdqu XMMWORD[112+rdi],xmm7 - lea rdi,[128+rdi] - - sub rdx,64*4 - jnz NEAR $L$oop_outer4x - - jmp NEAR $L$done4x - -$L$tail4x: - cmp rdx,192 - jae NEAR $L$192_or_more4x - cmp rdx,128 - jae NEAR $L$128_or_more4x - cmp rdx,64 - jae NEAR $L$64_or_more4x - - - xor r10,r10 - - movdqa XMMWORD[16+rsp],xmm12 - movdqa XMMWORD[32+rsp],xmm4 - movdqa XMMWORD[48+rsp],xmm0 - jmp NEAR $L$oop_tail4x - -ALIGN 32 -$L$64_or_more4x: - movdqu xmm6,XMMWORD[rsi] - movdqu xmm11,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[rsp] - pxor xmm11,xmm12 - pxor xmm2,xmm4 - pxor xmm7,xmm0 - movdqu XMMWORD[rdi],xmm6 - movdqu XMMWORD[16+rdi],xmm11 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm7 - je NEAR $L$done4x - - movdqa xmm6,XMMWORD[16+rsp] - lea rsi,[64+rsi] - xor r10,r10 - movdqa XMMWORD[rsp],xmm6 - movdqa XMMWORD[16+rsp],xmm13 - lea rdi,[64+rdi] - movdqa XMMWORD[32+rsp],xmm5 - sub rdx,64 - movdqa XMMWORD[48+rsp],xmm1 - jmp NEAR $L$oop_tail4x - -ALIGN 32 -$L$128_or_more4x: - movdqu xmm6,XMMWORD[rsi] - movdqu xmm11,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[rsp] - pxor xmm11,xmm12 - pxor xmm2,xmm4 - pxor xmm7,xmm0 - - movdqu XMMWORD[rdi],xmm6 - movdqu xmm6,XMMWORD[64+rsi] - movdqu XMMWORD[16+rdi],xmm11 - movdqu xmm11,XMMWORD[80+rsi] - movdqu XMMWORD[32+rdi],xmm2 - movdqu xmm2,XMMWORD[96+rsi] - movdqu XMMWORD[48+rdi],xmm7 - movdqu xmm7,XMMWORD[112+rsi] - pxor xmm6,XMMWORD[16+rsp] - pxor xmm11,xmm13 - pxor xmm2,xmm5 - pxor xmm7,xmm1 - movdqu XMMWORD[64+rdi],xmm6 - movdqu XMMWORD[80+rdi],xmm11 - movdqu XMMWORD[96+rdi],xmm2 - movdqu XMMWORD[112+rdi],xmm7 - je NEAR $L$done4x - - movdqa xmm6,XMMWORD[32+rsp] - lea rsi,[128+rsi] - xor r10,r10 - movdqa XMMWORD[rsp],xmm6 - movdqa XMMWORD[16+rsp],xmm10 - lea rdi,[128+rdi] - 
movdqa XMMWORD[32+rsp],xmm14 - sub rdx,128 - movdqa XMMWORD[48+rsp],xmm8 - jmp NEAR $L$oop_tail4x - -ALIGN 32 -$L$192_or_more4x: - movdqu xmm6,XMMWORD[rsi] - movdqu xmm11,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[rsp] - pxor xmm11,xmm12 - pxor xmm2,xmm4 - pxor xmm7,xmm0 - - movdqu XMMWORD[rdi],xmm6 - movdqu xmm6,XMMWORD[64+rsi] - movdqu XMMWORD[16+rdi],xmm11 - movdqu xmm11,XMMWORD[80+rsi] - movdqu XMMWORD[32+rdi],xmm2 - movdqu xmm2,XMMWORD[96+rsi] - movdqu XMMWORD[48+rdi],xmm7 - movdqu xmm7,XMMWORD[112+rsi] - lea rsi,[128+rsi] - pxor xmm6,XMMWORD[16+rsp] - pxor xmm11,xmm13 - pxor xmm2,xmm5 - pxor xmm7,xmm1 - - movdqu XMMWORD[64+rdi],xmm6 - movdqu xmm6,XMMWORD[rsi] - movdqu XMMWORD[80+rdi],xmm11 - movdqu xmm11,XMMWORD[16+rsi] - movdqu XMMWORD[96+rdi],xmm2 - movdqu xmm2,XMMWORD[32+rsi] - movdqu XMMWORD[112+rdi],xmm7 - lea rdi,[128+rdi] - movdqu xmm7,XMMWORD[48+rsi] - pxor xmm6,XMMWORD[32+rsp] - pxor xmm11,xmm10 - pxor xmm2,xmm14 - pxor xmm7,xmm8 - movdqu XMMWORD[rdi],xmm6 - movdqu XMMWORD[16+rdi],xmm11 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm7 - je NEAR $L$done4x - - movdqa xmm6,XMMWORD[48+rsp] - lea rsi,[64+rsi] - xor r10,r10 - movdqa XMMWORD[rsp],xmm6 - movdqa XMMWORD[16+rsp],xmm15 - lea rdi,[64+rdi] - movdqa XMMWORD[32+rsp],xmm9 - sub rdx,192 - movdqa XMMWORD[48+rsp],xmm3 - -$L$oop_tail4x: - movzx eax,BYTE[r10*1+rsi] - movzx ecx,BYTE[r10*1+rsp] - lea r10,[1+r10] - xor eax,ecx - mov BYTE[((-1))+r10*1+rdi],al - dec rdx - jnz NEAR $L$oop_tail4x - -$L$done4x: - movaps xmm6,XMMWORD[((-168))+r9] - movaps xmm7,XMMWORD[((-152))+r9] - movaps xmm8,XMMWORD[((-136))+r9] - movaps xmm9,XMMWORD[((-120))+r9] - movaps xmm10,XMMWORD[((-104))+r9] - movaps xmm11,XMMWORD[((-88))+r9] - movaps xmm12,XMMWORD[((-72))+r9] - movaps xmm13,XMMWORD[((-56))+r9] - movaps xmm14,XMMWORD[((-40))+r9] - movaps xmm15,XMMWORD[((-24))+r9] - lea rsp,[r9] - -$L$4x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov 
rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ChaCha20_4x: - -ALIGN 32 -ChaCha20_8x: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ChaCha20_8x: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - -$L$ChaCha20_8x: - - mov r9,rsp - - sub rsp,0x280+168 - and rsp,-32 - movaps XMMWORD[(-168)+r9],xmm6 - movaps XMMWORD[(-152)+r9],xmm7 - movaps XMMWORD[(-136)+r9],xmm8 - movaps XMMWORD[(-120)+r9],xmm9 - movaps XMMWORD[(-104)+r9],xmm10 - movaps XMMWORD[(-88)+r9],xmm11 - movaps XMMWORD[(-72)+r9],xmm12 - movaps XMMWORD[(-56)+r9],xmm13 - movaps XMMWORD[(-40)+r9],xmm14 - movaps XMMWORD[(-24)+r9],xmm15 -$L$8x_body: - vzeroupper - - - - - - - - - - - vbroadcasti128 ymm11,XMMWORD[$L$sigma] - vbroadcasti128 ymm3,XMMWORD[rcx] - vbroadcasti128 ymm15,XMMWORD[16+rcx] - vbroadcasti128 ymm7,XMMWORD[r8] - lea rcx,[256+rsp] - lea rax,[512+rsp] - lea r10,[$L$rot16] - lea r11,[$L$rot24] - - vpshufd ymm8,ymm11,0x00 - vpshufd ymm9,ymm11,0x55 - vmovdqa YMMWORD[(128-256)+rcx],ymm8 - vpshufd ymm10,ymm11,0xaa - vmovdqa YMMWORD[(160-256)+rcx],ymm9 - vpshufd ymm11,ymm11,0xff - vmovdqa YMMWORD[(192-256)+rcx],ymm10 - vmovdqa YMMWORD[(224-256)+rcx],ymm11 - - vpshufd ymm0,ymm3,0x00 - vpshufd ymm1,ymm3,0x55 - vmovdqa YMMWORD[(256-256)+rcx],ymm0 - vpshufd ymm2,ymm3,0xaa - vmovdqa YMMWORD[(288-256)+rcx],ymm1 - vpshufd ymm3,ymm3,0xff - vmovdqa YMMWORD[(320-256)+rcx],ymm2 - vmovdqa YMMWORD[(352-256)+rcx],ymm3 - - vpshufd ymm12,ymm15,0x00 - vpshufd ymm13,ymm15,0x55 - vmovdqa YMMWORD[(384-512)+rax],ymm12 - vpshufd ymm14,ymm15,0xaa - vmovdqa YMMWORD[(416-512)+rax],ymm13 - vpshufd ymm15,ymm15,0xff - vmovdqa YMMWORD[(448-512)+rax],ymm14 - vmovdqa YMMWORD[(480-512)+rax],ymm15 - - vpshufd ymm4,ymm7,0x00 - vpshufd ymm5,ymm7,0x55 - vpaddd ymm4,ymm4,YMMWORD[$L$incy] - vpshufd ymm6,ymm7,0xaa - vmovdqa YMMWORD[(544-512)+rax],ymm5 - vpshufd ymm7,ymm7,0xff - vmovdqa YMMWORD[(576-512)+rax],ymm6 - vmovdqa YMMWORD[(608-512)+rax],ymm7 - - jmp 
NEAR $L$oop_enter8x - -ALIGN 32 -$L$oop_outer8x: - vmovdqa ymm8,YMMWORD[((128-256))+rcx] - vmovdqa ymm9,YMMWORD[((160-256))+rcx] - vmovdqa ymm10,YMMWORD[((192-256))+rcx] - vmovdqa ymm11,YMMWORD[((224-256))+rcx] - vmovdqa ymm0,YMMWORD[((256-256))+rcx] - vmovdqa ymm1,YMMWORD[((288-256))+rcx] - vmovdqa ymm2,YMMWORD[((320-256))+rcx] - vmovdqa ymm3,YMMWORD[((352-256))+rcx] - vmovdqa ymm12,YMMWORD[((384-512))+rax] - vmovdqa ymm13,YMMWORD[((416-512))+rax] - vmovdqa ymm14,YMMWORD[((448-512))+rax] - vmovdqa ymm15,YMMWORD[((480-512))+rax] - vmovdqa ymm4,YMMWORD[((512-512))+rax] - vmovdqa ymm5,YMMWORD[((544-512))+rax] - vmovdqa ymm6,YMMWORD[((576-512))+rax] - vmovdqa ymm7,YMMWORD[((608-512))+rax] - vpaddd ymm4,ymm4,YMMWORD[$L$eight] - -$L$oop_enter8x: - vmovdqa YMMWORD[64+rsp],ymm14 - vmovdqa YMMWORD[96+rsp],ymm15 - vbroadcasti128 ymm15,XMMWORD[r10] - vmovdqa YMMWORD[(512-512)+rax],ymm4 - mov eax,10 - jmp NEAR $L$oop8x - -ALIGN 32 -$L$oop8x: - vpaddd ymm8,ymm8,ymm0 - vpxor ymm4,ymm8,ymm4 - vpshufb ymm4,ymm4,ymm15 - vpaddd ymm9,ymm9,ymm1 - vpxor ymm5,ymm9,ymm5 - vpshufb ymm5,ymm5,ymm15 - vpaddd ymm12,ymm12,ymm4 - vpxor ymm0,ymm12,ymm0 - vpslld ymm14,ymm0,12 - vpsrld ymm0,ymm0,20 - vpor ymm0,ymm14,ymm0 - vbroadcasti128 ymm14,XMMWORD[r11] - vpaddd ymm13,ymm13,ymm5 - vpxor ymm1,ymm13,ymm1 - vpslld ymm15,ymm1,12 - vpsrld ymm1,ymm1,20 - vpor ymm1,ymm15,ymm1 - vpaddd ymm8,ymm8,ymm0 - vpxor ymm4,ymm8,ymm4 - vpshufb ymm4,ymm4,ymm14 - vpaddd ymm9,ymm9,ymm1 - vpxor ymm5,ymm9,ymm5 - vpshufb ymm5,ymm5,ymm14 - vpaddd ymm12,ymm12,ymm4 - vpxor ymm0,ymm12,ymm0 - vpslld ymm15,ymm0,7 - vpsrld ymm0,ymm0,25 - vpor ymm0,ymm15,ymm0 - vbroadcasti128 ymm15,XMMWORD[r10] - vpaddd ymm13,ymm13,ymm5 - vpxor ymm1,ymm13,ymm1 - vpslld ymm14,ymm1,7 - vpsrld ymm1,ymm1,25 - vpor ymm1,ymm14,ymm1 - vmovdqa YMMWORD[rsp],ymm12 - vmovdqa YMMWORD[32+rsp],ymm13 - vmovdqa ymm12,YMMWORD[64+rsp] - vmovdqa ymm13,YMMWORD[96+rsp] - vpaddd ymm10,ymm10,ymm2 - vpxor ymm6,ymm10,ymm6 - vpshufb ymm6,ymm6,ymm15 - vpaddd 
ymm11,ymm11,ymm3 - vpxor ymm7,ymm11,ymm7 - vpshufb ymm7,ymm7,ymm15 - vpaddd ymm12,ymm12,ymm6 - vpxor ymm2,ymm12,ymm2 - vpslld ymm14,ymm2,12 - vpsrld ymm2,ymm2,20 - vpor ymm2,ymm14,ymm2 - vbroadcasti128 ymm14,XMMWORD[r11] - vpaddd ymm13,ymm13,ymm7 - vpxor ymm3,ymm13,ymm3 - vpslld ymm15,ymm3,12 - vpsrld ymm3,ymm3,20 - vpor ymm3,ymm15,ymm3 - vpaddd ymm10,ymm10,ymm2 - vpxor ymm6,ymm10,ymm6 - vpshufb ymm6,ymm6,ymm14 - vpaddd ymm11,ymm11,ymm3 - vpxor ymm7,ymm11,ymm7 - vpshufb ymm7,ymm7,ymm14 - vpaddd ymm12,ymm12,ymm6 - vpxor ymm2,ymm12,ymm2 - vpslld ymm15,ymm2,7 - vpsrld ymm2,ymm2,25 - vpor ymm2,ymm15,ymm2 - vbroadcasti128 ymm15,XMMWORD[r10] - vpaddd ymm13,ymm13,ymm7 - vpxor ymm3,ymm13,ymm3 - vpslld ymm14,ymm3,7 - vpsrld ymm3,ymm3,25 - vpor ymm3,ymm14,ymm3 - vpaddd ymm8,ymm8,ymm1 - vpxor ymm7,ymm8,ymm7 - vpshufb ymm7,ymm7,ymm15 - vpaddd ymm9,ymm9,ymm2 - vpxor ymm4,ymm9,ymm4 - vpshufb ymm4,ymm4,ymm15 - vpaddd ymm12,ymm12,ymm7 - vpxor ymm1,ymm12,ymm1 - vpslld ymm14,ymm1,12 - vpsrld ymm1,ymm1,20 - vpor ymm1,ymm14,ymm1 - vbroadcasti128 ymm14,XMMWORD[r11] - vpaddd ymm13,ymm13,ymm4 - vpxor ymm2,ymm13,ymm2 - vpslld ymm15,ymm2,12 - vpsrld ymm2,ymm2,20 - vpor ymm2,ymm15,ymm2 - vpaddd ymm8,ymm8,ymm1 - vpxor ymm7,ymm8,ymm7 - vpshufb ymm7,ymm7,ymm14 - vpaddd ymm9,ymm9,ymm2 - vpxor ymm4,ymm9,ymm4 - vpshufb ymm4,ymm4,ymm14 - vpaddd ymm12,ymm12,ymm7 - vpxor ymm1,ymm12,ymm1 - vpslld ymm15,ymm1,7 - vpsrld ymm1,ymm1,25 - vpor ymm1,ymm15,ymm1 - vbroadcasti128 ymm15,XMMWORD[r10] - vpaddd ymm13,ymm13,ymm4 - vpxor ymm2,ymm13,ymm2 - vpslld ymm14,ymm2,7 - vpsrld ymm2,ymm2,25 - vpor ymm2,ymm14,ymm2 - vmovdqa YMMWORD[64+rsp],ymm12 - vmovdqa YMMWORD[96+rsp],ymm13 - vmovdqa ymm12,YMMWORD[rsp] - vmovdqa ymm13,YMMWORD[32+rsp] - vpaddd ymm10,ymm10,ymm3 - vpxor ymm5,ymm10,ymm5 - vpshufb ymm5,ymm5,ymm15 - vpaddd ymm11,ymm11,ymm0 - vpxor ymm6,ymm11,ymm6 - vpshufb ymm6,ymm6,ymm15 - vpaddd ymm12,ymm12,ymm5 - vpxor ymm3,ymm12,ymm3 - vpslld ymm14,ymm3,12 - vpsrld ymm3,ymm3,20 - vpor ymm3,ymm14,ymm3 - 
vbroadcasti128 ymm14,XMMWORD[r11] - vpaddd ymm13,ymm13,ymm6 - vpxor ymm0,ymm13,ymm0 - vpslld ymm15,ymm0,12 - vpsrld ymm0,ymm0,20 - vpor ymm0,ymm15,ymm0 - vpaddd ymm10,ymm10,ymm3 - vpxor ymm5,ymm10,ymm5 - vpshufb ymm5,ymm5,ymm14 - vpaddd ymm11,ymm11,ymm0 - vpxor ymm6,ymm11,ymm6 - vpshufb ymm6,ymm6,ymm14 - vpaddd ymm12,ymm12,ymm5 - vpxor ymm3,ymm12,ymm3 - vpslld ymm15,ymm3,7 - vpsrld ymm3,ymm3,25 - vpor ymm3,ymm15,ymm3 - vbroadcasti128 ymm15,XMMWORD[r10] - vpaddd ymm13,ymm13,ymm6 - vpxor ymm0,ymm13,ymm0 - vpslld ymm14,ymm0,7 - vpsrld ymm0,ymm0,25 - vpor ymm0,ymm14,ymm0 - dec eax - jnz NEAR $L$oop8x - - lea rax,[512+rsp] - vpaddd ymm8,ymm8,YMMWORD[((128-256))+rcx] - vpaddd ymm9,ymm9,YMMWORD[((160-256))+rcx] - vpaddd ymm10,ymm10,YMMWORD[((192-256))+rcx] - vpaddd ymm11,ymm11,YMMWORD[((224-256))+rcx] - - vpunpckldq ymm14,ymm8,ymm9 - vpunpckldq ymm15,ymm10,ymm11 - vpunpckhdq ymm8,ymm8,ymm9 - vpunpckhdq ymm10,ymm10,ymm11 - vpunpcklqdq ymm9,ymm14,ymm15 - vpunpckhqdq ymm14,ymm14,ymm15 - vpunpcklqdq ymm11,ymm8,ymm10 - vpunpckhqdq ymm8,ymm8,ymm10 - vpaddd ymm0,ymm0,YMMWORD[((256-256))+rcx] - vpaddd ymm1,ymm1,YMMWORD[((288-256))+rcx] - vpaddd ymm2,ymm2,YMMWORD[((320-256))+rcx] - vpaddd ymm3,ymm3,YMMWORD[((352-256))+rcx] - - vpunpckldq ymm10,ymm0,ymm1 - vpunpckldq ymm15,ymm2,ymm3 - vpunpckhdq ymm0,ymm0,ymm1 - vpunpckhdq ymm2,ymm2,ymm3 - vpunpcklqdq ymm1,ymm10,ymm15 - vpunpckhqdq ymm10,ymm10,ymm15 - vpunpcklqdq ymm3,ymm0,ymm2 - vpunpckhqdq ymm0,ymm0,ymm2 - vperm2i128 ymm15,ymm9,ymm1,0x20 - vperm2i128 ymm1,ymm9,ymm1,0x31 - vperm2i128 ymm9,ymm14,ymm10,0x20 - vperm2i128 ymm10,ymm14,ymm10,0x31 - vperm2i128 ymm14,ymm11,ymm3,0x20 - vperm2i128 ymm3,ymm11,ymm3,0x31 - vperm2i128 ymm11,ymm8,ymm0,0x20 - vperm2i128 ymm0,ymm8,ymm0,0x31 - vmovdqa YMMWORD[rsp],ymm15 - vmovdqa YMMWORD[32+rsp],ymm9 - vmovdqa ymm15,YMMWORD[64+rsp] - vmovdqa ymm9,YMMWORD[96+rsp] - - vpaddd ymm12,ymm12,YMMWORD[((384-512))+rax] - vpaddd ymm13,ymm13,YMMWORD[((416-512))+rax] - vpaddd 
ymm15,ymm15,YMMWORD[((448-512))+rax] - vpaddd ymm9,ymm9,YMMWORD[((480-512))+rax] - - vpunpckldq ymm2,ymm12,ymm13 - vpunpckldq ymm8,ymm15,ymm9 - vpunpckhdq ymm12,ymm12,ymm13 - vpunpckhdq ymm15,ymm15,ymm9 - vpunpcklqdq ymm13,ymm2,ymm8 - vpunpckhqdq ymm2,ymm2,ymm8 - vpunpcklqdq ymm9,ymm12,ymm15 - vpunpckhqdq ymm12,ymm12,ymm15 - vpaddd ymm4,ymm4,YMMWORD[((512-512))+rax] - vpaddd ymm5,ymm5,YMMWORD[((544-512))+rax] - vpaddd ymm6,ymm6,YMMWORD[((576-512))+rax] - vpaddd ymm7,ymm7,YMMWORD[((608-512))+rax] - - vpunpckldq ymm15,ymm4,ymm5 - vpunpckldq ymm8,ymm6,ymm7 - vpunpckhdq ymm4,ymm4,ymm5 - vpunpckhdq ymm6,ymm6,ymm7 - vpunpcklqdq ymm5,ymm15,ymm8 - vpunpckhqdq ymm15,ymm15,ymm8 - vpunpcklqdq ymm7,ymm4,ymm6 - vpunpckhqdq ymm4,ymm4,ymm6 - vperm2i128 ymm8,ymm13,ymm5,0x20 - vperm2i128 ymm5,ymm13,ymm5,0x31 - vperm2i128 ymm13,ymm2,ymm15,0x20 - vperm2i128 ymm15,ymm2,ymm15,0x31 - vperm2i128 ymm2,ymm9,ymm7,0x20 - vperm2i128 ymm7,ymm9,ymm7,0x31 - vperm2i128 ymm9,ymm12,ymm4,0x20 - vperm2i128 ymm4,ymm12,ymm4,0x31 - vmovdqa ymm6,YMMWORD[rsp] - vmovdqa ymm12,YMMWORD[32+rsp] - - cmp rdx,64*8 - jb NEAR $L$tail8x - - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - lea rsi,[128+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - lea rdi,[128+rdi] - - vpxor ymm12,ymm12,YMMWORD[rsi] - vpxor ymm13,ymm13,YMMWORD[32+rsi] - vpxor ymm10,ymm10,YMMWORD[64+rsi] - vpxor ymm15,ymm15,YMMWORD[96+rsi] - lea rsi,[128+rsi] - vmovdqu YMMWORD[rdi],ymm12 - vmovdqu YMMWORD[32+rdi],ymm13 - vmovdqu YMMWORD[64+rdi],ymm10 - vmovdqu YMMWORD[96+rdi],ymm15 - lea rdi,[128+rdi] - - vpxor ymm14,ymm14,YMMWORD[rsi] - vpxor ymm2,ymm2,YMMWORD[32+rsi] - vpxor ymm3,ymm3,YMMWORD[64+rsi] - vpxor ymm7,ymm7,YMMWORD[96+rsi] - lea rsi,[128+rsi] - vmovdqu YMMWORD[rdi],ymm14 - vmovdqu YMMWORD[32+rdi],ymm2 - vmovdqu YMMWORD[64+rdi],ymm3 - vmovdqu YMMWORD[96+rdi],ymm7 - lea 
rdi,[128+rdi] - - vpxor ymm11,ymm11,YMMWORD[rsi] - vpxor ymm9,ymm9,YMMWORD[32+rsi] - vpxor ymm0,ymm0,YMMWORD[64+rsi] - vpxor ymm4,ymm4,YMMWORD[96+rsi] - lea rsi,[128+rsi] - vmovdqu YMMWORD[rdi],ymm11 - vmovdqu YMMWORD[32+rdi],ymm9 - vmovdqu YMMWORD[64+rdi],ymm0 - vmovdqu YMMWORD[96+rdi],ymm4 - lea rdi,[128+rdi] - - sub rdx,64*8 - jnz NEAR $L$oop_outer8x - - jmp NEAR $L$done8x - -$L$tail8x: - cmp rdx,448 - jae NEAR $L$448_or_more8x - cmp rdx,384 - jae NEAR $L$384_or_more8x - cmp rdx,320 - jae NEAR $L$320_or_more8x - cmp rdx,256 - jae NEAR $L$256_or_more8x - cmp rdx,192 - jae NEAR $L$192_or_more8x - cmp rdx,128 - jae NEAR $L$128_or_more8x - cmp rdx,64 - jae NEAR $L$64_or_more8x - - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm6 - vmovdqa YMMWORD[32+rsp],ymm8 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$64_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - je NEAR $L$done8x - - lea rsi,[64+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm1 - lea rdi,[64+rdi] - sub rdx,64 - vmovdqa YMMWORD[32+rsp],ymm5 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$128_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - je NEAR $L$done8x - - lea rsi,[128+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm12 - lea rdi,[128+rdi] - sub rdx,128 - vmovdqa YMMWORD[32+rsp],ymm13 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$192_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vpxor ymm12,ymm12,YMMWORD[128+rsi] - vpxor ymm13,ymm13,YMMWORD[160+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - vmovdqu YMMWORD[128+rdi],ymm12 - vmovdqu YMMWORD[160+rdi],ymm13 - 
je NEAR $L$done8x - - lea rsi,[192+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm10 - lea rdi,[192+rdi] - sub rdx,192 - vmovdqa YMMWORD[32+rsp],ymm15 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$256_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vpxor ymm12,ymm12,YMMWORD[128+rsi] - vpxor ymm13,ymm13,YMMWORD[160+rsi] - vpxor ymm10,ymm10,YMMWORD[192+rsi] - vpxor ymm15,ymm15,YMMWORD[224+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - vmovdqu YMMWORD[128+rdi],ymm12 - vmovdqu YMMWORD[160+rdi],ymm13 - vmovdqu YMMWORD[192+rdi],ymm10 - vmovdqu YMMWORD[224+rdi],ymm15 - je NEAR $L$done8x - - lea rsi,[256+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm14 - lea rdi,[256+rdi] - sub rdx,256 - vmovdqa YMMWORD[32+rsp],ymm2 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$320_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vpxor ymm12,ymm12,YMMWORD[128+rsi] - vpxor ymm13,ymm13,YMMWORD[160+rsi] - vpxor ymm10,ymm10,YMMWORD[192+rsi] - vpxor ymm15,ymm15,YMMWORD[224+rsi] - vpxor ymm14,ymm14,YMMWORD[256+rsi] - vpxor ymm2,ymm2,YMMWORD[288+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - vmovdqu YMMWORD[128+rdi],ymm12 - vmovdqu YMMWORD[160+rdi],ymm13 - vmovdqu YMMWORD[192+rdi],ymm10 - vmovdqu YMMWORD[224+rdi],ymm15 - vmovdqu YMMWORD[256+rdi],ymm14 - vmovdqu YMMWORD[288+rdi],ymm2 - je NEAR $L$done8x - - lea rsi,[320+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm3 - lea rdi,[320+rdi] - sub rdx,320 - vmovdqa YMMWORD[32+rsp],ymm7 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$384_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vpxor ymm12,ymm12,YMMWORD[128+rsi] - vpxor 
ymm13,ymm13,YMMWORD[160+rsi] - vpxor ymm10,ymm10,YMMWORD[192+rsi] - vpxor ymm15,ymm15,YMMWORD[224+rsi] - vpxor ymm14,ymm14,YMMWORD[256+rsi] - vpxor ymm2,ymm2,YMMWORD[288+rsi] - vpxor ymm3,ymm3,YMMWORD[320+rsi] - vpxor ymm7,ymm7,YMMWORD[352+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - vmovdqu YMMWORD[128+rdi],ymm12 - vmovdqu YMMWORD[160+rdi],ymm13 - vmovdqu YMMWORD[192+rdi],ymm10 - vmovdqu YMMWORD[224+rdi],ymm15 - vmovdqu YMMWORD[256+rdi],ymm14 - vmovdqu YMMWORD[288+rdi],ymm2 - vmovdqu YMMWORD[320+rdi],ymm3 - vmovdqu YMMWORD[352+rdi],ymm7 - je NEAR $L$done8x - - lea rsi,[384+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm11 - lea rdi,[384+rdi] - sub rdx,384 - vmovdqa YMMWORD[32+rsp],ymm9 - jmp NEAR $L$oop_tail8x - -ALIGN 32 -$L$448_or_more8x: - vpxor ymm6,ymm6,YMMWORD[rsi] - vpxor ymm8,ymm8,YMMWORD[32+rsi] - vpxor ymm1,ymm1,YMMWORD[64+rsi] - vpxor ymm5,ymm5,YMMWORD[96+rsi] - vpxor ymm12,ymm12,YMMWORD[128+rsi] - vpxor ymm13,ymm13,YMMWORD[160+rsi] - vpxor ymm10,ymm10,YMMWORD[192+rsi] - vpxor ymm15,ymm15,YMMWORD[224+rsi] - vpxor ymm14,ymm14,YMMWORD[256+rsi] - vpxor ymm2,ymm2,YMMWORD[288+rsi] - vpxor ymm3,ymm3,YMMWORD[320+rsi] - vpxor ymm7,ymm7,YMMWORD[352+rsi] - vpxor ymm11,ymm11,YMMWORD[384+rsi] - vpxor ymm9,ymm9,YMMWORD[416+rsi] - vmovdqu YMMWORD[rdi],ymm6 - vmovdqu YMMWORD[32+rdi],ymm8 - vmovdqu YMMWORD[64+rdi],ymm1 - vmovdqu YMMWORD[96+rdi],ymm5 - vmovdqu YMMWORD[128+rdi],ymm12 - vmovdqu YMMWORD[160+rdi],ymm13 - vmovdqu YMMWORD[192+rdi],ymm10 - vmovdqu YMMWORD[224+rdi],ymm15 - vmovdqu YMMWORD[256+rdi],ymm14 - vmovdqu YMMWORD[288+rdi],ymm2 - vmovdqu YMMWORD[320+rdi],ymm3 - vmovdqu YMMWORD[352+rdi],ymm7 - vmovdqu YMMWORD[384+rdi],ymm11 - vmovdqu YMMWORD[416+rdi],ymm9 - je NEAR $L$done8x - - lea rsi,[448+rsi] - xor r10,r10 - vmovdqa YMMWORD[rsp],ymm0 - lea rdi,[448+rdi] - sub rdx,448 - vmovdqa YMMWORD[32+rsp],ymm4 - -$L$oop_tail8x: - movzx eax,BYTE[r10*1+rsi] - movzx ecx,BYTE[r10*1+rsp] - 
lea r10,[1+r10] - xor eax,ecx - mov BYTE[((-1))+r10*1+rdi],al - dec rdx - jnz NEAR $L$oop_tail8x - -$L$done8x: - vzeroall - movaps xmm6,XMMWORD[((-168))+r9] - movaps xmm7,XMMWORD[((-152))+r9] - movaps xmm8,XMMWORD[((-136))+r9] - movaps xmm9,XMMWORD[((-120))+r9] - movaps xmm10,XMMWORD[((-104))+r9] - movaps xmm11,XMMWORD[((-88))+r9] - movaps xmm12,XMMWORD[((-72))+r9] - movaps xmm13,XMMWORD[((-56))+r9] - movaps xmm14,XMMWORD[((-40))+r9] - movaps xmm15,XMMWORD[((-24))+r9] - lea rsp,[r9] - -$L$8x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ChaCha20_8x: -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - lea r10,[$L$ctr32_body] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - lea r10,[$L$no_data] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rax,[((64+24+48))+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop 
rdi - pop rsi - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -ssse3_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[192+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rsi,[((-40))+rax] - lea rdi,[512+r8] - mov ecx,4 - DD 0xa548f3fc - - jmp NEAR $L$common_seh_tail - - - -ALIGN 16 -full_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[192+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rsi,[((-168))+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - - jmp NEAR $L$common_seh_tail - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_ChaCha20_ctr32 wrt ..imagebase - DD $L$SEH_end_ChaCha20_ctr32 wrt ..imagebase - DD $L$SEH_info_ChaCha20_ctr32 wrt ..imagebase - - DD $L$SEH_begin_ChaCha20_ssse3 wrt ..imagebase - DD $L$SEH_end_ChaCha20_ssse3 wrt ..imagebase - DD $L$SEH_info_ChaCha20_ssse3 wrt ..imagebase - - DD $L$SEH_begin_ChaCha20_4x wrt ..imagebase - DD $L$SEH_end_ChaCha20_4x wrt ..imagebase - DD $L$SEH_info_ChaCha20_4x wrt ..imagebase - DD $L$SEH_begin_ChaCha20_8x wrt ..imagebase - DD $L$SEH_end_ChaCha20_8x wrt ..imagebase - DD $L$SEH_info_ChaCha20_8x wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_ChaCha20_ctr32: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - -$L$SEH_info_ChaCha20_ssse3: -DB 9,0,0,0 - DD ssse3_handler wrt ..imagebase - DD $L$ssse3_body wrt ..imagebase,$L$ssse3_epilogue wrt 
..imagebase - -$L$SEH_info_ChaCha20_4x: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$4x_body wrt ..imagebase,$L$4x_epilogue wrt ..imagebase -$L$SEH_info_ChaCha20_8x: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$8x_body wrt ..imagebase,$L$8x_epilogue wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm deleted file mode 100644 index e711826b14..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm +++ /dev/null @@ -1,3277 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .data data align=8 - - -ALIGN 16 -one: - DQ 1,0 -two: - DQ 2,0 -three: - DQ 3,0 -four: - DQ 4,0 -five: - DQ 5,0 -six: - DQ 6,0 -seven: - DQ 7,0 -eight: - DQ 8,0 - -OR_MASK: - DD 0x00000000,0x00000000,0x00000000,0x80000000 -poly: - DQ 0x1,0xc200000000000000 -mask: - DD 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -con1: - DD 1,1,1,1 -con2: - DD 0x1b,0x1b,0x1b,0x1b -con3: -DB -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7 -and_mask: - DD 0,0xffffffff,0xffffffff,0xffffffff -section .text code align=64 - - -ALIGN 16 -GFMUL: - - vpclmulqdq xmm2,xmm0,xmm1,0x00 - vpclmulqdq xmm5,xmm0,xmm1,0x11 - vpclmulqdq xmm3,xmm0,xmm1,0x10 - vpclmulqdq xmm4,xmm0,xmm1,0x01 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm3,8 - vpsrldq xmm3,xmm3,8 - vpxor xmm2,xmm2,xmm4 - vpxor xmm5,xmm5,xmm3 - - vpclmulqdq xmm3,xmm2,XMMWORD[poly],0x10 - vpshufd xmm4,xmm2,78 - vpxor xmm2,xmm3,xmm4 - - vpclmulqdq xmm3,xmm2,XMMWORD[poly],0x10 - vpshufd xmm4,xmm2,78 - vpxor xmm2,xmm3,xmm4 - - vpxor xmm0,xmm2,xmm5 - DB 0F3h,0C3h ;repret - - -global aesgcmsiv_htable_init - -ALIGN 16 -aesgcmsiv_htable_init: - mov 
QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesgcmsiv_htable_init: - mov rdi,rcx - mov rsi,rdx - - - - vmovdqa xmm0,XMMWORD[rsi] - vmovdqa xmm1,xmm0 - vmovdqa XMMWORD[rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[16+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[32+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[48+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[64+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[80+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[96+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[112+rdi],xmm0 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesgcmsiv_htable_init: -global aesgcmsiv_htable6_init - -ALIGN 16 -aesgcmsiv_htable6_init: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesgcmsiv_htable6_init: - mov rdi,rcx - mov rsi,rdx - - - - vmovdqa xmm0,XMMWORD[rsi] - vmovdqa xmm1,xmm0 - vmovdqa XMMWORD[rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[16+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[32+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[48+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[64+rdi],xmm0 - call GFMUL - vmovdqa XMMWORD[80+rdi],xmm0 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesgcmsiv_htable6_init: -global aesgcmsiv_htable_polyval - -ALIGN 16 -aesgcmsiv_htable_polyval: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesgcmsiv_htable_polyval: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - - - - test rdx,rdx - jnz NEAR $L$htable_polyval_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$htable_polyval_start: - vzeroall - - - - mov r11,rdx - and r11,127 - - jz NEAR $L$htable_polyval_no_prefix - - vpxor xmm9,xmm9,xmm9 - vmovdqa xmm1,XMMWORD[rcx] - sub rdx,r11 - - sub r11,16 - - - vmovdqu xmm0,XMMWORD[rsi] - vpxor xmm0,xmm0,xmm1 - - vpclmulqdq xmm5,xmm0,XMMWORD[r11*1+rdi],0x01 - vpclmulqdq 
xmm3,xmm0,XMMWORD[r11*1+rdi],0x00 - vpclmulqdq xmm4,xmm0,XMMWORD[r11*1+rdi],0x11 - vpclmulqdq xmm6,xmm0,XMMWORD[r11*1+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - lea rsi,[16+rsi] - test r11,r11 - jnz NEAR $L$htable_polyval_prefix_loop - jmp NEAR $L$htable_polyval_prefix_complete - - -ALIGN 64 -$L$htable_polyval_prefix_loop: - sub r11,16 - - vmovdqu xmm0,XMMWORD[rsi] - - vpclmulqdq xmm6,xmm0,XMMWORD[r11*1+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[r11*1+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[r11*1+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[r11*1+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - test r11,r11 - - lea rsi,[16+rsi] - - jnz NEAR $L$htable_polyval_prefix_loop - -$L$htable_polyval_prefix_complete: - vpsrldq xmm6,xmm5,8 - vpslldq xmm5,xmm5,8 - - vpxor xmm9,xmm4,xmm6 - vpxor xmm1,xmm3,xmm5 - - jmp NEAR $L$htable_polyval_main_loop - -$L$htable_polyval_no_prefix: - - - - - vpxor xmm1,xmm1,xmm1 - vmovdqa xmm9,XMMWORD[rcx] - -ALIGN 64 -$L$htable_polyval_main_loop: - sub rdx,0x80 - jb NEAR $L$htable_polyval_out - - vmovdqu xmm0,XMMWORD[112+rsi] - - vpclmulqdq xmm5,xmm0,XMMWORD[rdi],0x01 - vpclmulqdq xmm3,xmm0,XMMWORD[rdi],0x00 - vpclmulqdq xmm4,xmm0,XMMWORD[rdi],0x11 - vpclmulqdq xmm6,xmm0,XMMWORD[rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vmovdqu xmm0,XMMWORD[96+rsi] - vpclmulqdq xmm6,xmm0,XMMWORD[16+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[16+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[16+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[16+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - - vmovdqu xmm0,XMMWORD[80+rsi] - - vpclmulqdq xmm7,xmm1,XMMWORD[poly],0x10 - vpalignr xmm1,xmm1,xmm1,8 - - vpclmulqdq xmm6,xmm0,XMMWORD[32+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[32+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[32+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[32+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vpxor 
xmm1,xmm1,xmm7 - - vmovdqu xmm0,XMMWORD[64+rsi] - - vpclmulqdq xmm6,xmm0,XMMWORD[48+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[48+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[48+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[48+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vmovdqu xmm0,XMMWORD[48+rsi] - - vpclmulqdq xmm7,xmm1,XMMWORD[poly],0x10 - vpalignr xmm1,xmm1,xmm1,8 - - vpclmulqdq xmm6,xmm0,XMMWORD[64+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[64+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[64+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[64+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vpxor xmm1,xmm1,xmm7 - - vmovdqu xmm0,XMMWORD[32+rsi] - - vpclmulqdq xmm6,xmm0,XMMWORD[80+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[80+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[80+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[80+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vpxor xmm1,xmm1,xmm9 - - vmovdqu xmm0,XMMWORD[16+rsi] - - vpclmulqdq xmm6,xmm0,XMMWORD[96+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[96+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[96+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[96+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vmovdqu xmm0,XMMWORD[rsi] - vpxor xmm0,xmm0,xmm1 - - vpclmulqdq xmm6,xmm0,XMMWORD[112+rdi],0x01 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[112+rdi],0x00 - vpxor xmm3,xmm3,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[112+rdi],0x11 - vpxor xmm4,xmm4,xmm6 - vpclmulqdq xmm6,xmm0,XMMWORD[112+rdi],0x10 - vpxor xmm5,xmm5,xmm6 - - - vpsrldq xmm6,xmm5,8 - vpslldq xmm5,xmm5,8 - - vpxor xmm9,xmm4,xmm6 - vpxor xmm1,xmm3,xmm5 - - lea rsi,[128+rsi] - jmp NEAR $L$htable_polyval_main_loop - - - -$L$htable_polyval_out: - vpclmulqdq xmm6,xmm1,XMMWORD[poly],0x10 - vpalignr xmm1,xmm1,xmm1,8 - vpxor xmm1,xmm1,xmm6 - - vpclmulqdq 
xmm6,xmm1,XMMWORD[poly],0x10 - vpalignr xmm1,xmm1,xmm1,8 - vpxor xmm1,xmm1,xmm6 - vpxor xmm1,xmm1,xmm9 - - vmovdqu XMMWORD[rcx],xmm1 - vzeroupper - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesgcmsiv_htable_polyval: -global aesgcmsiv_polyval_horner - -ALIGN 16 -aesgcmsiv_polyval_horner: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesgcmsiv_polyval_horner: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - - - - test rcx,rcx - jnz NEAR $L$polyval_horner_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$polyval_horner_start: - - - - xor r10,r10 - shl rcx,4 - - vmovdqa xmm1,XMMWORD[rsi] - vmovdqa xmm0,XMMWORD[rdi] - -$L$polyval_horner_loop: - vpxor xmm0,xmm0,XMMWORD[r10*1+rdx] - call GFMUL - - add r10,16 - cmp rcx,r10 - jne NEAR $L$polyval_horner_loop - - - vmovdqa XMMWORD[rdi],xmm0 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesgcmsiv_polyval_horner: -global aes128gcmsiv_aes_ks - -ALIGN 16 -aes128gcmsiv_aes_ks: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_aes_ks: - mov rdi,rcx - mov rsi,rdx - - - - vmovdqu xmm1,XMMWORD[rdi] - vmovdqa XMMWORD[rsi],xmm1 - - vmovdqa xmm0,XMMWORD[con1] - vmovdqa xmm15,XMMWORD[mask] - - mov rax,8 - -$L$ks128_loop: - add rsi,16 - sub rax,1 - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm3,xmm1,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - vmovdqa XMMWORD[rsi],xmm1 - jne NEAR $L$ks128_loop - - vmovdqa xmm0,XMMWORD[con2] - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm3,xmm1,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpxor 
xmm1,xmm1,xmm2 - vmovdqa XMMWORD[16+rsi],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslldq xmm3,xmm1,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpslldq xmm3,xmm3,4 - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - vmovdqa XMMWORD[32+rsi],xmm1 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_aes_ks: -global aes256gcmsiv_aes_ks - -ALIGN 16 -aes256gcmsiv_aes_ks: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_aes_ks: - mov rdi,rcx - mov rsi,rdx - - - - vmovdqu xmm1,XMMWORD[rdi] - vmovdqu xmm3,XMMWORD[16+rdi] - vmovdqa XMMWORD[rsi],xmm1 - vmovdqa XMMWORD[16+rsi],xmm3 - vmovdqa xmm0,XMMWORD[con1] - vmovdqa xmm15,XMMWORD[mask] - vpxor xmm14,xmm14,xmm14 - mov rax,6 - -$L$ks256_loop: - add rsi,32 - sub rax,1 - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm4,xmm1,32 - vpxor xmm1,xmm1,xmm4 - vpshufb xmm4,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vmovdqa XMMWORD[rsi],xmm1 - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpsllq xmm4,xmm3,32 - vpxor xmm3,xmm3,xmm4 - vpshufb xmm4,xmm3,XMMWORD[con3] - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vmovdqa XMMWORD[16+rsi],xmm3 - jne NEAR $L$ks256_loop - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpsllq xmm4,xmm1,32 - vpxor xmm1,xmm1,xmm4 - vpshufb xmm4,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vmovdqa XMMWORD[32+rsi],xmm1 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global aes128gcmsiv_aes_ks_enc_x1 - -ALIGN 16 -aes128gcmsiv_aes_ks_enc_x1: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_aes_ks_enc_x1: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - - - - vmovdqa xmm1,XMMWORD[rcx] - vmovdqa xmm4,XMMWORD[rdi] - - vmovdqa XMMWORD[rdx],xmm1 - vpxor 
xmm4,xmm4,xmm1 - - vmovdqa xmm0,XMMWORD[con1] - vmovdqa xmm15,XMMWORD[mask] - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[16+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[32+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[48+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[64+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[80+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[96+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[112+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq 
xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[128+rdx],xmm1 - - - vmovdqa xmm0,XMMWORD[con2] - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenc xmm4,xmm4,xmm1 - vmovdqa XMMWORD[144+rdx],xmm1 - - vpshufb xmm2,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpsllq xmm3,xmm1,32 - vpxor xmm1,xmm1,xmm3 - vpshufb xmm3,xmm1,XMMWORD[con3] - vpxor xmm1,xmm1,xmm3 - vpxor xmm1,xmm1,xmm2 - - vaesenclast xmm4,xmm4,xmm1 - vmovdqa XMMWORD[160+rdx],xmm1 - - - vmovdqa XMMWORD[rsi],xmm4 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_aes_ks_enc_x1: -global aes128gcmsiv_kdf - -ALIGN 16 -aes128gcmsiv_kdf: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_kdf: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - - - - - vmovdqa xmm1,XMMWORD[rdx] - vmovdqa xmm9,XMMWORD[rdi] - vmovdqa xmm12,XMMWORD[and_mask] - vmovdqa xmm13,XMMWORD[one] - vpshufd xmm9,xmm9,0x90 - vpand xmm9,xmm9,xmm12 - vpaddd xmm10,xmm9,xmm13 - vpaddd xmm11,xmm10,xmm13 - vpaddd xmm12,xmm11,xmm13 - - vpxor xmm9,xmm9,xmm1 - vpxor xmm10,xmm10,xmm1 - vpxor xmm11,xmm11,xmm1 - vpxor xmm12,xmm12,xmm1 - - vmovdqa xmm1,XMMWORD[16+rdx] - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - - vmovdqa xmm2,XMMWORD[32+rdx] - vaesenc xmm9,xmm9,xmm2 - vaesenc xmm10,xmm10,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - - vmovdqa xmm1,XMMWORD[48+rdx] - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - - vmovdqa xmm2,XMMWORD[64+rdx] - vaesenc xmm9,xmm9,xmm2 - vaesenc xmm10,xmm10,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 
- - vmovdqa xmm1,XMMWORD[80+rdx] - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - - vmovdqa xmm2,XMMWORD[96+rdx] - vaesenc xmm9,xmm9,xmm2 - vaesenc xmm10,xmm10,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - - vmovdqa xmm1,XMMWORD[112+rdx] - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - - vmovdqa xmm2,XMMWORD[128+rdx] - vaesenc xmm9,xmm9,xmm2 - vaesenc xmm10,xmm10,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - - vmovdqa xmm1,XMMWORD[144+rdx] - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - - vmovdqa xmm2,XMMWORD[160+rdx] - vaesenclast xmm9,xmm9,xmm2 - vaesenclast xmm10,xmm10,xmm2 - vaesenclast xmm11,xmm11,xmm2 - vaesenclast xmm12,xmm12,xmm2 - - - vmovdqa XMMWORD[rsi],xmm9 - vmovdqa XMMWORD[16+rsi],xmm10 - vmovdqa XMMWORD[32+rsi],xmm11 - vmovdqa XMMWORD[48+rsi],xmm12 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_kdf: -global aes128gcmsiv_enc_msg_x4 - -ALIGN 16 -aes128gcmsiv_enc_msg_x4: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_enc_msg_x4: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - test r8,r8 - jnz NEAR $L$128_enc_msg_x4_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$128_enc_msg_x4_start: - push r12 - - push r13 - - - shr r8,4 - mov r10,r8 - shl r10,62 - shr r10,62 - - - vmovdqa xmm15,XMMWORD[rdx] - vpor xmm15,xmm15,XMMWORD[OR_MASK] - - vmovdqu xmm4,XMMWORD[four] - vmovdqa xmm0,xmm15 - vpaddd xmm1,xmm15,XMMWORD[one] - vpaddd xmm2,xmm15,XMMWORD[two] - vpaddd xmm3,xmm15,XMMWORD[three] - - shr r8,2 - je NEAR $L$128_enc_msg_x4_check_remainder - - sub rsi,64 - sub rdi,64 - -$L$128_enc_msg_x4_loop1: - add rsi,64 - add rdi,64 - - vmovdqa xmm5,xmm0 - 
vmovdqa xmm6,xmm1 - vmovdqa xmm7,xmm2 - vmovdqa xmm8,xmm3 - - vpxor xmm5,xmm5,XMMWORD[rcx] - vpxor xmm6,xmm6,XMMWORD[rcx] - vpxor xmm7,xmm7,XMMWORD[rcx] - vpxor xmm8,xmm8,XMMWORD[rcx] - - vmovdqu xmm12,XMMWORD[16+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm0,xmm0,xmm4 - vmovdqu xmm12,XMMWORD[32+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm1,xmm1,xmm4 - vmovdqu xmm12,XMMWORD[48+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm2,xmm2,xmm4 - vmovdqu xmm12,XMMWORD[64+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm3,xmm3,xmm4 - - vmovdqu xmm12,XMMWORD[80+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[96+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[112+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[128+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[144+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[160+rcx] - vaesenclast xmm5,xmm5,xmm12 - vaesenclast xmm6,xmm6,xmm12 - vaesenclast xmm7,xmm7,xmm12 - vaesenclast xmm8,xmm8,xmm12 - - - - vpxor xmm5,xmm5,XMMWORD[rdi] - vpxor xmm6,xmm6,XMMWORD[16+rdi] - vpxor xmm7,xmm7,XMMWORD[32+rdi] - vpxor xmm8,xmm8,XMMWORD[48+rdi] - - sub r8,1 - - vmovdqu XMMWORD[rsi],xmm5 - vmovdqu XMMWORD[16+rsi],xmm6 - vmovdqu XMMWORD[32+rsi],xmm7 - vmovdqu XMMWORD[48+rsi],xmm8 - - jne NEAR $L$128_enc_msg_x4_loop1 - - 
add rsi,64 - add rdi,64 - -$L$128_enc_msg_x4_check_remainder: - cmp r10,0 - je NEAR $L$128_enc_msg_x4_out - -$L$128_enc_msg_x4_loop2: - - - vmovdqa xmm5,xmm0 - vpaddd xmm0,xmm0,XMMWORD[one] - - vpxor xmm5,xmm5,XMMWORD[rcx] - vaesenc xmm5,xmm5,XMMWORD[16+rcx] - vaesenc xmm5,xmm5,XMMWORD[32+rcx] - vaesenc xmm5,xmm5,XMMWORD[48+rcx] - vaesenc xmm5,xmm5,XMMWORD[64+rcx] - vaesenc xmm5,xmm5,XMMWORD[80+rcx] - vaesenc xmm5,xmm5,XMMWORD[96+rcx] - vaesenc xmm5,xmm5,XMMWORD[112+rcx] - vaesenc xmm5,xmm5,XMMWORD[128+rcx] - vaesenc xmm5,xmm5,XMMWORD[144+rcx] - vaesenclast xmm5,xmm5,XMMWORD[160+rcx] - - - vpxor xmm5,xmm5,XMMWORD[rdi] - vmovdqu XMMWORD[rsi],xmm5 - - add rdi,16 - add rsi,16 - - sub r10,1 - jne NEAR $L$128_enc_msg_x4_loop2 - -$L$128_enc_msg_x4_out: - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_enc_msg_x4: -global aes128gcmsiv_enc_msg_x8 - -ALIGN 16 -aes128gcmsiv_enc_msg_x8: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_enc_msg_x8: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - test r8,r8 - jnz NEAR $L$128_enc_msg_x8_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$128_enc_msg_x8_start: - push r12 - - push r13 - - push rbp - - mov rbp,rsp - - - - sub rsp,128 - and rsp,-64 - - shr r8,4 - mov r10,r8 - shl r10,61 - shr r10,61 - - - vmovdqu xmm1,XMMWORD[rdx] - vpor xmm1,xmm1,XMMWORD[OR_MASK] - - - vpaddd xmm0,xmm1,XMMWORD[seven] - vmovdqu XMMWORD[rsp],xmm0 - vpaddd xmm9,xmm1,XMMWORD[one] - vpaddd xmm10,xmm1,XMMWORD[two] - vpaddd xmm11,xmm1,XMMWORD[three] - vpaddd xmm12,xmm1,XMMWORD[four] - vpaddd xmm13,xmm1,XMMWORD[five] - vpaddd xmm14,xmm1,XMMWORD[six] - vmovdqa xmm0,xmm1 - - shr r8,3 - je NEAR $L$128_enc_msg_x8_check_remainder - - sub rsi,128 - sub rdi,128 - -$L$128_enc_msg_x8_loop1: - add rsi,128 - add rdi,128 - - vmovdqa xmm1,xmm0 - 
vmovdqa xmm2,xmm9 - vmovdqa xmm3,xmm10 - vmovdqa xmm4,xmm11 - vmovdqa xmm5,xmm12 - vmovdqa xmm6,xmm13 - vmovdqa xmm7,xmm14 - - vmovdqu xmm8,XMMWORD[rsp] - - vpxor xmm1,xmm1,XMMWORD[rcx] - vpxor xmm2,xmm2,XMMWORD[rcx] - vpxor xmm3,xmm3,XMMWORD[rcx] - vpxor xmm4,xmm4,XMMWORD[rcx] - vpxor xmm5,xmm5,XMMWORD[rcx] - vpxor xmm6,xmm6,XMMWORD[rcx] - vpxor xmm7,xmm7,XMMWORD[rcx] - vpxor xmm8,xmm8,XMMWORD[rcx] - - vmovdqu xmm15,XMMWORD[16+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm14,XMMWORD[rsp] - vpaddd xmm14,xmm14,XMMWORD[eight] - vmovdqu XMMWORD[rsp],xmm14 - vmovdqu xmm15,XMMWORD[32+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpsubd xmm14,xmm14,XMMWORD[one] - vmovdqu xmm15,XMMWORD[48+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm0,xmm0,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[64+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm9,xmm9,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[80+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm10,xmm10,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[96+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc 
xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm11,xmm11,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[112+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm12,xmm12,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[128+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm13,xmm13,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[144+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm15,XMMWORD[160+rcx] - vaesenclast xmm1,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm15 - vaesenclast xmm3,xmm3,xmm15 - vaesenclast xmm4,xmm4,xmm15 - vaesenclast xmm5,xmm5,xmm15 - vaesenclast xmm6,xmm6,xmm15 - vaesenclast xmm7,xmm7,xmm15 - vaesenclast xmm8,xmm8,xmm15 - - - - vpxor xmm1,xmm1,XMMWORD[rdi] - vpxor xmm2,xmm2,XMMWORD[16+rdi] - vpxor xmm3,xmm3,XMMWORD[32+rdi] - vpxor xmm4,xmm4,XMMWORD[48+rdi] - vpxor xmm5,xmm5,XMMWORD[64+rdi] - vpxor xmm6,xmm6,XMMWORD[80+rdi] - vpxor xmm7,xmm7,XMMWORD[96+rdi] - vpxor xmm8,xmm8,XMMWORD[112+rdi] - - dec r8 - - vmovdqu XMMWORD[rsi],xmm1 - vmovdqu XMMWORD[16+rsi],xmm2 - vmovdqu XMMWORD[32+rsi],xmm3 - vmovdqu XMMWORD[48+rsi],xmm4 - vmovdqu XMMWORD[64+rsi],xmm5 - vmovdqu XMMWORD[80+rsi],xmm6 - vmovdqu XMMWORD[96+rsi],xmm7 - vmovdqu XMMWORD[112+rsi],xmm8 - - jne NEAR $L$128_enc_msg_x8_loop1 - - add rsi,128 - add rdi,128 - -$L$128_enc_msg_x8_check_remainder: - cmp r10,0 - je NEAR $L$128_enc_msg_x8_out - -$L$128_enc_msg_x8_loop2: - - - vmovdqa xmm1,xmm0 - vpaddd xmm0,xmm0,XMMWORD[one] - - vpxor 
xmm1,xmm1,XMMWORD[rcx] - vaesenc xmm1,xmm1,XMMWORD[16+rcx] - vaesenc xmm1,xmm1,XMMWORD[32+rcx] - vaesenc xmm1,xmm1,XMMWORD[48+rcx] - vaesenc xmm1,xmm1,XMMWORD[64+rcx] - vaesenc xmm1,xmm1,XMMWORD[80+rcx] - vaesenc xmm1,xmm1,XMMWORD[96+rcx] - vaesenc xmm1,xmm1,XMMWORD[112+rcx] - vaesenc xmm1,xmm1,XMMWORD[128+rcx] - vaesenc xmm1,xmm1,XMMWORD[144+rcx] - vaesenclast xmm1,xmm1,XMMWORD[160+rcx] - - - vpxor xmm1,xmm1,XMMWORD[rdi] - - vmovdqu XMMWORD[rsi],xmm1 - - add rdi,16 - add rsi,16 - - dec r10 - jne NEAR $L$128_enc_msg_x8_loop2 - -$L$128_enc_msg_x8_out: - mov rsp,rbp - - pop rbp - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_enc_msg_x8: -global aes128gcmsiv_dec - -ALIGN 16 -aes128gcmsiv_dec: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_dec: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - test r9,~15 - jnz NEAR $L$128_dec_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$128_dec_start: - vzeroupper - vmovdqa xmm0,XMMWORD[rdx] - mov rax,rdx - - lea rax,[32+rax] - lea rcx,[32+rcx] - - - vmovdqu xmm15,XMMWORD[r9*1+rdi] - vpor xmm15,xmm15,XMMWORD[OR_MASK] - and r9,~15 - - - cmp r9,96 - jb NEAR $L$128_dec_loop2 - - - sub r9,96 - vmovdqa xmm7,xmm15 - vpaddd xmm8,xmm7,XMMWORD[one] - vpaddd xmm9,xmm7,XMMWORD[two] - vpaddd xmm10,xmm9,XMMWORD[one] - vpaddd xmm11,xmm9,XMMWORD[two] - vpaddd xmm12,xmm11,XMMWORD[one] - vpaddd xmm15,xmm11,XMMWORD[two] - - vpxor xmm7,xmm7,XMMWORD[r8] - vpxor xmm8,xmm8,XMMWORD[r8] - vpxor xmm9,xmm9,XMMWORD[r8] - vpxor xmm10,xmm10,XMMWORD[r8] - vpxor xmm11,xmm11,XMMWORD[r8] - vpxor xmm12,xmm12,XMMWORD[r8] - - vmovdqu xmm4,XMMWORD[16+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - 
vmovdqu xmm4,XMMWORD[32+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[48+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[64+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[80+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[96+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[112+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[128+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[144+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[160+r8] - vaesenclast xmm7,xmm7,xmm4 - vaesenclast xmm8,xmm8,xmm4 - vaesenclast xmm9,xmm9,xmm4 - vaesenclast xmm10,xmm10,xmm4 - vaesenclast xmm11,xmm11,xmm4 - vaesenclast xmm12,xmm12,xmm4 - - - vpxor xmm7,xmm7,XMMWORD[rdi] - vpxor xmm8,xmm8,XMMWORD[16+rdi] - vpxor xmm9,xmm9,XMMWORD[32+rdi] - vpxor xmm10,xmm10,XMMWORD[48+rdi] - vpxor xmm11,xmm11,XMMWORD[64+rdi] - vpxor xmm12,xmm12,XMMWORD[80+rdi] - - vmovdqu XMMWORD[rsi],xmm7 - vmovdqu XMMWORD[16+rsi],xmm8 - vmovdqu 
XMMWORD[32+rsi],xmm9 - vmovdqu XMMWORD[48+rsi],xmm10 - vmovdqu XMMWORD[64+rsi],xmm11 - vmovdqu XMMWORD[80+rsi],xmm12 - - add rdi,96 - add rsi,96 - jmp NEAR $L$128_dec_loop1 - - -ALIGN 64 -$L$128_dec_loop1: - cmp r9,96 - jb NEAR $L$128_dec_finish_96 - sub r9,96 - - vmovdqa xmm6,xmm12 - vmovdqa XMMWORD[(16-32)+rax],xmm11 - vmovdqa XMMWORD[(32-32)+rax],xmm10 - vmovdqa XMMWORD[(48-32)+rax],xmm9 - vmovdqa XMMWORD[(64-32)+rax],xmm8 - vmovdqa XMMWORD[(80-32)+rax],xmm7 - - vmovdqa xmm7,xmm15 - vpaddd xmm8,xmm7,XMMWORD[one] - vpaddd xmm9,xmm7,XMMWORD[two] - vpaddd xmm10,xmm9,XMMWORD[one] - vpaddd xmm11,xmm9,XMMWORD[two] - vpaddd xmm12,xmm11,XMMWORD[one] - vpaddd xmm15,xmm11,XMMWORD[two] - - vmovdqa xmm4,XMMWORD[r8] - vpxor xmm7,xmm7,xmm4 - vpxor xmm8,xmm8,xmm4 - vpxor xmm9,xmm9,xmm4 - vpxor xmm10,xmm10,xmm4 - vpxor xmm11,xmm11,xmm4 - vpxor xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[((0-32))+rcx] - vpclmulqdq xmm2,xmm6,xmm4,0x11 - vpclmulqdq xmm3,xmm6,xmm4,0x00 - vpclmulqdq xmm1,xmm6,xmm4,0x01 - vpclmulqdq xmm4,xmm6,xmm4,0x10 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm4,XMMWORD[16+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[((-16))+rax] - vmovdqu xmm13,XMMWORD[((-16))+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[32+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[rax] - vmovdqu xmm13,XMMWORD[rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq 
xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[48+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[16+rax] - vmovdqu xmm13,XMMWORD[16+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[64+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[32+rax] - vmovdqu xmm13,XMMWORD[32+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[80+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[96+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[112+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - - vmovdqa xmm6,XMMWORD[((80-32))+rax] - vpxor xmm6,xmm6,xmm0 - vmovdqu xmm5,XMMWORD[((80-32))+rcx] - - vpclmulqdq xmm4,xmm6,xmm5,0x01 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x10 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm4,XMMWORD[128+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - 
vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - - vpsrldq xmm4,xmm1,8 - vpxor xmm5,xmm2,xmm4 - vpslldq xmm4,xmm1,8 - vpxor xmm0,xmm3,xmm4 - - vmovdqa xmm3,XMMWORD[poly] - - vmovdqu xmm4,XMMWORD[144+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[160+r8] - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpxor xmm4,xmm6,XMMWORD[rdi] - vaesenclast xmm7,xmm7,xmm4 - vpxor xmm4,xmm6,XMMWORD[16+rdi] - vaesenclast xmm8,xmm8,xmm4 - vpxor xmm4,xmm6,XMMWORD[32+rdi] - vaesenclast xmm9,xmm9,xmm4 - vpxor xmm4,xmm6,XMMWORD[48+rdi] - vaesenclast xmm10,xmm10,xmm4 - vpxor xmm4,xmm6,XMMWORD[64+rdi] - vaesenclast xmm11,xmm11,xmm4 - vpxor xmm4,xmm6,XMMWORD[80+rdi] - vaesenclast xmm12,xmm12,xmm4 - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vmovdqu XMMWORD[rsi],xmm7 - vmovdqu XMMWORD[16+rsi],xmm8 - vmovdqu XMMWORD[32+rsi],xmm9 - vmovdqu XMMWORD[48+rsi],xmm10 - vmovdqu XMMWORD[64+rsi],xmm11 - vmovdqu XMMWORD[80+rsi],xmm12 - - vpxor xmm0,xmm0,xmm5 - - lea rdi,[96+rdi] - lea rsi,[96+rsi] - jmp NEAR $L$128_dec_loop1 - -$L$128_dec_finish_96: - vmovdqa xmm6,xmm12 - vmovdqa XMMWORD[(16-32)+rax],xmm11 - vmovdqa XMMWORD[(32-32)+rax],xmm10 - vmovdqa XMMWORD[(48-32)+rax],xmm9 - vmovdqa XMMWORD[(64-32)+rax],xmm8 - vmovdqa XMMWORD[(80-32)+rax],xmm7 - - vmovdqu xmm4,XMMWORD[((0-32))+rcx] - vpclmulqdq xmm1,xmm6,xmm4,0x10 - vpclmulqdq xmm2,xmm6,xmm4,0x11 - vpclmulqdq xmm3,xmm6,xmm4,0x00 - vpclmulqdq xmm4,xmm6,xmm4,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[((-16))+rax] - vmovdqu xmm13,XMMWORD[((-16))+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor 
xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[rax] - vmovdqu xmm13,XMMWORD[rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[16+rax] - vmovdqu xmm13,XMMWORD[16+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[32+rax] - vmovdqu xmm13,XMMWORD[32+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm6,XMMWORD[((80-32))+rax] - vpxor xmm6,xmm6,xmm0 - vmovdqu xmm5,XMMWORD[((80-32))+rcx] - vpclmulqdq xmm4,xmm6,xmm5,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x01 - vpxor xmm1,xmm1,xmm4 - - vpsrldq xmm4,xmm1,8 - vpxor xmm5,xmm2,xmm4 - vpslldq xmm4,xmm1,8 - vpxor xmm0,xmm3,xmm4 - - vmovdqa xmm3,XMMWORD[poly] - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpxor xmm0,xmm0,xmm5 - -$L$128_dec_loop2: - - - - cmp r9,16 - jb NEAR $L$128_dec_out - sub r9,16 - - vmovdqa xmm2,xmm15 - vpaddd xmm15,xmm15,XMMWORD[one] - - vpxor xmm2,xmm2,XMMWORD[r8] - vaesenc xmm2,xmm2,XMMWORD[16+r8] - vaesenc xmm2,xmm2,XMMWORD[32+r8] - vaesenc xmm2,xmm2,XMMWORD[48+r8] - vaesenc xmm2,xmm2,XMMWORD[64+r8] - vaesenc xmm2,xmm2,XMMWORD[80+r8] - vaesenc xmm2,xmm2,XMMWORD[96+r8] - vaesenc xmm2,xmm2,XMMWORD[112+r8] - vaesenc xmm2,xmm2,XMMWORD[128+r8] - vaesenc 
xmm2,xmm2,XMMWORD[144+r8] - vaesenclast xmm2,xmm2,XMMWORD[160+r8] - vpxor xmm2,xmm2,XMMWORD[rdi] - vmovdqu XMMWORD[rsi],xmm2 - add rdi,16 - add rsi,16 - - vpxor xmm0,xmm0,xmm2 - vmovdqa xmm1,XMMWORD[((-32))+rcx] - call GFMUL - - jmp NEAR $L$128_dec_loop2 - -$L$128_dec_out: - vmovdqu XMMWORD[rdx],xmm0 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_dec: -global aes128gcmsiv_ecb_enc_block - -ALIGN 16 -aes128gcmsiv_ecb_enc_block: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes128gcmsiv_ecb_enc_block: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - vmovdqa xmm1,XMMWORD[rdi] - - vpxor xmm1,xmm1,XMMWORD[rdx] - vaesenc xmm1,xmm1,XMMWORD[16+rdx] - vaesenc xmm1,xmm1,XMMWORD[32+rdx] - vaesenc xmm1,xmm1,XMMWORD[48+rdx] - vaesenc xmm1,xmm1,XMMWORD[64+rdx] - vaesenc xmm1,xmm1,XMMWORD[80+rdx] - vaesenc xmm1,xmm1,XMMWORD[96+rdx] - vaesenc xmm1,xmm1,XMMWORD[112+rdx] - vaesenc xmm1,xmm1,XMMWORD[128+rdx] - vaesenc xmm1,xmm1,XMMWORD[144+rdx] - vaesenclast xmm1,xmm1,XMMWORD[160+rdx] - - vmovdqa XMMWORD[rsi],xmm1 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes128gcmsiv_ecb_enc_block: -global aes256gcmsiv_aes_ks_enc_x1 - -ALIGN 16 -aes256gcmsiv_aes_ks_enc_x1: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_aes_ks_enc_x1: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - - - - vmovdqa xmm0,XMMWORD[con1] - vmovdqa xmm15,XMMWORD[mask] - vmovdqa xmm8,XMMWORD[rdi] - vmovdqa xmm1,XMMWORD[rcx] - vmovdqa xmm3,XMMWORD[16+rcx] - vpxor xmm8,xmm8,xmm1 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[rdx],xmm1 - vmovdqu XMMWORD[16+rdx],xmm3 - vpxor xmm14,xmm14,xmm14 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - 
vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[32+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[48+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[64+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[80+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[96+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[112+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[128+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc 
xmm8,xmm8,xmm3 - vmovdqu XMMWORD[144+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[160+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[176+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslld xmm0,xmm0,1 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenc xmm8,xmm8,xmm1 - vmovdqu XMMWORD[192+rdx],xmm1 - - vpshufd xmm2,xmm1,0xff - vaesenclast xmm2,xmm2,xmm14 - vpslldq xmm4,xmm3,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm3,xmm3,xmm4 - vpxor xmm3,xmm3,xmm2 - vaesenc xmm8,xmm8,xmm3 - vmovdqu XMMWORD[208+rdx],xmm3 - - vpshufb xmm2,xmm3,xmm15 - vaesenclast xmm2,xmm2,xmm0 - vpslldq xmm4,xmm1,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpslldq xmm4,xmm4,4 - vpxor xmm1,xmm1,xmm4 - vpxor xmm1,xmm1,xmm2 - vaesenclast xmm8,xmm8,xmm1 - vmovdqu XMMWORD[224+rdx],xmm1 - - vmovdqa XMMWORD[rsi],xmm8 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes256gcmsiv_aes_ks_enc_x1: -global aes256gcmsiv_ecb_enc_block - -ALIGN 16 -aes256gcmsiv_ecb_enc_block: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_ecb_enc_block: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - vmovdqa xmm1,XMMWORD[rdi] - vpxor xmm1,xmm1,XMMWORD[rdx] - vaesenc xmm1,xmm1,XMMWORD[16+rdx] - vaesenc xmm1,xmm1,XMMWORD[32+rdx] - vaesenc 
xmm1,xmm1,XMMWORD[48+rdx] - vaesenc xmm1,xmm1,XMMWORD[64+rdx] - vaesenc xmm1,xmm1,XMMWORD[80+rdx] - vaesenc xmm1,xmm1,XMMWORD[96+rdx] - vaesenc xmm1,xmm1,XMMWORD[112+rdx] - vaesenc xmm1,xmm1,XMMWORD[128+rdx] - vaesenc xmm1,xmm1,XMMWORD[144+rdx] - vaesenc xmm1,xmm1,XMMWORD[160+rdx] - vaesenc xmm1,xmm1,XMMWORD[176+rdx] - vaesenc xmm1,xmm1,XMMWORD[192+rdx] - vaesenc xmm1,xmm1,XMMWORD[208+rdx] - vaesenclast xmm1,xmm1,XMMWORD[224+rdx] - vmovdqa XMMWORD[rsi],xmm1 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes256gcmsiv_ecb_enc_block: -global aes256gcmsiv_enc_msg_x4 - -ALIGN 16 -aes256gcmsiv_enc_msg_x4: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_enc_msg_x4: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - test r8,r8 - jnz NEAR $L$256_enc_msg_x4_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$256_enc_msg_x4_start: - mov r10,r8 - shr r8,4 - shl r10,60 - jz NEAR $L$256_enc_msg_x4_start2 - add r8,1 - -$L$256_enc_msg_x4_start2: - mov r10,r8 - shl r10,62 - shr r10,62 - - - vmovdqa xmm15,XMMWORD[rdx] - vpor xmm15,xmm15,XMMWORD[OR_MASK] - - vmovdqa xmm4,XMMWORD[four] - vmovdqa xmm0,xmm15 - vpaddd xmm1,xmm15,XMMWORD[one] - vpaddd xmm2,xmm15,XMMWORD[two] - vpaddd xmm3,xmm15,XMMWORD[three] - - shr r8,2 - je NEAR $L$256_enc_msg_x4_check_remainder - - sub rsi,64 - sub rdi,64 - -$L$256_enc_msg_x4_loop1: - add rsi,64 - add rdi,64 - - vmovdqa xmm5,xmm0 - vmovdqa xmm6,xmm1 - vmovdqa xmm7,xmm2 - vmovdqa xmm8,xmm3 - - vpxor xmm5,xmm5,XMMWORD[rcx] - vpxor xmm6,xmm6,XMMWORD[rcx] - vpxor xmm7,xmm7,XMMWORD[rcx] - vpxor xmm8,xmm8,XMMWORD[rcx] - - vmovdqu xmm12,XMMWORD[16+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm0,xmm0,xmm4 - vmovdqu xmm12,XMMWORD[32+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc 
xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm1,xmm1,xmm4 - vmovdqu xmm12,XMMWORD[48+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm2,xmm2,xmm4 - vmovdqu xmm12,XMMWORD[64+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vpaddd xmm3,xmm3,xmm4 - - vmovdqu xmm12,XMMWORD[80+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[96+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[112+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[128+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[144+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[160+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[176+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[192+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[208+rcx] - vaesenc xmm5,xmm5,xmm12 - vaesenc xmm6,xmm6,xmm12 - vaesenc xmm7,xmm7,xmm12 - vaesenc xmm8,xmm8,xmm12 - - vmovdqu xmm12,XMMWORD[224+rcx] - vaesenclast xmm5,xmm5,xmm12 - vaesenclast xmm6,xmm6,xmm12 - vaesenclast xmm7,xmm7,xmm12 - vaesenclast xmm8,xmm8,xmm12 - - - - vpxor xmm5,xmm5,XMMWORD[rdi] - vpxor xmm6,xmm6,XMMWORD[16+rdi] - vpxor xmm7,xmm7,XMMWORD[32+rdi] - vpxor xmm8,xmm8,XMMWORD[48+rdi] - - sub r8,1 - - vmovdqu XMMWORD[rsi],xmm5 - vmovdqu 
XMMWORD[16+rsi],xmm6 - vmovdqu XMMWORD[32+rsi],xmm7 - vmovdqu XMMWORD[48+rsi],xmm8 - - jne NEAR $L$256_enc_msg_x4_loop1 - - add rsi,64 - add rdi,64 - -$L$256_enc_msg_x4_check_remainder: - cmp r10,0 - je NEAR $L$256_enc_msg_x4_out - -$L$256_enc_msg_x4_loop2: - - - - vmovdqa xmm5,xmm0 - vpaddd xmm0,xmm0,XMMWORD[one] - vpxor xmm5,xmm5,XMMWORD[rcx] - vaesenc xmm5,xmm5,XMMWORD[16+rcx] - vaesenc xmm5,xmm5,XMMWORD[32+rcx] - vaesenc xmm5,xmm5,XMMWORD[48+rcx] - vaesenc xmm5,xmm5,XMMWORD[64+rcx] - vaesenc xmm5,xmm5,XMMWORD[80+rcx] - vaesenc xmm5,xmm5,XMMWORD[96+rcx] - vaesenc xmm5,xmm5,XMMWORD[112+rcx] - vaesenc xmm5,xmm5,XMMWORD[128+rcx] - vaesenc xmm5,xmm5,XMMWORD[144+rcx] - vaesenc xmm5,xmm5,XMMWORD[160+rcx] - vaesenc xmm5,xmm5,XMMWORD[176+rcx] - vaesenc xmm5,xmm5,XMMWORD[192+rcx] - vaesenc xmm5,xmm5,XMMWORD[208+rcx] - vaesenclast xmm5,xmm5,XMMWORD[224+rcx] - - - vpxor xmm5,xmm5,XMMWORD[rdi] - - vmovdqu XMMWORD[rsi],xmm5 - - add rdi,16 - add rsi,16 - - sub r10,1 - jne NEAR $L$256_enc_msg_x4_loop2 - -$L$256_enc_msg_x4_out: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes256gcmsiv_enc_msg_x4: -global aes256gcmsiv_enc_msg_x8 - -ALIGN 16 -aes256gcmsiv_enc_msg_x8: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_enc_msg_x8: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - test r8,r8 - jnz NEAR $L$256_enc_msg_x8_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$256_enc_msg_x8_start: - - mov r11,rsp - sub r11,16 - and r11,-64 - - mov r10,r8 - shr r8,4 - shl r10,60 - jz NEAR $L$256_enc_msg_x8_start2 - add r8,1 - -$L$256_enc_msg_x8_start2: - mov r10,r8 - shl r10,61 - shr r10,61 - - - vmovdqa xmm1,XMMWORD[rdx] - vpor xmm1,xmm1,XMMWORD[OR_MASK] - - - vpaddd xmm0,xmm1,XMMWORD[seven] - vmovdqa XMMWORD[r11],xmm0 - vpaddd xmm9,xmm1,XMMWORD[one] - vpaddd xmm10,xmm1,XMMWORD[two] - vpaddd 
xmm11,xmm1,XMMWORD[three] - vpaddd xmm12,xmm1,XMMWORD[four] - vpaddd xmm13,xmm1,XMMWORD[five] - vpaddd xmm14,xmm1,XMMWORD[six] - vmovdqa xmm0,xmm1 - - shr r8,3 - jz NEAR $L$256_enc_msg_x8_check_remainder - - sub rsi,128 - sub rdi,128 - -$L$256_enc_msg_x8_loop1: - add rsi,128 - add rdi,128 - - vmovdqa xmm1,xmm0 - vmovdqa xmm2,xmm9 - vmovdqa xmm3,xmm10 - vmovdqa xmm4,xmm11 - vmovdqa xmm5,xmm12 - vmovdqa xmm6,xmm13 - vmovdqa xmm7,xmm14 - - vmovdqa xmm8,XMMWORD[r11] - - vpxor xmm1,xmm1,XMMWORD[rcx] - vpxor xmm2,xmm2,XMMWORD[rcx] - vpxor xmm3,xmm3,XMMWORD[rcx] - vpxor xmm4,xmm4,XMMWORD[rcx] - vpxor xmm5,xmm5,XMMWORD[rcx] - vpxor xmm6,xmm6,XMMWORD[rcx] - vpxor xmm7,xmm7,XMMWORD[rcx] - vpxor xmm8,xmm8,XMMWORD[rcx] - - vmovdqu xmm15,XMMWORD[16+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqa xmm14,XMMWORD[r11] - vpaddd xmm14,xmm14,XMMWORD[eight] - vmovdqa XMMWORD[r11],xmm14 - vmovdqu xmm15,XMMWORD[32+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpsubd xmm14,xmm14,XMMWORD[one] - vmovdqu xmm15,XMMWORD[48+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm0,xmm0,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[64+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm9,xmm9,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[80+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - 
vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm10,xmm10,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[96+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm11,xmm11,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[112+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm12,xmm12,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[128+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vpaddd xmm13,xmm13,XMMWORD[eight] - vmovdqu xmm15,XMMWORD[144+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm15,XMMWORD[160+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm15,XMMWORD[176+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm15,XMMWORD[192+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu 
xmm15,XMMWORD[208+rcx] - vaesenc xmm1,xmm1,xmm15 - vaesenc xmm2,xmm2,xmm15 - vaesenc xmm3,xmm3,xmm15 - vaesenc xmm4,xmm4,xmm15 - vaesenc xmm5,xmm5,xmm15 - vaesenc xmm6,xmm6,xmm15 - vaesenc xmm7,xmm7,xmm15 - vaesenc xmm8,xmm8,xmm15 - - vmovdqu xmm15,XMMWORD[224+rcx] - vaesenclast xmm1,xmm1,xmm15 - vaesenclast xmm2,xmm2,xmm15 - vaesenclast xmm3,xmm3,xmm15 - vaesenclast xmm4,xmm4,xmm15 - vaesenclast xmm5,xmm5,xmm15 - vaesenclast xmm6,xmm6,xmm15 - vaesenclast xmm7,xmm7,xmm15 - vaesenclast xmm8,xmm8,xmm15 - - - - vpxor xmm1,xmm1,XMMWORD[rdi] - vpxor xmm2,xmm2,XMMWORD[16+rdi] - vpxor xmm3,xmm3,XMMWORD[32+rdi] - vpxor xmm4,xmm4,XMMWORD[48+rdi] - vpxor xmm5,xmm5,XMMWORD[64+rdi] - vpxor xmm6,xmm6,XMMWORD[80+rdi] - vpxor xmm7,xmm7,XMMWORD[96+rdi] - vpxor xmm8,xmm8,XMMWORD[112+rdi] - - sub r8,1 - - vmovdqu XMMWORD[rsi],xmm1 - vmovdqu XMMWORD[16+rsi],xmm2 - vmovdqu XMMWORD[32+rsi],xmm3 - vmovdqu XMMWORD[48+rsi],xmm4 - vmovdqu XMMWORD[64+rsi],xmm5 - vmovdqu XMMWORD[80+rsi],xmm6 - vmovdqu XMMWORD[96+rsi],xmm7 - vmovdqu XMMWORD[112+rsi],xmm8 - - jne NEAR $L$256_enc_msg_x8_loop1 - - add rsi,128 - add rdi,128 - -$L$256_enc_msg_x8_check_remainder: - cmp r10,0 - je NEAR $L$256_enc_msg_x8_out - -$L$256_enc_msg_x8_loop2: - - - vmovdqa xmm1,xmm0 - vpaddd xmm0,xmm0,XMMWORD[one] - - vpxor xmm1,xmm1,XMMWORD[rcx] - vaesenc xmm1,xmm1,XMMWORD[16+rcx] - vaesenc xmm1,xmm1,XMMWORD[32+rcx] - vaesenc xmm1,xmm1,XMMWORD[48+rcx] - vaesenc xmm1,xmm1,XMMWORD[64+rcx] - vaesenc xmm1,xmm1,XMMWORD[80+rcx] - vaesenc xmm1,xmm1,XMMWORD[96+rcx] - vaesenc xmm1,xmm1,XMMWORD[112+rcx] - vaesenc xmm1,xmm1,XMMWORD[128+rcx] - vaesenc xmm1,xmm1,XMMWORD[144+rcx] - vaesenc xmm1,xmm1,XMMWORD[160+rcx] - vaesenc xmm1,xmm1,XMMWORD[176+rcx] - vaesenc xmm1,xmm1,XMMWORD[192+rcx] - vaesenc xmm1,xmm1,XMMWORD[208+rcx] - vaesenclast xmm1,xmm1,XMMWORD[224+rcx] - - - vpxor xmm1,xmm1,XMMWORD[rdi] - - vmovdqu XMMWORD[rsi],xmm1 - - add rdi,16 - add rsi,16 - sub r10,1 - jnz NEAR $L$256_enc_msg_x8_loop2 - -$L$256_enc_msg_x8_out: - mov 
rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - - -$L$SEH_end_aes256gcmsiv_enc_msg_x8: -global aes256gcmsiv_dec - -ALIGN 16 -aes256gcmsiv_dec: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_dec: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - test r9,~15 - jnz NEAR $L$256_dec_start - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$256_dec_start: - vzeroupper - vmovdqa xmm0,XMMWORD[rdx] - mov rax,rdx - - lea rax,[32+rax] - lea rcx,[32+rcx] - - - vmovdqu xmm15,XMMWORD[r9*1+rdi] - vpor xmm15,xmm15,XMMWORD[OR_MASK] - and r9,~15 - - - cmp r9,96 - jb NEAR $L$256_dec_loop2 - - - sub r9,96 - vmovdqa xmm7,xmm15 - vpaddd xmm8,xmm7,XMMWORD[one] - vpaddd xmm9,xmm7,XMMWORD[two] - vpaddd xmm10,xmm9,XMMWORD[one] - vpaddd xmm11,xmm9,XMMWORD[two] - vpaddd xmm12,xmm11,XMMWORD[one] - vpaddd xmm15,xmm11,XMMWORD[two] - - vpxor xmm7,xmm7,XMMWORD[r8] - vpxor xmm8,xmm8,XMMWORD[r8] - vpxor xmm9,xmm9,XMMWORD[r8] - vpxor xmm10,xmm10,XMMWORD[r8] - vpxor xmm11,xmm11,XMMWORD[r8] - vpxor xmm12,xmm12,XMMWORD[r8] - - vmovdqu xmm4,XMMWORD[16+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[32+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[48+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[64+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[80+r8] - vaesenc xmm7,xmm7,xmm4 - 
vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[96+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[112+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[128+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[144+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[160+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[176+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[192+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[208+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[224+r8] - vaesenclast xmm7,xmm7,xmm4 - vaesenclast xmm8,xmm8,xmm4 - vaesenclast xmm9,xmm9,xmm4 - vaesenclast xmm10,xmm10,xmm4 - vaesenclast xmm11,xmm11,xmm4 - vaesenclast xmm12,xmm12,xmm4 - - - vpxor xmm7,xmm7,XMMWORD[rdi] - vpxor xmm8,xmm8,XMMWORD[16+rdi] - vpxor xmm9,xmm9,XMMWORD[32+rdi] - vpxor xmm10,xmm10,XMMWORD[48+rdi] - vpxor 
xmm11,xmm11,XMMWORD[64+rdi] - vpxor xmm12,xmm12,XMMWORD[80+rdi] - - vmovdqu XMMWORD[rsi],xmm7 - vmovdqu XMMWORD[16+rsi],xmm8 - vmovdqu XMMWORD[32+rsi],xmm9 - vmovdqu XMMWORD[48+rsi],xmm10 - vmovdqu XMMWORD[64+rsi],xmm11 - vmovdqu XMMWORD[80+rsi],xmm12 - - add rdi,96 - add rsi,96 - jmp NEAR $L$256_dec_loop1 - - -ALIGN 64 -$L$256_dec_loop1: - cmp r9,96 - jb NEAR $L$256_dec_finish_96 - sub r9,96 - - vmovdqa xmm6,xmm12 - vmovdqa XMMWORD[(16-32)+rax],xmm11 - vmovdqa XMMWORD[(32-32)+rax],xmm10 - vmovdqa XMMWORD[(48-32)+rax],xmm9 - vmovdqa XMMWORD[(64-32)+rax],xmm8 - vmovdqa XMMWORD[(80-32)+rax],xmm7 - - vmovdqa xmm7,xmm15 - vpaddd xmm8,xmm7,XMMWORD[one] - vpaddd xmm9,xmm7,XMMWORD[two] - vpaddd xmm10,xmm9,XMMWORD[one] - vpaddd xmm11,xmm9,XMMWORD[two] - vpaddd xmm12,xmm11,XMMWORD[one] - vpaddd xmm15,xmm11,XMMWORD[two] - - vmovdqa xmm4,XMMWORD[r8] - vpxor xmm7,xmm7,xmm4 - vpxor xmm8,xmm8,xmm4 - vpxor xmm9,xmm9,xmm4 - vpxor xmm10,xmm10,xmm4 - vpxor xmm11,xmm11,xmm4 - vpxor xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[((0-32))+rcx] - vpclmulqdq xmm2,xmm6,xmm4,0x11 - vpclmulqdq xmm3,xmm6,xmm4,0x00 - vpclmulqdq xmm1,xmm6,xmm4,0x01 - vpclmulqdq xmm4,xmm6,xmm4,0x10 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm4,XMMWORD[16+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[((-16))+rax] - vmovdqu xmm13,XMMWORD[((-16))+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[32+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[rax] - vmovdqu xmm13,XMMWORD[rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - 
vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[48+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[16+rax] - vmovdqu xmm13,XMMWORD[16+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[64+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[32+rax] - vmovdqu xmm13,XMMWORD[32+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm4,XMMWORD[80+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[96+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[112+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - - vmovdqa xmm6,XMMWORD[((80-32))+rax] - vpxor xmm6,xmm6,xmm0 - vmovdqu xmm5,XMMWORD[((80-32))+rcx] - - vpclmulqdq xmm4,xmm6,xmm5,0x01 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq 
xmm4,xmm6,xmm5,0x10 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm4,XMMWORD[128+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - - vpsrldq xmm4,xmm1,8 - vpxor xmm5,xmm2,xmm4 - vpslldq xmm4,xmm1,8 - vpxor xmm0,xmm3,xmm4 - - vmovdqa xmm3,XMMWORD[poly] - - vmovdqu xmm4,XMMWORD[144+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[160+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[176+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[192+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm4,XMMWORD[208+r8] - vaesenc xmm7,xmm7,xmm4 - vaesenc xmm8,xmm8,xmm4 - vaesenc xmm9,xmm9,xmm4 - vaesenc xmm10,xmm10,xmm4 - vaesenc xmm11,xmm11,xmm4 - vaesenc xmm12,xmm12,xmm4 - - vmovdqu xmm6,XMMWORD[224+r8] - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpxor xmm4,xmm6,XMMWORD[rdi] - vaesenclast xmm7,xmm7,xmm4 - vpxor xmm4,xmm6,XMMWORD[16+rdi] - vaesenclast xmm8,xmm8,xmm4 - vpxor xmm4,xmm6,XMMWORD[32+rdi] - vaesenclast xmm9,xmm9,xmm4 - vpxor xmm4,xmm6,XMMWORD[48+rdi] - vaesenclast xmm10,xmm10,xmm4 - vpxor xmm4,xmm6,XMMWORD[64+rdi] - vaesenclast xmm11,xmm11,xmm4 - vpxor xmm4,xmm6,XMMWORD[80+rdi] - vaesenclast xmm12,xmm12,xmm4 - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vmovdqu XMMWORD[rsi],xmm7 - vmovdqu XMMWORD[16+rsi],xmm8 - vmovdqu XMMWORD[32+rsi],xmm9 - vmovdqu 
XMMWORD[48+rsi],xmm10 - vmovdqu XMMWORD[64+rsi],xmm11 - vmovdqu XMMWORD[80+rsi],xmm12 - - vpxor xmm0,xmm0,xmm5 - - lea rdi,[96+rdi] - lea rsi,[96+rsi] - jmp NEAR $L$256_dec_loop1 - -$L$256_dec_finish_96: - vmovdqa xmm6,xmm12 - vmovdqa XMMWORD[(16-32)+rax],xmm11 - vmovdqa XMMWORD[(32-32)+rax],xmm10 - vmovdqa XMMWORD[(48-32)+rax],xmm9 - vmovdqa XMMWORD[(64-32)+rax],xmm8 - vmovdqa XMMWORD[(80-32)+rax],xmm7 - - vmovdqu xmm4,XMMWORD[((0-32))+rcx] - vpclmulqdq xmm1,xmm6,xmm4,0x10 - vpclmulqdq xmm2,xmm6,xmm4,0x11 - vpclmulqdq xmm3,xmm6,xmm4,0x00 - vpclmulqdq xmm4,xmm6,xmm4,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[((-16))+rax] - vmovdqu xmm13,XMMWORD[((-16))+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[rax] - vmovdqu xmm13,XMMWORD[rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[16+rax] - vmovdqu xmm13,XMMWORD[16+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - vmovdqu xmm6,XMMWORD[32+rax] - vmovdqu xmm13,XMMWORD[32+rcx] - - vpclmulqdq xmm4,xmm6,xmm13,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x00 - vpxor xmm3,xmm3,xmm4 - vpclmulqdq xmm4,xmm6,xmm13,0x01 - vpxor xmm1,xmm1,xmm4 - - - vmovdqu xmm6,XMMWORD[((80-32))+rax] - vpxor xmm6,xmm6,xmm0 - vmovdqu xmm5,XMMWORD[((80-32))+rcx] - vpclmulqdq xmm4,xmm6,xmm5,0x11 - vpxor xmm2,xmm2,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x00 - vpxor xmm3,xmm3,xmm4 
- vpclmulqdq xmm4,xmm6,xmm5,0x10 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm4,xmm6,xmm5,0x01 - vpxor xmm1,xmm1,xmm4 - - vpsrldq xmm4,xmm1,8 - vpxor xmm5,xmm2,xmm4 - vpslldq xmm4,xmm1,8 - vpxor xmm0,xmm3,xmm4 - - vmovdqa xmm3,XMMWORD[poly] - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpalignr xmm2,xmm0,xmm0,8 - vpclmulqdq xmm0,xmm0,xmm3,0x10 - vpxor xmm0,xmm2,xmm0 - - vpxor xmm0,xmm0,xmm5 - -$L$256_dec_loop2: - - - - cmp r9,16 - jb NEAR $L$256_dec_out - sub r9,16 - - vmovdqa xmm2,xmm15 - vpaddd xmm15,xmm15,XMMWORD[one] - - vpxor xmm2,xmm2,XMMWORD[r8] - vaesenc xmm2,xmm2,XMMWORD[16+r8] - vaesenc xmm2,xmm2,XMMWORD[32+r8] - vaesenc xmm2,xmm2,XMMWORD[48+r8] - vaesenc xmm2,xmm2,XMMWORD[64+r8] - vaesenc xmm2,xmm2,XMMWORD[80+r8] - vaesenc xmm2,xmm2,XMMWORD[96+r8] - vaesenc xmm2,xmm2,XMMWORD[112+r8] - vaesenc xmm2,xmm2,XMMWORD[128+r8] - vaesenc xmm2,xmm2,XMMWORD[144+r8] - vaesenc xmm2,xmm2,XMMWORD[160+r8] - vaesenc xmm2,xmm2,XMMWORD[176+r8] - vaesenc xmm2,xmm2,XMMWORD[192+r8] - vaesenc xmm2,xmm2,XMMWORD[208+r8] - vaesenclast xmm2,xmm2,XMMWORD[224+r8] - vpxor xmm2,xmm2,XMMWORD[rdi] - vmovdqu XMMWORD[rsi],xmm2 - add rdi,16 - add rsi,16 - - vpxor xmm0,xmm0,xmm2 - vmovdqa xmm1,XMMWORD[((-32))+rcx] - call GFMUL - - jmp NEAR $L$256_dec_loop2 - -$L$256_dec_out: - vmovdqu XMMWORD[rdx],xmm0 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes256gcmsiv_dec: -global aes256gcmsiv_kdf - -ALIGN 16 -aes256gcmsiv_kdf: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes256gcmsiv_kdf: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - - - - - vmovdqa xmm1,XMMWORD[rdx] - vmovdqa xmm4,XMMWORD[rdi] - vmovdqa xmm11,XMMWORD[and_mask] - vmovdqa xmm8,XMMWORD[one] - vpshufd xmm4,xmm4,0x90 - vpand xmm4,xmm4,xmm11 - vpaddd xmm6,xmm4,xmm8 - vpaddd xmm7,xmm6,xmm8 - vpaddd xmm11,xmm7,xmm8 - vpaddd xmm12,xmm11,xmm8 - vpaddd xmm13,xmm12,xmm8 - - vpxor xmm4,xmm4,xmm1 - vpxor 
xmm6,xmm6,xmm1 - vpxor xmm7,xmm7,xmm1 - vpxor xmm11,xmm11,xmm1 - vpxor xmm12,xmm12,xmm1 - vpxor xmm13,xmm13,xmm1 - - vmovdqa xmm1,XMMWORD[16+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[32+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[48+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[64+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[80+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[96+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[112+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[128+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[144+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[160+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc 
xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[176+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[192+rdx] - vaesenc xmm4,xmm4,xmm2 - vaesenc xmm6,xmm6,xmm2 - vaesenc xmm7,xmm7,xmm2 - vaesenc xmm11,xmm11,xmm2 - vaesenc xmm12,xmm12,xmm2 - vaesenc xmm13,xmm13,xmm2 - - vmovdqa xmm1,XMMWORD[208+rdx] - vaesenc xmm4,xmm4,xmm1 - vaesenc xmm6,xmm6,xmm1 - vaesenc xmm7,xmm7,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - - vmovdqa xmm2,XMMWORD[224+rdx] - vaesenclast xmm4,xmm4,xmm2 - vaesenclast xmm6,xmm6,xmm2 - vaesenclast xmm7,xmm7,xmm2 - vaesenclast xmm11,xmm11,xmm2 - vaesenclast xmm12,xmm12,xmm2 - vaesenclast xmm13,xmm13,xmm2 - - - vmovdqa XMMWORD[rsi],xmm4 - vmovdqa XMMWORD[16+rsi],xmm6 - vmovdqa XMMWORD[32+rsi],xmm7 - vmovdqa XMMWORD[48+rsi],xmm11 - vmovdqa XMMWORD[64+rsi],xmm12 - vmovdqa XMMWORD[80+rsi],xmm13 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes256gcmsiv_kdf: diff --git a/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm deleted file mode 100644 index b1159ae098..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm +++ /dev/null @@ -1,17 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -global dummy_chacha20_poly1305_asm - -dummy_chacha20_poly1305_asm: - DB 0F3h,0C3h ;repret diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aes-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aes-x86_64.asm deleted file mode 100644 index 329185ee67..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aes-x86_64.asm +++ /dev/null @@ -1,2962 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -ALIGN 16 -_x86_64_AES_encrypt: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] - - mov r13d,DWORD[240+r15] - sub r13d,1 - jmp NEAR $L$enc_loop -ALIGN 16 -$L$enc_loop: - - movzx esi,al - movzx edi,bl - movzx ebp,cl - mov r10d,DWORD[rsi*8+r14] - mov r11d,DWORD[rdi*8+r14] - mov r12d,DWORD[rbp*8+r14] - - movzx esi,bh - movzx edi,ch - movzx ebp,dl - xor r10d,DWORD[3+rsi*8+r14] - xor r11d,DWORD[3+rdi*8+r14] - mov r8d,DWORD[rbp*8+r14] - - movzx esi,dh - shr ecx,16 - movzx ebp,ah - xor r12d,DWORD[3+rsi*8+r14] - shr edx,16 - xor r8d,DWORD[3+rbp*8+r14] - - shr ebx,16 - lea r15,[16+r15] - shr eax,16 - - movzx esi,cl - movzx edi,dl - movzx ebp,al - xor r10d,DWORD[2+rsi*8+r14] - xor r11d,DWORD[2+rdi*8+r14] - xor r12d,DWORD[2+rbp*8+r14] - - movzx esi,dh - movzx edi,ah - movzx ebp,bl - xor r10d,DWORD[1+rsi*8+r14] - xor r11d,DWORD[1+rdi*8+r14] - xor r8d,DWORD[2+rbp*8+r14] - - mov edx,DWORD[12+r15] - movzx edi,bh - movzx ebp,ch - mov eax,DWORD[r15] - xor r12d,DWORD[1+rdi*8+r14] - xor r8d,DWORD[1+rbp*8+r14] - - mov ebx,DWORD[4+r15] - mov ecx,DWORD[8+r15] - xor 
eax,r10d - xor ebx,r11d - xor ecx,r12d - xor edx,r8d - sub r13d,1 - jnz NEAR $L$enc_loop - movzx esi,al - movzx edi,bl - movzx ebp,cl - movzx r10d,BYTE[2+rsi*8+r14] - movzx r11d,BYTE[2+rdi*8+r14] - movzx r12d,BYTE[2+rbp*8+r14] - - movzx esi,dl - movzx edi,bh - movzx ebp,ch - movzx r8d,BYTE[2+rsi*8+r14] - mov edi,DWORD[rdi*8+r14] - mov ebp,DWORD[rbp*8+r14] - - and edi,0x0000ff00 - and ebp,0x0000ff00 - - xor r10d,edi - xor r11d,ebp - shr ecx,16 - - movzx esi,dh - movzx edi,ah - shr edx,16 - mov esi,DWORD[rsi*8+r14] - mov edi,DWORD[rdi*8+r14] - - and esi,0x0000ff00 - and edi,0x0000ff00 - shr ebx,16 - xor r12d,esi - xor r8d,edi - shr eax,16 - - movzx esi,cl - movzx edi,dl - movzx ebp,al - mov esi,DWORD[rsi*8+r14] - mov edi,DWORD[rdi*8+r14] - mov ebp,DWORD[rbp*8+r14] - - and esi,0x00ff0000 - and edi,0x00ff0000 - and ebp,0x00ff0000 - - xor r10d,esi - xor r11d,edi - xor r12d,ebp - - movzx esi,bl - movzx edi,dh - movzx ebp,ah - mov esi,DWORD[rsi*8+r14] - mov edi,DWORD[2+rdi*8+r14] - mov ebp,DWORD[2+rbp*8+r14] - - and esi,0x00ff0000 - and edi,0xff000000 - and ebp,0xff000000 - - xor r8d,esi - xor r10d,edi - xor r11d,ebp - - movzx esi,bh - movzx edi,ch - mov edx,DWORD[((16+12))+r15] - mov esi,DWORD[2+rsi*8+r14] - mov edi,DWORD[2+rdi*8+r14] - mov eax,DWORD[((16+0))+r15] - - and esi,0xff000000 - and edi,0xff000000 - - xor r12d,esi - xor r8d,edi - - mov ebx,DWORD[((16+4))+r15] - mov ecx,DWORD[((16+8))+r15] - xor eax,r10d - xor ebx,r11d - xor ecx,r12d - xor edx,r8d -DB 0xf3,0xc3 - - -ALIGN 16 -_x86_64_AES_encrypt_compact: - - lea r8,[128+r14] - mov edi,DWORD[((0-128))+r8] - mov ebp,DWORD[((32-128))+r8] - mov r10d,DWORD[((64-128))+r8] - mov r11d,DWORD[((96-128))+r8] - mov edi,DWORD[((128-128))+r8] - mov ebp,DWORD[((160-128))+r8] - mov r10d,DWORD[((192-128))+r8] - mov r11d,DWORD[((224-128))+r8] - jmp NEAR $L$enc_loop_compact -ALIGN 16 -$L$enc_loop_compact: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] - lea r15,[16+r15] - movzx r10d,al - 
movzx r11d,bl - movzx r12d,cl - movzx r8d,dl - movzx esi,bh - movzx edi,ch - shr ecx,16 - movzx ebp,dh - movzx r10d,BYTE[r10*1+r14] - movzx r11d,BYTE[r11*1+r14] - movzx r12d,BYTE[r12*1+r14] - movzx r8d,BYTE[r8*1+r14] - - movzx r9d,BYTE[rsi*1+r14] - movzx esi,ah - movzx r13d,BYTE[rdi*1+r14] - movzx edi,cl - movzx ebp,BYTE[rbp*1+r14] - movzx esi,BYTE[rsi*1+r14] - - shl r9d,8 - shr edx,16 - shl r13d,8 - xor r10d,r9d - shr eax,16 - movzx r9d,dl - shr ebx,16 - xor r11d,r13d - shl ebp,8 - movzx r13d,al - movzx edi,BYTE[rdi*1+r14] - xor r12d,ebp - - shl esi,8 - movzx ebp,bl - shl edi,16 - xor r8d,esi - movzx r9d,BYTE[r9*1+r14] - movzx esi,dh - movzx r13d,BYTE[r13*1+r14] - xor r10d,edi - - shr ecx,8 - movzx edi,ah - shl r9d,16 - shr ebx,8 - shl r13d,16 - xor r11d,r9d - movzx ebp,BYTE[rbp*1+r14] - movzx esi,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - movzx edx,BYTE[rcx*1+r14] - movzx ecx,BYTE[rbx*1+r14] - - shl ebp,16 - xor r12d,r13d - shl esi,24 - xor r8d,ebp - shl edi,24 - xor r10d,esi - shl edx,24 - xor r11d,edi - shl ecx,24 - mov eax,r10d - mov ebx,r11d - xor ecx,r12d - xor edx,r8d - cmp r15,QWORD[16+rsp] - je NEAR $L$enc_compact_done - mov r10d,0x80808080 - mov r11d,0x80808080 - and r10d,eax - and r11d,ebx - mov esi,r10d - mov edi,r11d - shr r10d,7 - lea r8d,[rax*1+rax] - shr r11d,7 - lea r9d,[rbx*1+rbx] - sub esi,r10d - sub edi,r11d - and r8d,0xfefefefe - and r9d,0xfefefefe - and esi,0x1b1b1b1b - and edi,0x1b1b1b1b - mov r10d,eax - mov r11d,ebx - xor r8d,esi - xor r9d,edi - - xor eax,r8d - xor ebx,r9d - mov r12d,0x80808080 - rol eax,24 - mov ebp,0x80808080 - rol ebx,24 - and r12d,ecx - and ebp,edx - xor eax,r8d - xor ebx,r9d - mov esi,r12d - ror r10d,16 - mov edi,ebp - ror r11d,16 - lea r8d,[rcx*1+rcx] - shr r12d,7 - xor eax,r10d - shr ebp,7 - xor ebx,r11d - ror r10d,8 - lea r9d,[rdx*1+rdx] - ror r11d,8 - sub esi,r12d - sub edi,ebp - xor eax,r10d - xor ebx,r11d - - and r8d,0xfefefefe - and r9d,0xfefefefe - and esi,0x1b1b1b1b - and edi,0x1b1b1b1b - mov r12d,ecx - mov 
ebp,edx - xor r8d,esi - xor r9d,edi - - ror r12d,16 - xor ecx,r8d - ror ebp,16 - xor edx,r9d - rol ecx,24 - mov esi,DWORD[r14] - rol edx,24 - xor ecx,r8d - mov edi,DWORD[64+r14] - xor edx,r9d - mov r8d,DWORD[128+r14] - xor ecx,r12d - ror r12d,8 - xor edx,ebp - ror ebp,8 - xor ecx,r12d - mov r9d,DWORD[192+r14] - xor edx,ebp - jmp NEAR $L$enc_loop_compact -ALIGN 16 -$L$enc_compact_done: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] -DB 0xf3,0xc3 - - -ALIGN 16 -global aes_nohw_encrypt - - -aes_nohw_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_nohw_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - - - lea rcx,[((-63))+rdx] - and rsp,-64 - sub rcx,rsp - neg rcx - and rcx,0x3c0 - sub rsp,rcx - sub rsp,32 - - mov QWORD[16+rsp],rsi - mov QWORD[24+rsp],rax - -$L$enc_prologue: - - mov r15,rdx - mov r13d,DWORD[240+r15] - - mov eax,DWORD[rdi] - mov ebx,DWORD[4+rdi] - mov ecx,DWORD[8+rdi] - mov edx,DWORD[12+rdi] - - shl r13d,4 - lea rbp,[r13*1+r15] - mov QWORD[rsp],r15 - mov QWORD[8+rsp],rbp - - - lea r14,[(($L$AES_Te+2048))] - lea rbp,[768+rsp] - sub rbp,r14 - and rbp,0x300 - lea r14,[rbp*1+r14] - - call _x86_64_AES_encrypt_compact - - mov r9,QWORD[16+rsp] - mov rsi,QWORD[24+rsp] - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$enc_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_nohw_encrypt: - -ALIGN 16 -_x86_64_AES_decrypt: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] - - mov r13d,DWORD[240+r15] - sub r13d,1 - jmp NEAR 
$L$dec_loop -ALIGN 16 -$L$dec_loop: - - movzx esi,al - movzx edi,bl - movzx ebp,cl - mov r10d,DWORD[rsi*8+r14] - mov r11d,DWORD[rdi*8+r14] - mov r12d,DWORD[rbp*8+r14] - - movzx esi,dh - movzx edi,ah - movzx ebp,dl - xor r10d,DWORD[3+rsi*8+r14] - xor r11d,DWORD[3+rdi*8+r14] - mov r8d,DWORD[rbp*8+r14] - - movzx esi,bh - shr eax,16 - movzx ebp,ch - xor r12d,DWORD[3+rsi*8+r14] - shr edx,16 - xor r8d,DWORD[3+rbp*8+r14] - - shr ebx,16 - lea r15,[16+r15] - shr ecx,16 - - movzx esi,cl - movzx edi,dl - movzx ebp,al - xor r10d,DWORD[2+rsi*8+r14] - xor r11d,DWORD[2+rdi*8+r14] - xor r12d,DWORD[2+rbp*8+r14] - - movzx esi,bh - movzx edi,ch - movzx ebp,bl - xor r10d,DWORD[1+rsi*8+r14] - xor r11d,DWORD[1+rdi*8+r14] - xor r8d,DWORD[2+rbp*8+r14] - - movzx esi,dh - mov edx,DWORD[12+r15] - movzx ebp,ah - xor r12d,DWORD[1+rsi*8+r14] - mov eax,DWORD[r15] - xor r8d,DWORD[1+rbp*8+r14] - - xor eax,r10d - mov ebx,DWORD[4+r15] - mov ecx,DWORD[8+r15] - xor ecx,r12d - xor ebx,r11d - xor edx,r8d - sub r13d,1 - jnz NEAR $L$dec_loop - lea r14,[2048+r14] - movzx esi,al - movzx edi,bl - movzx ebp,cl - movzx r10d,BYTE[rsi*1+r14] - movzx r11d,BYTE[rdi*1+r14] - movzx r12d,BYTE[rbp*1+r14] - - movzx esi,dl - movzx edi,dh - movzx ebp,ah - movzx r8d,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - movzx ebp,BYTE[rbp*1+r14] - - shl edi,8 - shl ebp,8 - - xor r10d,edi - xor r11d,ebp - shr edx,16 - - movzx esi,bh - movzx edi,ch - shr eax,16 - movzx esi,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - - shl esi,8 - shl edi,8 - shr ebx,16 - xor r12d,esi - xor r8d,edi - shr ecx,16 - - movzx esi,cl - movzx edi,dl - movzx ebp,al - movzx esi,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - movzx ebp,BYTE[rbp*1+r14] - - shl esi,16 - shl edi,16 - shl ebp,16 - - xor r10d,esi - xor r11d,edi - xor r12d,ebp - - movzx esi,bl - movzx edi,bh - movzx ebp,ch - movzx esi,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - movzx ebp,BYTE[rbp*1+r14] - - shl esi,16 - shl edi,24 - shl ebp,24 - - xor r8d,esi - xor r10d,edi - xor r11d,ebp - - 
movzx esi,dh - movzx edi,ah - mov edx,DWORD[((16+12))+r15] - movzx esi,BYTE[rsi*1+r14] - movzx edi,BYTE[rdi*1+r14] - mov eax,DWORD[((16+0))+r15] - - shl esi,24 - shl edi,24 - - xor r12d,esi - xor r8d,edi - - mov ebx,DWORD[((16+4))+r15] - mov ecx,DWORD[((16+8))+r15] - lea r14,[((-2048))+r14] - xor eax,r10d - xor ebx,r11d - xor ecx,r12d - xor edx,r8d -DB 0xf3,0xc3 - - -ALIGN 16 -_x86_64_AES_decrypt_compact: - - lea r8,[128+r14] - mov edi,DWORD[((0-128))+r8] - mov ebp,DWORD[((32-128))+r8] - mov r10d,DWORD[((64-128))+r8] - mov r11d,DWORD[((96-128))+r8] - mov edi,DWORD[((128-128))+r8] - mov ebp,DWORD[((160-128))+r8] - mov r10d,DWORD[((192-128))+r8] - mov r11d,DWORD[((224-128))+r8] - jmp NEAR $L$dec_loop_compact - -ALIGN 16 -$L$dec_loop_compact: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] - lea r15,[16+r15] - movzx r10d,al - movzx r11d,bl - movzx r12d,cl - movzx r8d,dl - movzx esi,dh - movzx edi,ah - shr edx,16 - movzx ebp,bh - movzx r10d,BYTE[r10*1+r14] - movzx r11d,BYTE[r11*1+r14] - movzx r12d,BYTE[r12*1+r14] - movzx r8d,BYTE[r8*1+r14] - - movzx r9d,BYTE[rsi*1+r14] - movzx esi,ch - movzx r13d,BYTE[rdi*1+r14] - movzx ebp,BYTE[rbp*1+r14] - movzx esi,BYTE[rsi*1+r14] - - shr ecx,16 - shl r13d,8 - shl r9d,8 - movzx edi,cl - shr eax,16 - xor r10d,r9d - shr ebx,16 - movzx r9d,dl - - shl ebp,8 - xor r11d,r13d - shl esi,8 - movzx r13d,al - movzx edi,BYTE[rdi*1+r14] - xor r12d,ebp - movzx ebp,bl - - shl edi,16 - xor r8d,esi - movzx r9d,BYTE[r9*1+r14] - movzx esi,bh - movzx ebp,BYTE[rbp*1+r14] - xor r10d,edi - movzx r13d,BYTE[r13*1+r14] - movzx edi,ch - - shl ebp,16 - shl r9d,16 - shl r13d,16 - xor r8d,ebp - movzx ebp,dh - xor r11d,r9d - shr eax,8 - xor r12d,r13d - - movzx esi,BYTE[rsi*1+r14] - movzx ebx,BYTE[rdi*1+r14] - movzx ecx,BYTE[rbp*1+r14] - movzx edx,BYTE[rax*1+r14] - - mov eax,r10d - shl esi,24 - shl ebx,24 - shl ecx,24 - xor eax,esi - shl edx,24 - xor ebx,r11d - xor ecx,r12d - xor edx,r8d - cmp r15,QWORD[16+rsp] - je NEAR 
$L$dec_compact_done - - mov rsi,QWORD[((256+0))+r14] - shl rbx,32 - shl rdx,32 - mov rdi,QWORD[((256+8))+r14] - or rax,rbx - or rcx,rdx - mov rbp,QWORD[((256+16))+r14] - mov r9,rsi - mov r12,rsi - and r9,rax - and r12,rcx - mov rbx,r9 - mov rdx,r12 - shr r9,7 - lea r8,[rax*1+rax] - shr r12,7 - lea r11,[rcx*1+rcx] - sub rbx,r9 - sub rdx,r12 - and r8,rdi - and r11,rdi - and rbx,rbp - and rdx,rbp - xor r8,rbx - xor r11,rdx - mov r10,rsi - mov r13,rsi - - and r10,r8 - and r13,r11 - mov rbx,r10 - mov rdx,r13 - shr r10,7 - lea r9,[r8*1+r8] - shr r13,7 - lea r12,[r11*1+r11] - sub rbx,r10 - sub rdx,r13 - and r9,rdi - and r12,rdi - and rbx,rbp - and rdx,rbp - xor r9,rbx - xor r12,rdx - mov r10,rsi - mov r13,rsi - - and r10,r9 - and r13,r12 - mov rbx,r10 - mov rdx,r13 - shr r10,7 - xor r8,rax - shr r13,7 - xor r11,rcx - sub rbx,r10 - sub rdx,r13 - lea r10,[r9*1+r9] - lea r13,[r12*1+r12] - xor r9,rax - xor r12,rcx - and r10,rdi - and r13,rdi - and rbx,rbp - and rdx,rbp - xor r10,rbx - xor r13,rdx - - xor rax,r10 - xor rcx,r13 - xor r8,r10 - xor r11,r13 - mov rbx,rax - mov rdx,rcx - xor r9,r10 - shr rbx,32 - xor r12,r13 - shr rdx,32 - xor r10,r8 - rol eax,8 - xor r13,r11 - rol ecx,8 - xor r10,r9 - rol ebx,8 - xor r13,r12 - - rol edx,8 - xor eax,r10d - shr r10,32 - xor ecx,r13d - shr r13,32 - xor ebx,r10d - xor edx,r13d - - mov r10,r8 - rol r8d,24 - mov r13,r11 - rol r11d,24 - shr r10,32 - xor eax,r8d - shr r13,32 - xor ecx,r11d - rol r10d,24 - mov r8,r9 - rol r13d,24 - mov r11,r12 - shr r8,32 - xor ebx,r10d - shr r11,32 - xor edx,r13d - - mov rsi,QWORD[r14] - rol r9d,16 - mov rdi,QWORD[64+r14] - rol r12d,16 - mov rbp,QWORD[128+r14] - rol r8d,16 - mov r10,QWORD[192+r14] - xor eax,r9d - rol r11d,16 - xor ecx,r12d - mov r13,QWORD[256+r14] - xor ebx,r8d - xor edx,r11d - jmp NEAR $L$dec_loop_compact -ALIGN 16 -$L$dec_compact_done: - xor eax,DWORD[r15] - xor ebx,DWORD[4+r15] - xor ecx,DWORD[8+r15] - xor edx,DWORD[12+r15] -DB 0xf3,0xc3 - - -ALIGN 16 -global aes_nohw_decrypt - - 
-aes_nohw_decrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_nohw_decrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - - - lea rcx,[((-63))+rdx] - and rsp,-64 - sub rcx,rsp - neg rcx - and rcx,0x3c0 - sub rsp,rcx - sub rsp,32 - - mov QWORD[16+rsp],rsi - mov QWORD[24+rsp],rax - -$L$dec_prologue: - - mov r15,rdx - mov r13d,DWORD[240+r15] - - mov eax,DWORD[rdi] - mov ebx,DWORD[4+rdi] - mov ecx,DWORD[8+rdi] - mov edx,DWORD[12+rdi] - - shl r13d,4 - lea rbp,[r13*1+r15] - mov QWORD[rsp],r15 - mov QWORD[8+rsp],rbp - - - lea r14,[(($L$AES_Td+2048))] - lea rbp,[768+rsp] - sub rbp,r14 - and rbp,0x300 - lea r14,[rbp*1+r14] - shr rbp,3 - add r14,rbp - - call _x86_64_AES_decrypt_compact - - mov r9,QWORD[16+rsp] - mov rsi,QWORD[24+rsp] - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$dec_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_nohw_decrypt: -ALIGN 16 -global aes_nohw_set_encrypt_key - -aes_nohw_set_encrypt_key: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_nohw_set_encrypt_key: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,8 - -$L$enc_key_prologue: - - call _x86_64_AES_set_encrypt_key - - mov rbp,QWORD[40+rsp] - - mov rbx,QWORD[48+rsp] - - add rsp,56 - -$L$enc_key_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_nohw_set_encrypt_key: - - -ALIGN 16 -_x86_64_AES_set_encrypt_key: - - mov ecx,esi - mov rsi,rdi - mov 
rdi,rdx - - test rsi,-1 - jz NEAR $L$badpointer - test rdi,-1 - jz NEAR $L$badpointer - - lea rbp,[$L$AES_Te] - lea rbp,[((2048+128))+rbp] - - - mov eax,DWORD[((0-128))+rbp] - mov ebx,DWORD[((32-128))+rbp] - mov r8d,DWORD[((64-128))+rbp] - mov edx,DWORD[((96-128))+rbp] - mov eax,DWORD[((128-128))+rbp] - mov ebx,DWORD[((160-128))+rbp] - mov r8d,DWORD[((192-128))+rbp] - mov edx,DWORD[((224-128))+rbp] - - cmp ecx,128 - je NEAR $L$10rounds - cmp ecx,192 - je NEAR $L$12rounds - cmp ecx,256 - je NEAR $L$14rounds - mov rax,-2 - jmp NEAR $L$exit - -$L$10rounds: - mov rax,QWORD[rsi] - mov rdx,QWORD[8+rsi] - mov QWORD[rdi],rax - mov QWORD[8+rdi],rdx - - shr rdx,32 - xor ecx,ecx - jmp NEAR $L$10shortcut -ALIGN 4 -$L$10loop: - mov eax,DWORD[rdi] - mov edx,DWORD[12+rdi] -$L$10shortcut: - movzx esi,dl - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,24 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shr edx,16 - movzx esi,dl - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,8 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shl ebx,16 - xor eax,ebx - - xor eax,DWORD[((1024-128))+rcx*4+rbp] - mov DWORD[16+rdi],eax - xor eax,DWORD[4+rdi] - mov DWORD[20+rdi],eax - xor eax,DWORD[8+rdi] - mov DWORD[24+rdi],eax - xor eax,DWORD[12+rdi] - mov DWORD[28+rdi],eax - add ecx,1 - lea rdi,[16+rdi] - cmp ecx,10 - jl NEAR $L$10loop - - mov DWORD[80+rdi],10 - xor rax,rax - jmp NEAR $L$exit - -$L$12rounds: - mov rax,QWORD[rsi] - mov rbx,QWORD[8+rsi] - mov rdx,QWORD[16+rsi] - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - mov QWORD[16+rdi],rdx - - shr rdx,32 - xor ecx,ecx - jmp NEAR $L$12shortcut -ALIGN 4 -$L$12loop: - mov eax,DWORD[rdi] - mov edx,DWORD[20+rdi] -$L$12shortcut: - movzx esi,dl - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,24 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shr edx,16 - movzx esi,dl - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,8 - xor eax,ebx - - movzx 
ebx,BYTE[((-128))+rsi*1+rbp] - shl ebx,16 - xor eax,ebx - - xor eax,DWORD[((1024-128))+rcx*4+rbp] - mov DWORD[24+rdi],eax - xor eax,DWORD[4+rdi] - mov DWORD[28+rdi],eax - xor eax,DWORD[8+rdi] - mov DWORD[32+rdi],eax - xor eax,DWORD[12+rdi] - mov DWORD[36+rdi],eax - - cmp ecx,7 - je NEAR $L$12break - add ecx,1 - - xor eax,DWORD[16+rdi] - mov DWORD[40+rdi],eax - xor eax,DWORD[20+rdi] - mov DWORD[44+rdi],eax - - lea rdi,[24+rdi] - jmp NEAR $L$12loop -$L$12break: - mov DWORD[72+rdi],12 - xor rax,rax - jmp NEAR $L$exit - -$L$14rounds: - mov rax,QWORD[rsi] - mov rbx,QWORD[8+rsi] - mov rcx,QWORD[16+rsi] - mov rdx,QWORD[24+rsi] - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - mov QWORD[16+rdi],rcx - mov QWORD[24+rdi],rdx - - shr rdx,32 - xor ecx,ecx - jmp NEAR $L$14shortcut -ALIGN 4 -$L$14loop: - mov eax,DWORD[rdi] - mov edx,DWORD[28+rdi] -$L$14shortcut: - movzx esi,dl - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,24 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shr edx,16 - movzx esi,dl - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,8 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shl ebx,16 - xor eax,ebx - - xor eax,DWORD[((1024-128))+rcx*4+rbp] - mov DWORD[32+rdi],eax - xor eax,DWORD[4+rdi] - mov DWORD[36+rdi],eax - xor eax,DWORD[8+rdi] - mov DWORD[40+rdi],eax - xor eax,DWORD[12+rdi] - mov DWORD[44+rdi],eax - - cmp ecx,6 - je NEAR $L$14break - add ecx,1 - - mov edx,eax - mov eax,DWORD[16+rdi] - movzx esi,dl - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shr edx,16 - shl ebx,8 - movzx esi,dl - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - movzx esi,dh - shl ebx,16 - xor eax,ebx - - movzx ebx,BYTE[((-128))+rsi*1+rbp] - shl ebx,24 - xor eax,ebx - - mov DWORD[48+rdi],eax - xor eax,DWORD[20+rdi] - mov DWORD[52+rdi],eax - xor eax,DWORD[24+rdi] - mov DWORD[56+rdi],eax - xor eax,DWORD[28+rdi] - mov DWORD[60+rdi],eax - - lea rdi,[32+rdi] - jmp NEAR 
$L$14loop -$L$14break: - mov DWORD[48+rdi],14 - xor rax,rax - jmp NEAR $L$exit - -$L$badpointer: - mov rax,-1 -$L$exit: -DB 0xf3,0xc3 - - -ALIGN 16 -global aes_nohw_set_decrypt_key - -aes_nohw_set_decrypt_key: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_nohw_set_decrypt_key: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - push rdx - -$L$dec_key_prologue: - - call _x86_64_AES_set_encrypt_key - mov r8,QWORD[rsp] - cmp eax,0 - jne NEAR $L$abort - - mov r14d,DWORD[240+r8] - xor rdi,rdi - lea rcx,[r14*4+rdi] - mov rsi,r8 - lea rdi,[rcx*4+r8] -ALIGN 4 -$L$invert: - mov rax,QWORD[rsi] - mov rbx,QWORD[8+rsi] - mov rcx,QWORD[rdi] - mov rdx,QWORD[8+rdi] - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - mov QWORD[rsi],rcx - mov QWORD[8+rsi],rdx - lea rsi,[16+rsi] - lea rdi,[((-16))+rdi] - cmp rdi,rsi - jne NEAR $L$invert - - lea rax,[(($L$AES_Te+2048+1024))] - - mov rsi,QWORD[40+rax] - mov rdi,QWORD[48+rax] - mov rbp,QWORD[56+rax] - - mov r15,r8 - sub r14d,1 -ALIGN 4 -$L$permute: - lea r15,[16+r15] - mov rax,QWORD[r15] - mov rcx,QWORD[8+r15] - mov r9,rsi - mov r12,rsi - and r9,rax - and r12,rcx - mov rbx,r9 - mov rdx,r12 - shr r9,7 - lea r8,[rax*1+rax] - shr r12,7 - lea r11,[rcx*1+rcx] - sub rbx,r9 - sub rdx,r12 - and r8,rdi - and r11,rdi - and rbx,rbp - and rdx,rbp - xor r8,rbx - xor r11,rdx - mov r10,rsi - mov r13,rsi - - and r10,r8 - and r13,r11 - mov rbx,r10 - mov rdx,r13 - shr r10,7 - lea r9,[r8*1+r8] - shr r13,7 - lea r12,[r11*1+r11] - sub rbx,r10 - sub rdx,r13 - and r9,rdi - and r12,rdi - and rbx,rbp - and rdx,rbp - xor r9,rbx - xor r12,rdx - mov r10,rsi - mov r13,rsi - - and r10,r9 - and r13,r12 - mov rbx,r10 - mov rdx,r13 - shr r10,7 - xor r8,rax - shr r13,7 - xor r11,rcx - sub rbx,r10 - sub rdx,r13 - lea r10,[r9*1+r9] - lea r13,[r12*1+r12] - xor r9,rax - xor r12,rcx - and r10,rdi - and r13,rdi - and rbx,rbp - and rdx,rbp - xor r10,rbx - xor 
r13,rdx - - xor rax,r10 - xor rcx,r13 - xor r8,r10 - xor r11,r13 - mov rbx,rax - mov rdx,rcx - xor r9,r10 - shr rbx,32 - xor r12,r13 - shr rdx,32 - xor r10,r8 - rol eax,8 - xor r13,r11 - rol ecx,8 - xor r10,r9 - rol ebx,8 - xor r13,r12 - - rol edx,8 - xor eax,r10d - shr r10,32 - xor ecx,r13d - shr r13,32 - xor ebx,r10d - xor edx,r13d - - mov r10,r8 - rol r8d,24 - mov r13,r11 - rol r11d,24 - shr r10,32 - xor eax,r8d - shr r13,32 - xor ecx,r11d - rol r10d,24 - mov r8,r9 - rol r13d,24 - mov r11,r12 - shr r8,32 - xor ebx,r10d - shr r11,32 - xor edx,r13d - - - rol r9d,16 - - rol r12d,16 - - rol r8d,16 - - xor eax,r9d - rol r11d,16 - xor ecx,r12d - - xor ebx,r8d - xor edx,r11d - mov DWORD[r15],eax - mov DWORD[4+r15],ebx - mov DWORD[8+r15],ecx - mov DWORD[12+r15],edx - sub r14d,1 - jnz NEAR $L$permute - - xor rax,rax -$L$abort: - mov r15,QWORD[8+rsp] - - mov r14,QWORD[16+rsp] - - mov r13,QWORD[24+rsp] - - mov r12,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - mov rbx,QWORD[48+rsp] - - add rsp,56 - -$L$dec_key_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_nohw_set_decrypt_key: -ALIGN 16 -global aes_nohw_cbc_encrypt - -EXTERN OPENSSL_ia32cap_P - -aes_nohw_cbc_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_nohw_cbc_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - cmp rdx,0 - je NEAR $L$cbc_epilogue - pushfq - - - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$cbc_prologue: - - cld - mov r9d,r9d - - lea r14,[$L$AES_Te] - lea r10,[$L$AES_Td] - cmp r9,0 - cmove r14,r10 - - - lea r10,[OPENSSL_ia32cap_P] - mov r10d,DWORD[r10] - cmp rdx,512 - jb NEAR $L$cbc_slow_prologue - test rdx,15 - jnz NEAR $L$cbc_slow_prologue - bt r10d,28 - jc NEAR $L$cbc_slow_prologue - - - lea r15,[((-88-248))+rsp] - and r15,-64 - - - mov r10,r14 - lea r11,[2304+r14] - mov r12,r15 - 
and r10,0xFFF - and r11,0xFFF - and r12,0xFFF - - cmp r12,r11 - jb NEAR $L$cbc_te_break_out - sub r12,r11 - sub r15,r12 - jmp NEAR $L$cbc_te_ok -$L$cbc_te_break_out: - sub r12,r10 - and r12,0xFFF - add r12,320 - sub r15,r12 -ALIGN 4 -$L$cbc_te_ok: - - xchg r15,rsp - - - mov QWORD[16+rsp],r15 - -$L$cbc_fast_body: - mov QWORD[24+rsp],rdi - mov QWORD[32+rsp],rsi - mov QWORD[40+rsp],rdx - mov QWORD[48+rsp],rcx - mov QWORD[56+rsp],r8 - mov DWORD[((80+240))+rsp],0 - mov rbp,r8 - mov rbx,r9 - mov r9,rsi - mov r8,rdi - mov r15,rcx - - mov eax,DWORD[240+r15] - - mov r10,r15 - sub r10,r14 - and r10,0xfff - cmp r10,2304 - jb NEAR $L$cbc_do_ecopy - cmp r10,4096-248 - jb NEAR $L$cbc_skip_ecopy -ALIGN 4 -$L$cbc_do_ecopy: - mov rsi,r15 - lea rdi,[80+rsp] - lea r15,[80+rsp] - mov ecx,240/8 - DD 0x90A548F3 - mov DWORD[rdi],eax -$L$cbc_skip_ecopy: - mov QWORD[rsp],r15 - - mov ecx,18 -ALIGN 4 -$L$cbc_prefetch_te: - mov r10,QWORD[r14] - mov r11,QWORD[32+r14] - mov r12,QWORD[64+r14] - mov r13,QWORD[96+r14] - lea r14,[128+r14] - sub ecx,1 - jnz NEAR $L$cbc_prefetch_te - lea r14,[((-2304))+r14] - - cmp rbx,0 - je NEAR $L$FAST_DECRYPT - - - mov eax,DWORD[rbp] - mov ebx,DWORD[4+rbp] - mov ecx,DWORD[8+rbp] - mov edx,DWORD[12+rbp] - -ALIGN 4 -$L$cbc_fast_enc_loop: - xor eax,DWORD[r8] - xor ebx,DWORD[4+r8] - xor ecx,DWORD[8+r8] - xor edx,DWORD[12+r8] - mov r15,QWORD[rsp] - mov QWORD[24+rsp],r8 - - call _x86_64_AES_encrypt - - mov r8,QWORD[24+rsp] - mov r10,QWORD[40+rsp] - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - lea r8,[16+r8] - lea r9,[16+r9] - sub r10,16 - test r10,-16 - mov QWORD[40+rsp],r10 - jnz NEAR $L$cbc_fast_enc_loop - mov rbp,QWORD[56+rsp] - mov DWORD[rbp],eax - mov DWORD[4+rbp],ebx - mov DWORD[8+rbp],ecx - mov DWORD[12+rbp],edx - - jmp NEAR $L$cbc_fast_cleanup - - -ALIGN 16 -$L$FAST_DECRYPT: - cmp r9,r8 - je NEAR $L$cbc_fast_dec_in_place - - mov QWORD[64+rsp],rbp -ALIGN 4 -$L$cbc_fast_dec_loop: - mov eax,DWORD[r8] - mov 
ebx,DWORD[4+r8] - mov ecx,DWORD[8+r8] - mov edx,DWORD[12+r8] - mov r15,QWORD[rsp] - mov QWORD[24+rsp],r8 - - call _x86_64_AES_decrypt - - mov rbp,QWORD[64+rsp] - mov r8,QWORD[24+rsp] - mov r10,QWORD[40+rsp] - xor eax,DWORD[rbp] - xor ebx,DWORD[4+rbp] - xor ecx,DWORD[8+rbp] - xor edx,DWORD[12+rbp] - mov rbp,r8 - - sub r10,16 - mov QWORD[40+rsp],r10 - mov QWORD[64+rsp],rbp - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - lea r8,[16+r8] - lea r9,[16+r9] - jnz NEAR $L$cbc_fast_dec_loop - mov r12,QWORD[56+rsp] - mov r10,QWORD[rbp] - mov r11,QWORD[8+rbp] - mov QWORD[r12],r10 - mov QWORD[8+r12],r11 - jmp NEAR $L$cbc_fast_cleanup - -ALIGN 16 -$L$cbc_fast_dec_in_place: - mov r10,QWORD[rbp] - mov r11,QWORD[8+rbp] - mov QWORD[((0+64))+rsp],r10 - mov QWORD[((8+64))+rsp],r11 -ALIGN 4 -$L$cbc_fast_dec_in_place_loop: - mov eax,DWORD[r8] - mov ebx,DWORD[4+r8] - mov ecx,DWORD[8+r8] - mov edx,DWORD[12+r8] - mov r15,QWORD[rsp] - mov QWORD[24+rsp],r8 - - call _x86_64_AES_decrypt - - mov r8,QWORD[24+rsp] - mov r10,QWORD[40+rsp] - xor eax,DWORD[((0+64))+rsp] - xor ebx,DWORD[((4+64))+rsp] - xor ecx,DWORD[((8+64))+rsp] - xor edx,DWORD[((12+64))+rsp] - - mov r11,QWORD[r8] - mov r12,QWORD[8+r8] - sub r10,16 - jz NEAR $L$cbc_fast_dec_in_place_done - - mov QWORD[((0+64))+rsp],r11 - mov QWORD[((8+64))+rsp],r12 - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - lea r8,[16+r8] - lea r9,[16+r9] - mov QWORD[40+rsp],r10 - jmp NEAR $L$cbc_fast_dec_in_place_loop -$L$cbc_fast_dec_in_place_done: - mov rdi,QWORD[56+rsp] - mov QWORD[rdi],r11 - mov QWORD[8+rdi],r12 - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - -ALIGN 4 -$L$cbc_fast_cleanup: - cmp DWORD[((80+240))+rsp],0 - lea rdi,[80+rsp] - je NEAR $L$cbc_exit - mov ecx,240/8 - xor rax,rax - DD 0x90AB48F3 - - jmp NEAR $L$cbc_exit - - -ALIGN 16 -$L$cbc_slow_prologue: - - - lea rbp,[((-88))+rsp] - and rbp,-64 - - lea 
r10,[((-88-63))+rcx] - sub r10,rbp - neg r10 - and r10,0x3c0 - sub rbp,r10 - - xchg rbp,rsp - - - mov QWORD[16+rsp],rbp - -$L$cbc_slow_body: - - - - - mov QWORD[56+rsp],r8 - mov rbp,r8 - mov rbx,r9 - mov r9,rsi - mov r8,rdi - mov r15,rcx - mov r10,rdx - - mov eax,DWORD[240+r15] - mov QWORD[rsp],r15 - shl eax,4 - lea rax,[rax*1+r15] - mov QWORD[8+rsp],rax - - - lea r14,[2048+r14] - lea rax,[((768-8))+rsp] - sub rax,r14 - and rax,0x300 - lea r14,[rax*1+r14] - - cmp rbx,0 - je NEAR $L$SLOW_DECRYPT - - - test r10,-16 - mov eax,DWORD[rbp] - mov ebx,DWORD[4+rbp] - mov ecx,DWORD[8+rbp] - mov edx,DWORD[12+rbp] - jz NEAR $L$cbc_slow_enc_tail - -ALIGN 4 -$L$cbc_slow_enc_loop: - xor eax,DWORD[r8] - xor ebx,DWORD[4+r8] - xor ecx,DWORD[8+r8] - xor edx,DWORD[12+r8] - mov r15,QWORD[rsp] - mov QWORD[24+rsp],r8 - mov QWORD[32+rsp],r9 - mov QWORD[40+rsp],r10 - - call _x86_64_AES_encrypt_compact - - mov r8,QWORD[24+rsp] - mov r9,QWORD[32+rsp] - mov r10,QWORD[40+rsp] - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - lea r8,[16+r8] - lea r9,[16+r9] - sub r10,16 - test r10,-16 - jnz NEAR $L$cbc_slow_enc_loop - test r10,15 - jnz NEAR $L$cbc_slow_enc_tail - mov rbp,QWORD[56+rsp] - mov DWORD[rbp],eax - mov DWORD[4+rbp],ebx - mov DWORD[8+rbp],ecx - mov DWORD[12+rbp],edx - - jmp NEAR $L$cbc_exit - -ALIGN 4 -$L$cbc_slow_enc_tail: - mov r11,rax - mov r12,rcx - mov rcx,r10 - mov rsi,r8 - mov rdi,r9 - DD 0x9066A4F3 - mov rcx,16 - sub rcx,r10 - xor rax,rax - DD 0x9066AAF3 - mov r8,r9 - mov r10,16 - mov rax,r11 - mov rcx,r12 - jmp NEAR $L$cbc_slow_enc_loop - -ALIGN 16 -$L$SLOW_DECRYPT: - shr rax,3 - add r14,rax - - mov r11,QWORD[rbp] - mov r12,QWORD[8+rbp] - mov QWORD[((0+64))+rsp],r11 - mov QWORD[((8+64))+rsp],r12 - -ALIGN 4 -$L$cbc_slow_dec_loop: - mov eax,DWORD[r8] - mov ebx,DWORD[4+r8] - mov ecx,DWORD[8+r8] - mov edx,DWORD[12+r8] - mov r15,QWORD[rsp] - mov QWORD[24+rsp],r8 - mov QWORD[32+rsp],r9 - mov QWORD[40+rsp],r10 - - call 
_x86_64_AES_decrypt_compact - - mov r8,QWORD[24+rsp] - mov r9,QWORD[32+rsp] - mov r10,QWORD[40+rsp] - xor eax,DWORD[((0+64))+rsp] - xor ebx,DWORD[((4+64))+rsp] - xor ecx,DWORD[((8+64))+rsp] - xor edx,DWORD[((12+64))+rsp] - - mov r11,QWORD[r8] - mov r12,QWORD[8+r8] - sub r10,16 - jc NEAR $L$cbc_slow_dec_partial - jz NEAR $L$cbc_slow_dec_done - - mov QWORD[((0+64))+rsp],r11 - mov QWORD[((8+64))+rsp],r12 - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - lea r8,[16+r8] - lea r9,[16+r9] - jmp NEAR $L$cbc_slow_dec_loop -$L$cbc_slow_dec_done: - mov rdi,QWORD[56+rsp] - mov QWORD[rdi],r11 - mov QWORD[8+rdi],r12 - - mov DWORD[r9],eax - mov DWORD[4+r9],ebx - mov DWORD[8+r9],ecx - mov DWORD[12+r9],edx - - jmp NEAR $L$cbc_exit - -ALIGN 4 -$L$cbc_slow_dec_partial: - mov rdi,QWORD[56+rsp] - mov QWORD[rdi],r11 - mov QWORD[8+rdi],r12 - - mov DWORD[((0+64))+rsp],eax - mov DWORD[((4+64))+rsp],ebx - mov DWORD[((8+64))+rsp],ecx - mov DWORD[((12+64))+rsp],edx - - mov rdi,r9 - lea rsi,[64+rsp] - lea rcx,[16+r10] - DD 0x9066A4F3 - jmp NEAR $L$cbc_exit - -ALIGN 16 -$L$cbc_exit: - mov rsi,QWORD[16+rsp] - - mov r15,QWORD[rsi] - - mov r14,QWORD[8+rsi] - - mov r13,QWORD[16+rsi] - - mov r12,QWORD[24+rsi] - - mov rbp,QWORD[32+rsi] - - mov rbx,QWORD[40+rsi] - - lea rsp,[48+rsi] - -$L$cbc_popfq: - popfq - - - -$L$cbc_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_nohw_cbc_encrypt: -ALIGN 64 -$L$AES_Te: - DD 0xa56363c6,0xa56363c6 - DD 0x847c7cf8,0x847c7cf8 - DD 0x997777ee,0x997777ee - DD 0x8d7b7bf6,0x8d7b7bf6 - DD 0x0df2f2ff,0x0df2f2ff - DD 0xbd6b6bd6,0xbd6b6bd6 - DD 0xb16f6fde,0xb16f6fde - DD 0x54c5c591,0x54c5c591 - DD 0x50303060,0x50303060 - DD 0x03010102,0x03010102 - DD 0xa96767ce,0xa96767ce - DD 0x7d2b2b56,0x7d2b2b56 - DD 0x19fefee7,0x19fefee7 - DD 0x62d7d7b5,0x62d7d7b5 - DD 0xe6abab4d,0xe6abab4d - DD 0x9a7676ec,0x9a7676ec - DD 0x45caca8f,0x45caca8f - DD 0x9d82821f,0x9d82821f - DD 
0x40c9c989,0x40c9c989 - DD 0x877d7dfa,0x877d7dfa - DD 0x15fafaef,0x15fafaef - DD 0xeb5959b2,0xeb5959b2 - DD 0xc947478e,0xc947478e - DD 0x0bf0f0fb,0x0bf0f0fb - DD 0xecadad41,0xecadad41 - DD 0x67d4d4b3,0x67d4d4b3 - DD 0xfda2a25f,0xfda2a25f - DD 0xeaafaf45,0xeaafaf45 - DD 0xbf9c9c23,0xbf9c9c23 - DD 0xf7a4a453,0xf7a4a453 - DD 0x967272e4,0x967272e4 - DD 0x5bc0c09b,0x5bc0c09b - DD 0xc2b7b775,0xc2b7b775 - DD 0x1cfdfde1,0x1cfdfde1 - DD 0xae93933d,0xae93933d - DD 0x6a26264c,0x6a26264c - DD 0x5a36366c,0x5a36366c - DD 0x413f3f7e,0x413f3f7e - DD 0x02f7f7f5,0x02f7f7f5 - DD 0x4fcccc83,0x4fcccc83 - DD 0x5c343468,0x5c343468 - DD 0xf4a5a551,0xf4a5a551 - DD 0x34e5e5d1,0x34e5e5d1 - DD 0x08f1f1f9,0x08f1f1f9 - DD 0x937171e2,0x937171e2 - DD 0x73d8d8ab,0x73d8d8ab - DD 0x53313162,0x53313162 - DD 0x3f15152a,0x3f15152a - DD 0x0c040408,0x0c040408 - DD 0x52c7c795,0x52c7c795 - DD 0x65232346,0x65232346 - DD 0x5ec3c39d,0x5ec3c39d - DD 0x28181830,0x28181830 - DD 0xa1969637,0xa1969637 - DD 0x0f05050a,0x0f05050a - DD 0xb59a9a2f,0xb59a9a2f - DD 0x0907070e,0x0907070e - DD 0x36121224,0x36121224 - DD 0x9b80801b,0x9b80801b - DD 0x3de2e2df,0x3de2e2df - DD 0x26ebebcd,0x26ebebcd - DD 0x6927274e,0x6927274e - DD 0xcdb2b27f,0xcdb2b27f - DD 0x9f7575ea,0x9f7575ea - DD 0x1b090912,0x1b090912 - DD 0x9e83831d,0x9e83831d - DD 0x742c2c58,0x742c2c58 - DD 0x2e1a1a34,0x2e1a1a34 - DD 0x2d1b1b36,0x2d1b1b36 - DD 0xb26e6edc,0xb26e6edc - DD 0xee5a5ab4,0xee5a5ab4 - DD 0xfba0a05b,0xfba0a05b - DD 0xf65252a4,0xf65252a4 - DD 0x4d3b3b76,0x4d3b3b76 - DD 0x61d6d6b7,0x61d6d6b7 - DD 0xceb3b37d,0xceb3b37d - DD 0x7b292952,0x7b292952 - DD 0x3ee3e3dd,0x3ee3e3dd - DD 0x712f2f5e,0x712f2f5e - DD 0x97848413,0x97848413 - DD 0xf55353a6,0xf55353a6 - DD 0x68d1d1b9,0x68d1d1b9 - DD 0x00000000,0x00000000 - DD 0x2cededc1,0x2cededc1 - DD 0x60202040,0x60202040 - DD 0x1ffcfce3,0x1ffcfce3 - DD 0xc8b1b179,0xc8b1b179 - DD 0xed5b5bb6,0xed5b5bb6 - DD 0xbe6a6ad4,0xbe6a6ad4 - DD 0x46cbcb8d,0x46cbcb8d - DD 0xd9bebe67,0xd9bebe67 - DD 0x4b393972,0x4b393972 - DD 
0xde4a4a94,0xde4a4a94 - DD 0xd44c4c98,0xd44c4c98 - DD 0xe85858b0,0xe85858b0 - DD 0x4acfcf85,0x4acfcf85 - DD 0x6bd0d0bb,0x6bd0d0bb - DD 0x2aefefc5,0x2aefefc5 - DD 0xe5aaaa4f,0xe5aaaa4f - DD 0x16fbfbed,0x16fbfbed - DD 0xc5434386,0xc5434386 - DD 0xd74d4d9a,0xd74d4d9a - DD 0x55333366,0x55333366 - DD 0x94858511,0x94858511 - DD 0xcf45458a,0xcf45458a - DD 0x10f9f9e9,0x10f9f9e9 - DD 0x06020204,0x06020204 - DD 0x817f7ffe,0x817f7ffe - DD 0xf05050a0,0xf05050a0 - DD 0x443c3c78,0x443c3c78 - DD 0xba9f9f25,0xba9f9f25 - DD 0xe3a8a84b,0xe3a8a84b - DD 0xf35151a2,0xf35151a2 - DD 0xfea3a35d,0xfea3a35d - DD 0xc0404080,0xc0404080 - DD 0x8a8f8f05,0x8a8f8f05 - DD 0xad92923f,0xad92923f - DD 0xbc9d9d21,0xbc9d9d21 - DD 0x48383870,0x48383870 - DD 0x04f5f5f1,0x04f5f5f1 - DD 0xdfbcbc63,0xdfbcbc63 - DD 0xc1b6b677,0xc1b6b677 - DD 0x75dadaaf,0x75dadaaf - DD 0x63212142,0x63212142 - DD 0x30101020,0x30101020 - DD 0x1affffe5,0x1affffe5 - DD 0x0ef3f3fd,0x0ef3f3fd - DD 0x6dd2d2bf,0x6dd2d2bf - DD 0x4ccdcd81,0x4ccdcd81 - DD 0x140c0c18,0x140c0c18 - DD 0x35131326,0x35131326 - DD 0x2fececc3,0x2fececc3 - DD 0xe15f5fbe,0xe15f5fbe - DD 0xa2979735,0xa2979735 - DD 0xcc444488,0xcc444488 - DD 0x3917172e,0x3917172e - DD 0x57c4c493,0x57c4c493 - DD 0xf2a7a755,0xf2a7a755 - DD 0x827e7efc,0x827e7efc - DD 0x473d3d7a,0x473d3d7a - DD 0xac6464c8,0xac6464c8 - DD 0xe75d5dba,0xe75d5dba - DD 0x2b191932,0x2b191932 - DD 0x957373e6,0x957373e6 - DD 0xa06060c0,0xa06060c0 - DD 0x98818119,0x98818119 - DD 0xd14f4f9e,0xd14f4f9e - DD 0x7fdcdca3,0x7fdcdca3 - DD 0x66222244,0x66222244 - DD 0x7e2a2a54,0x7e2a2a54 - DD 0xab90903b,0xab90903b - DD 0x8388880b,0x8388880b - DD 0xca46468c,0xca46468c - DD 0x29eeeec7,0x29eeeec7 - DD 0xd3b8b86b,0xd3b8b86b - DD 0x3c141428,0x3c141428 - DD 0x79dedea7,0x79dedea7 - DD 0xe25e5ebc,0xe25e5ebc - DD 0x1d0b0b16,0x1d0b0b16 - DD 0x76dbdbad,0x76dbdbad - DD 0x3be0e0db,0x3be0e0db - DD 0x56323264,0x56323264 - DD 0x4e3a3a74,0x4e3a3a74 - DD 0x1e0a0a14,0x1e0a0a14 - DD 0xdb494992,0xdb494992 - DD 0x0a06060c,0x0a06060c - DD 
0x6c242448,0x6c242448 - DD 0xe45c5cb8,0xe45c5cb8 - DD 0x5dc2c29f,0x5dc2c29f - DD 0x6ed3d3bd,0x6ed3d3bd - DD 0xefacac43,0xefacac43 - DD 0xa66262c4,0xa66262c4 - DD 0xa8919139,0xa8919139 - DD 0xa4959531,0xa4959531 - DD 0x37e4e4d3,0x37e4e4d3 - DD 0x8b7979f2,0x8b7979f2 - DD 0x32e7e7d5,0x32e7e7d5 - DD 0x43c8c88b,0x43c8c88b - DD 0x5937376e,0x5937376e - DD 0xb76d6dda,0xb76d6dda - DD 0x8c8d8d01,0x8c8d8d01 - DD 0x64d5d5b1,0x64d5d5b1 - DD 0xd24e4e9c,0xd24e4e9c - DD 0xe0a9a949,0xe0a9a949 - DD 0xb46c6cd8,0xb46c6cd8 - DD 0xfa5656ac,0xfa5656ac - DD 0x07f4f4f3,0x07f4f4f3 - DD 0x25eaeacf,0x25eaeacf - DD 0xaf6565ca,0xaf6565ca - DD 0x8e7a7af4,0x8e7a7af4 - DD 0xe9aeae47,0xe9aeae47 - DD 0x18080810,0x18080810 - DD 0xd5baba6f,0xd5baba6f - DD 0x887878f0,0x887878f0 - DD 0x6f25254a,0x6f25254a - DD 0x722e2e5c,0x722e2e5c - DD 0x241c1c38,0x241c1c38 - DD 0xf1a6a657,0xf1a6a657 - DD 0xc7b4b473,0xc7b4b473 - DD 0x51c6c697,0x51c6c697 - DD 0x23e8e8cb,0x23e8e8cb - DD 0x7cdddda1,0x7cdddda1 - DD 0x9c7474e8,0x9c7474e8 - DD 0x211f1f3e,0x211f1f3e - DD 0xdd4b4b96,0xdd4b4b96 - DD 0xdcbdbd61,0xdcbdbd61 - DD 0x868b8b0d,0x868b8b0d - DD 0x858a8a0f,0x858a8a0f - DD 0x907070e0,0x907070e0 - DD 0x423e3e7c,0x423e3e7c - DD 0xc4b5b571,0xc4b5b571 - DD 0xaa6666cc,0xaa6666cc - DD 0xd8484890,0xd8484890 - DD 0x05030306,0x05030306 - DD 0x01f6f6f7,0x01f6f6f7 - DD 0x120e0e1c,0x120e0e1c - DD 0xa36161c2,0xa36161c2 - DD 0x5f35356a,0x5f35356a - DD 0xf95757ae,0xf95757ae - DD 0xd0b9b969,0xd0b9b969 - DD 0x91868617,0x91868617 - DD 0x58c1c199,0x58c1c199 - DD 0x271d1d3a,0x271d1d3a - DD 0xb99e9e27,0xb99e9e27 - DD 0x38e1e1d9,0x38e1e1d9 - DD 0x13f8f8eb,0x13f8f8eb - DD 0xb398982b,0xb398982b - DD 0x33111122,0x33111122 - DD 0xbb6969d2,0xbb6969d2 - DD 0x70d9d9a9,0x70d9d9a9 - DD 0x898e8e07,0x898e8e07 - DD 0xa7949433,0xa7949433 - DD 0xb69b9b2d,0xb69b9b2d - DD 0x221e1e3c,0x221e1e3c - DD 0x92878715,0x92878715 - DD 0x20e9e9c9,0x20e9e9c9 - DD 0x49cece87,0x49cece87 - DD 0xff5555aa,0xff5555aa - DD 0x78282850,0x78282850 - DD 0x7adfdfa5,0x7adfdfa5 - DD 
0x8f8c8c03,0x8f8c8c03 - DD 0xf8a1a159,0xf8a1a159 - DD 0x80898909,0x80898909 - DD 0x170d0d1a,0x170d0d1a - DD 0xdabfbf65,0xdabfbf65 - DD 0x31e6e6d7,0x31e6e6d7 - DD 0xc6424284,0xc6424284 - DD 0xb86868d0,0xb86868d0 - DD 0xc3414182,0xc3414182 - DD 0xb0999929,0xb0999929 - DD 0x772d2d5a,0x772d2d5a - DD 0x110f0f1e,0x110f0f1e - DD 0xcbb0b07b,0xcbb0b07b - DD 0xfc5454a8,0xfc5454a8 - DD 0xd6bbbb6d,0xd6bbbb6d - DD 0x3a16162c,0x3a16162c -DB 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -DB 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -DB 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -DB 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -DB 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -DB 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -DB 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -DB 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -DB 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -DB 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -DB 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -DB 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -DB 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -DB 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -DB 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -DB 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -DB 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -DB 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -DB 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -DB 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -DB 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -DB 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -DB 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -DB 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -DB 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -DB 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -DB 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -DB 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -DB 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -DB 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -DB 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -DB 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -DB 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -DB 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -DB 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -DB 
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -DB 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -DB 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -DB 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -DB 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -DB 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -DB 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -DB 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -DB 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -DB 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -DB 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -DB 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -DB 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -DB 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -DB 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -DB 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -DB 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -DB 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -DB 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -DB 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -DB 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -DB 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -DB 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -DB 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -DB 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -DB 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -DB 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -DB 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -DB 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -DB 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -DB 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -DB 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -DB 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -DB 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -DB 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -DB 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -DB 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -DB 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -DB 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -DB 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -DB 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -DB 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -DB 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -DB 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -DB 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -DB 
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -DB 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -DB 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -DB 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -DB 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -DB 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -DB 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -DB 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -DB 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -DB 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -DB 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -DB 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -DB 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -DB 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -DB 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -DB 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 -DB 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5 -DB 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76 -DB 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0 -DB 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0 -DB 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc -DB 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15 -DB 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a -DB 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75 -DB 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0 -DB 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84 -DB 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b -DB 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf -DB 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85 -DB 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8 -DB 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5 -DB 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2 -DB 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17 -DB 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73 -DB 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88 -DB 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb -DB 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c -DB 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79 -DB 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9 -DB 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08 -DB 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6 -DB 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a -DB 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e -DB 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e -DB 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94 -DB 
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf -DB 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68 -DB 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 - DD 0x00000001,0x00000002,0x00000004,0x00000008 - DD 0x00000010,0x00000020,0x00000040,0x00000080 - DD 0x0000001b,0x00000036,0x80808080,0x80808080 - DD 0xfefefefe,0xfefefefe,0x1b1b1b1b,0x1b1b1b1b -ALIGN 64 -$L$AES_Td: - DD 0x50a7f451,0x50a7f451 - DD 0x5365417e,0x5365417e - DD 0xc3a4171a,0xc3a4171a - DD 0x965e273a,0x965e273a - DD 0xcb6bab3b,0xcb6bab3b - DD 0xf1459d1f,0xf1459d1f - DD 0xab58faac,0xab58faac - DD 0x9303e34b,0x9303e34b - DD 0x55fa3020,0x55fa3020 - DD 0xf66d76ad,0xf66d76ad - DD 0x9176cc88,0x9176cc88 - DD 0x254c02f5,0x254c02f5 - DD 0xfcd7e54f,0xfcd7e54f - DD 0xd7cb2ac5,0xd7cb2ac5 - DD 0x80443526,0x80443526 - DD 0x8fa362b5,0x8fa362b5 - DD 0x495ab1de,0x495ab1de - DD 0x671bba25,0x671bba25 - DD 0x980eea45,0x980eea45 - DD 0xe1c0fe5d,0xe1c0fe5d - DD 0x02752fc3,0x02752fc3 - DD 0x12f04c81,0x12f04c81 - DD 0xa397468d,0xa397468d - DD 0xc6f9d36b,0xc6f9d36b - DD 0xe75f8f03,0xe75f8f03 - DD 0x959c9215,0x959c9215 - DD 0xeb7a6dbf,0xeb7a6dbf - DD 0xda595295,0xda595295 - DD 0x2d83bed4,0x2d83bed4 - DD 0xd3217458,0xd3217458 - DD 0x2969e049,0x2969e049 - DD 0x44c8c98e,0x44c8c98e - DD 0x6a89c275,0x6a89c275 - DD 0x78798ef4,0x78798ef4 - DD 0x6b3e5899,0x6b3e5899 - DD 0xdd71b927,0xdd71b927 - DD 0xb64fe1be,0xb64fe1be - DD 0x17ad88f0,0x17ad88f0 - DD 0x66ac20c9,0x66ac20c9 - DD 0xb43ace7d,0xb43ace7d - DD 0x184adf63,0x184adf63 - DD 0x82311ae5,0x82311ae5 - DD 0x60335197,0x60335197 - DD 0x457f5362,0x457f5362 - DD 0xe07764b1,0xe07764b1 - DD 0x84ae6bbb,0x84ae6bbb - DD 0x1ca081fe,0x1ca081fe - DD 0x942b08f9,0x942b08f9 - DD 0x58684870,0x58684870 - DD 0x19fd458f,0x19fd458f - DD 0x876cde94,0x876cde94 - DD 0xb7f87b52,0xb7f87b52 - DD 0x23d373ab,0x23d373ab - DD 0xe2024b72,0xe2024b72 - DD 0x578f1fe3,0x578f1fe3 - DD 0x2aab5566,0x2aab5566 - DD 0x0728ebb2,0x0728ebb2 - DD 0x03c2b52f,0x03c2b52f - DD 0x9a7bc586,0x9a7bc586 - DD 0xa50837d3,0xa50837d3 - DD 0xf2872830,0xf2872830 - DD 
0xb2a5bf23,0xb2a5bf23 - DD 0xba6a0302,0xba6a0302 - DD 0x5c8216ed,0x5c8216ed - DD 0x2b1ccf8a,0x2b1ccf8a - DD 0x92b479a7,0x92b479a7 - DD 0xf0f207f3,0xf0f207f3 - DD 0xa1e2694e,0xa1e2694e - DD 0xcdf4da65,0xcdf4da65 - DD 0xd5be0506,0xd5be0506 - DD 0x1f6234d1,0x1f6234d1 - DD 0x8afea6c4,0x8afea6c4 - DD 0x9d532e34,0x9d532e34 - DD 0xa055f3a2,0xa055f3a2 - DD 0x32e18a05,0x32e18a05 - DD 0x75ebf6a4,0x75ebf6a4 - DD 0x39ec830b,0x39ec830b - DD 0xaaef6040,0xaaef6040 - DD 0x069f715e,0x069f715e - DD 0x51106ebd,0x51106ebd - DD 0xf98a213e,0xf98a213e - DD 0x3d06dd96,0x3d06dd96 - DD 0xae053edd,0xae053edd - DD 0x46bde64d,0x46bde64d - DD 0xb58d5491,0xb58d5491 - DD 0x055dc471,0x055dc471 - DD 0x6fd40604,0x6fd40604 - DD 0xff155060,0xff155060 - DD 0x24fb9819,0x24fb9819 - DD 0x97e9bdd6,0x97e9bdd6 - DD 0xcc434089,0xcc434089 - DD 0x779ed967,0x779ed967 - DD 0xbd42e8b0,0xbd42e8b0 - DD 0x888b8907,0x888b8907 - DD 0x385b19e7,0x385b19e7 - DD 0xdbeec879,0xdbeec879 - DD 0x470a7ca1,0x470a7ca1 - DD 0xe90f427c,0xe90f427c - DD 0xc91e84f8,0xc91e84f8 - DD 0x00000000,0x00000000 - DD 0x83868009,0x83868009 - DD 0x48ed2b32,0x48ed2b32 - DD 0xac70111e,0xac70111e - DD 0x4e725a6c,0x4e725a6c - DD 0xfbff0efd,0xfbff0efd - DD 0x5638850f,0x5638850f - DD 0x1ed5ae3d,0x1ed5ae3d - DD 0x27392d36,0x27392d36 - DD 0x64d90f0a,0x64d90f0a - DD 0x21a65c68,0x21a65c68 - DD 0xd1545b9b,0xd1545b9b - DD 0x3a2e3624,0x3a2e3624 - DD 0xb1670a0c,0xb1670a0c - DD 0x0fe75793,0x0fe75793 - DD 0xd296eeb4,0xd296eeb4 - DD 0x9e919b1b,0x9e919b1b - DD 0x4fc5c080,0x4fc5c080 - DD 0xa220dc61,0xa220dc61 - DD 0x694b775a,0x694b775a - DD 0x161a121c,0x161a121c - DD 0x0aba93e2,0x0aba93e2 - DD 0xe52aa0c0,0xe52aa0c0 - DD 0x43e0223c,0x43e0223c - DD 0x1d171b12,0x1d171b12 - DD 0x0b0d090e,0x0b0d090e - DD 0xadc78bf2,0xadc78bf2 - DD 0xb9a8b62d,0xb9a8b62d - DD 0xc8a91e14,0xc8a91e14 - DD 0x8519f157,0x8519f157 - DD 0x4c0775af,0x4c0775af - DD 0xbbdd99ee,0xbbdd99ee - DD 0xfd607fa3,0xfd607fa3 - DD 0x9f2601f7,0x9f2601f7 - DD 0xbcf5725c,0xbcf5725c - DD 0xc53b6644,0xc53b6644 - DD 
0x347efb5b,0x347efb5b - DD 0x7629438b,0x7629438b - DD 0xdcc623cb,0xdcc623cb - DD 0x68fcedb6,0x68fcedb6 - DD 0x63f1e4b8,0x63f1e4b8 - DD 0xcadc31d7,0xcadc31d7 - DD 0x10856342,0x10856342 - DD 0x40229713,0x40229713 - DD 0x2011c684,0x2011c684 - DD 0x7d244a85,0x7d244a85 - DD 0xf83dbbd2,0xf83dbbd2 - DD 0x1132f9ae,0x1132f9ae - DD 0x6da129c7,0x6da129c7 - DD 0x4b2f9e1d,0x4b2f9e1d - DD 0xf330b2dc,0xf330b2dc - DD 0xec52860d,0xec52860d - DD 0xd0e3c177,0xd0e3c177 - DD 0x6c16b32b,0x6c16b32b - DD 0x99b970a9,0x99b970a9 - DD 0xfa489411,0xfa489411 - DD 0x2264e947,0x2264e947 - DD 0xc48cfca8,0xc48cfca8 - DD 0x1a3ff0a0,0x1a3ff0a0 - DD 0xd82c7d56,0xd82c7d56 - DD 0xef903322,0xef903322 - DD 0xc74e4987,0xc74e4987 - DD 0xc1d138d9,0xc1d138d9 - DD 0xfea2ca8c,0xfea2ca8c - DD 0x360bd498,0x360bd498 - DD 0xcf81f5a6,0xcf81f5a6 - DD 0x28de7aa5,0x28de7aa5 - DD 0x268eb7da,0x268eb7da - DD 0xa4bfad3f,0xa4bfad3f - DD 0xe49d3a2c,0xe49d3a2c - DD 0x0d927850,0x0d927850 - DD 0x9bcc5f6a,0x9bcc5f6a - DD 0x62467e54,0x62467e54 - DD 0xc2138df6,0xc2138df6 - DD 0xe8b8d890,0xe8b8d890 - DD 0x5ef7392e,0x5ef7392e - DD 0xf5afc382,0xf5afc382 - DD 0xbe805d9f,0xbe805d9f - DD 0x7c93d069,0x7c93d069 - DD 0xa92dd56f,0xa92dd56f - DD 0xb31225cf,0xb31225cf - DD 0x3b99acc8,0x3b99acc8 - DD 0xa77d1810,0xa77d1810 - DD 0x6e639ce8,0x6e639ce8 - DD 0x7bbb3bdb,0x7bbb3bdb - DD 0x097826cd,0x097826cd - DD 0xf418596e,0xf418596e - DD 0x01b79aec,0x01b79aec - DD 0xa89a4f83,0xa89a4f83 - DD 0x656e95e6,0x656e95e6 - DD 0x7ee6ffaa,0x7ee6ffaa - DD 0x08cfbc21,0x08cfbc21 - DD 0xe6e815ef,0xe6e815ef - DD 0xd99be7ba,0xd99be7ba - DD 0xce366f4a,0xce366f4a - DD 0xd4099fea,0xd4099fea - DD 0xd67cb029,0xd67cb029 - DD 0xafb2a431,0xafb2a431 - DD 0x31233f2a,0x31233f2a - DD 0x3094a5c6,0x3094a5c6 - DD 0xc066a235,0xc066a235 - DD 0x37bc4e74,0x37bc4e74 - DD 0xa6ca82fc,0xa6ca82fc - DD 0xb0d090e0,0xb0d090e0 - DD 0x15d8a733,0x15d8a733 - DD 0x4a9804f1,0x4a9804f1 - DD 0xf7daec41,0xf7daec41 - DD 0x0e50cd7f,0x0e50cd7f - DD 0x2ff69117,0x2ff69117 - DD 0x8dd64d76,0x8dd64d76 - DD 
0x4db0ef43,0x4db0ef43 - DD 0x544daacc,0x544daacc - DD 0xdf0496e4,0xdf0496e4 - DD 0xe3b5d19e,0xe3b5d19e - DD 0x1b886a4c,0x1b886a4c - DD 0xb81f2cc1,0xb81f2cc1 - DD 0x7f516546,0x7f516546 - DD 0x04ea5e9d,0x04ea5e9d - DD 0x5d358c01,0x5d358c01 - DD 0x737487fa,0x737487fa - DD 0x2e410bfb,0x2e410bfb - DD 0x5a1d67b3,0x5a1d67b3 - DD 0x52d2db92,0x52d2db92 - DD 0x335610e9,0x335610e9 - DD 0x1347d66d,0x1347d66d - DD 0x8c61d79a,0x8c61d79a - DD 0x7a0ca137,0x7a0ca137 - DD 0x8e14f859,0x8e14f859 - DD 0x893c13eb,0x893c13eb - DD 0xee27a9ce,0xee27a9ce - DD 0x35c961b7,0x35c961b7 - DD 0xede51ce1,0xede51ce1 - DD 0x3cb1477a,0x3cb1477a - DD 0x59dfd29c,0x59dfd29c - DD 0x3f73f255,0x3f73f255 - DD 0x79ce1418,0x79ce1418 - DD 0xbf37c773,0xbf37c773 - DD 0xeacdf753,0xeacdf753 - DD 0x5baafd5f,0x5baafd5f - DD 0x146f3ddf,0x146f3ddf - DD 0x86db4478,0x86db4478 - DD 0x81f3afca,0x81f3afca - DD 0x3ec468b9,0x3ec468b9 - DD 0x2c342438,0x2c342438 - DD 0x5f40a3c2,0x5f40a3c2 - DD 0x72c31d16,0x72c31d16 - DD 0x0c25e2bc,0x0c25e2bc - DD 0x8b493c28,0x8b493c28 - DD 0x41950dff,0x41950dff - DD 0x7101a839,0x7101a839 - DD 0xdeb30c08,0xdeb30c08 - DD 0x9ce4b4d8,0x9ce4b4d8 - DD 0x90c15664,0x90c15664 - DD 0x6184cb7b,0x6184cb7b - DD 0x70b632d5,0x70b632d5 - DD 0x745c6c48,0x745c6c48 - DD 0x4257b8d0,0x4257b8d0 -DB 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -DB 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -DB 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -DB 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -DB 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -DB 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -DB 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -DB 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -DB 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -DB 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -DB 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -DB 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -DB 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -DB 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -DB 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -DB 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -DB 
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -DB 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -DB 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -DB 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -DB 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -DB 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -DB 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -DB 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -DB 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -DB 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -DB 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -DB 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -DB 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -DB 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -DB 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -DB 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d - DD 0x80808080,0x80808080,0xfefefefe,0xfefefefe - DD 0x1b1b1b1b,0x1b1b1b1b,0,0 -DB 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -DB 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -DB 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -DB 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -DB 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -DB 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -DB 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -DB 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -DB 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -DB 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -DB 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -DB 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -DB 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -DB 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -DB 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -DB 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -DB 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -DB 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -DB 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -DB 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -DB 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -DB 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -DB 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -DB 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -DB 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -DB 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -DB 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -DB 
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -DB 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -DB 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -DB 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -DB 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d - DD 0x80808080,0x80808080,0xfefefefe,0xfefefefe - DD 0x1b1b1b1b,0x1b1b1b1b,0,0 -DB 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -DB 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -DB 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -DB 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -DB 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -DB 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -DB 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -DB 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -DB 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -DB 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -DB 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -DB 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -DB 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -DB 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -DB 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -DB 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -DB 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -DB 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -DB 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -DB 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -DB 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -DB 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -DB 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -DB 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -DB 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -DB 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -DB 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -DB 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -DB 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -DB 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -DB 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -DB 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d - DD 0x80808080,0x80808080,0xfefefefe,0xfefefefe - DD 0x1b1b1b1b,0x1b1b1b1b,0,0 -DB 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38 -DB 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb -DB 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87 -DB 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb -DB 
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d -DB 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e -DB 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2 -DB 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25 -DB 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16 -DB 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92 -DB 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda -DB 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84 -DB 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a -DB 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06 -DB 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02 -DB 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b -DB 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea -DB 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73 -DB 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85 -DB 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e -DB 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89 -DB 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b -DB 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20 -DB 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4 -DB 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31 -DB 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f -DB 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d -DB 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef -DB 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0 -DB 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61 -DB 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26 -DB 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d - DD 0x80808080,0x80808080,0xfefefefe,0xfefefefe - DD 0x1b1b1b1b,0x1b1b1b1b,0,0 -DB 65,69,83,32,102,111,114,32,120,56,54,95,54,52,44,32 -DB 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 -DB 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 -DB 62,0 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -block_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_block_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_block_prologue - - mov 
rax,QWORD[24+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$in_block_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - jmp NEAR $L$common_seh_exit - - - -ALIGN 16 -key_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_key_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_key_prologue - - lea rax,[56+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$in_key_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - jmp NEAR $L$common_seh_exit - - - -ALIGN 16 -cbc_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - lea r10,[$L$cbc_prologue] - cmp rbx,r10 - jb NEAR $L$in_cbc_prologue - - lea r10,[$L$cbc_fast_body] - cmp rbx,r10 - jb NEAR $L$in_cbc_frame_setup - - lea r10,[$L$cbc_slow_prologue] - cmp rbx,r10 - jb NEAR $L$in_cbc_body - - lea r10,[$L$cbc_slow_body] - cmp rbx,r10 - jb NEAR $L$in_cbc_frame_setup - -$L$in_cbc_body: - mov 
rax,QWORD[152+r8] - - lea r10,[$L$cbc_epilogue] - cmp rbx,r10 - jae NEAR $L$in_cbc_prologue - - lea rax,[8+rax] - - lea r10,[$L$cbc_popfq] - cmp rbx,r10 - jae NEAR $L$in_cbc_prologue - - mov rax,QWORD[8+rax] - lea rax,[56+rax] - -$L$in_cbc_frame_setup: - mov rbx,QWORD[((-16))+rax] - mov rbp,QWORD[((-24))+rax] - mov r12,QWORD[((-32))+rax] - mov r13,QWORD[((-40))+rax] - mov r14,QWORD[((-48))+rax] - mov r15,QWORD[((-56))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$in_cbc_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - -$L$common_seh_exit: - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_aes_nohw_encrypt wrt ..imagebase - DD $L$SEH_end_aes_nohw_encrypt wrt ..imagebase - DD $L$SEH_info_aes_nohw_encrypt wrt ..imagebase - - DD $L$SEH_begin_aes_nohw_decrypt wrt ..imagebase - DD $L$SEH_end_aes_nohw_decrypt wrt ..imagebase - DD $L$SEH_info_aes_nohw_decrypt wrt ..imagebase - - DD $L$SEH_begin_aes_nohw_set_encrypt_key wrt ..imagebase - DD $L$SEH_end_aes_nohw_set_encrypt_key wrt ..imagebase - DD $L$SEH_info_aes_nohw_set_encrypt_key wrt ..imagebase - - DD $L$SEH_begin_aes_nohw_set_decrypt_key wrt ..imagebase - DD $L$SEH_end_aes_nohw_set_decrypt_key wrt ..imagebase - DD $L$SEH_info_aes_nohw_set_decrypt_key wrt ..imagebase - - DD $L$SEH_begin_aes_nohw_cbc_encrypt wrt ..imagebase - DD 
$L$SEH_end_aes_nohw_cbc_encrypt wrt ..imagebase - DD $L$SEH_info_aes_nohw_cbc_encrypt wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_aes_nohw_encrypt: -DB 9,0,0,0 - DD block_se_handler wrt ..imagebase - DD $L$enc_prologue wrt ..imagebase,$L$enc_epilogue wrt ..imagebase -$L$SEH_info_aes_nohw_decrypt: -DB 9,0,0,0 - DD block_se_handler wrt ..imagebase - DD $L$dec_prologue wrt ..imagebase,$L$dec_epilogue wrt ..imagebase -$L$SEH_info_aes_nohw_set_encrypt_key: -DB 9,0,0,0 - DD key_se_handler wrt ..imagebase - DD $L$enc_key_prologue wrt ..imagebase,$L$enc_key_epilogue wrt ..imagebase -$L$SEH_info_aes_nohw_set_decrypt_key: -DB 9,0,0,0 - DD key_se_handler wrt ..imagebase - DD $L$dec_key_prologue wrt ..imagebase,$L$dec_key_epilogue wrt ..imagebase -$L$SEH_info_aes_nohw_cbc_encrypt: -DB 9,0,0,0 - DD cbc_se_handler wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm deleted file mode 100644 index 2b51a26849..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm +++ /dev/null @@ -1,1033 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - -ALIGN 32 -_aesni_ctr32_ghash_6x: - - vmovdqu xmm2,XMMWORD[32+r11] - sub rdx,6 - vpxor xmm4,xmm4,xmm4 - vmovdqu xmm15,XMMWORD[((0-128))+rcx] - vpaddb xmm10,xmm1,xmm2 - vpaddb xmm11,xmm10,xmm2 - vpaddb xmm12,xmm11,xmm2 - vpaddb xmm13,xmm12,xmm2 - vpaddb xmm14,xmm13,xmm2 - vpxor xmm9,xmm1,xmm15 - vmovdqu XMMWORD[(16+8)+rsp],xmm4 - jmp NEAR $L$oop6x - -ALIGN 32 -$L$oop6x: - add ebx,100663296 - jc NEAR $L$handle_ctr32 - vmovdqu xmm3,XMMWORD[((0-32))+r9] - vpaddb xmm1,xmm14,xmm2 - vpxor xmm10,xmm10,xmm15 - vpxor xmm11,xmm11,xmm15 - -$L$resume_ctr32: - vmovdqu XMMWORD[r8],xmm1 - vpclmulqdq xmm5,xmm7,xmm3,0x10 - vpxor xmm12,xmm12,xmm15 - vmovups xmm2,XMMWORD[((16-128))+rcx] - vpclmulqdq xmm6,xmm7,xmm3,0x01 - - - - - - - - - - - - - - - - - - xor r12,r12 - cmp r15,r14 - - vaesenc xmm9,xmm9,xmm2 - vmovdqu xmm0,XMMWORD[((48+8))+rsp] - vpxor xmm13,xmm13,xmm15 - vpclmulqdq xmm1,xmm7,xmm3,0x00 - vaesenc xmm10,xmm10,xmm2 - vpxor xmm14,xmm14,xmm15 - setnc r12b - vpclmulqdq xmm7,xmm7,xmm3,0x11 - vaesenc xmm11,xmm11,xmm2 - vmovdqu xmm3,XMMWORD[((16-32))+r9] - neg r12 - vaesenc xmm12,xmm12,xmm2 - vpxor xmm6,xmm6,xmm5 - vpclmulqdq xmm5,xmm0,xmm3,0x00 - vpxor xmm8,xmm8,xmm4 - vaesenc xmm13,xmm13,xmm2 - vpxor xmm4,xmm1,xmm5 - and r12,0x60 - vmovups xmm15,XMMWORD[((32-128))+rcx] - vpclmulqdq xmm1,xmm0,xmm3,0x10 - vaesenc xmm14,xmm14,xmm2 - - vpclmulqdq xmm2,xmm0,xmm3,0x01 - lea r14,[r12*1+r14] - vaesenc xmm9,xmm9,xmm15 - vpxor xmm8,xmm8,XMMWORD[((16+8))+rsp] - vpclmulqdq xmm3,xmm0,xmm3,0x11 - vmovdqu xmm0,XMMWORD[((64+8))+rsp] - vaesenc xmm10,xmm10,xmm15 - movbe r13,QWORD[88+r14] - vaesenc xmm11,xmm11,xmm15 - movbe r12,QWORD[80+r14] - vaesenc xmm12,xmm12,xmm15 - mov QWORD[((32+8))+rsp],r13 - vaesenc xmm13,xmm13,xmm15 - mov QWORD[((40+8))+rsp],r12 - vmovdqu xmm5,XMMWORD[((48-32))+r9] - vaesenc 
xmm14,xmm14,xmm15 - - vmovups xmm15,XMMWORD[((48-128))+rcx] - vpxor xmm6,xmm6,xmm1 - vpclmulqdq xmm1,xmm0,xmm5,0x00 - vaesenc xmm9,xmm9,xmm15 - vpxor xmm6,xmm6,xmm2 - vpclmulqdq xmm2,xmm0,xmm5,0x10 - vaesenc xmm10,xmm10,xmm15 - vpxor xmm7,xmm7,xmm3 - vpclmulqdq xmm3,xmm0,xmm5,0x01 - vaesenc xmm11,xmm11,xmm15 - vpclmulqdq xmm5,xmm0,xmm5,0x11 - vmovdqu xmm0,XMMWORD[((80+8))+rsp] - vaesenc xmm12,xmm12,xmm15 - vaesenc xmm13,xmm13,xmm15 - vpxor xmm4,xmm4,xmm1 - vmovdqu xmm1,XMMWORD[((64-32))+r9] - vaesenc xmm14,xmm14,xmm15 - - vmovups xmm15,XMMWORD[((64-128))+rcx] - vpxor xmm6,xmm6,xmm2 - vpclmulqdq xmm2,xmm0,xmm1,0x00 - vaesenc xmm9,xmm9,xmm15 - vpxor xmm6,xmm6,xmm3 - vpclmulqdq xmm3,xmm0,xmm1,0x10 - vaesenc xmm10,xmm10,xmm15 - movbe r13,QWORD[72+r14] - vpxor xmm7,xmm7,xmm5 - vpclmulqdq xmm5,xmm0,xmm1,0x01 - vaesenc xmm11,xmm11,xmm15 - movbe r12,QWORD[64+r14] - vpclmulqdq xmm1,xmm0,xmm1,0x11 - vmovdqu xmm0,XMMWORD[((96+8))+rsp] - vaesenc xmm12,xmm12,xmm15 - mov QWORD[((48+8))+rsp],r13 - vaesenc xmm13,xmm13,xmm15 - mov QWORD[((56+8))+rsp],r12 - vpxor xmm4,xmm4,xmm2 - vmovdqu xmm2,XMMWORD[((96-32))+r9] - vaesenc xmm14,xmm14,xmm15 - - vmovups xmm15,XMMWORD[((80-128))+rcx] - vpxor xmm6,xmm6,xmm3 - vpclmulqdq xmm3,xmm0,xmm2,0x00 - vaesenc xmm9,xmm9,xmm15 - vpxor xmm6,xmm6,xmm5 - vpclmulqdq xmm5,xmm0,xmm2,0x10 - vaesenc xmm10,xmm10,xmm15 - movbe r13,QWORD[56+r14] - vpxor xmm7,xmm7,xmm1 - vpclmulqdq xmm1,xmm0,xmm2,0x01 - vpxor xmm8,xmm8,XMMWORD[((112+8))+rsp] - vaesenc xmm11,xmm11,xmm15 - movbe r12,QWORD[48+r14] - vpclmulqdq xmm2,xmm0,xmm2,0x11 - vaesenc xmm12,xmm12,xmm15 - mov QWORD[((64+8))+rsp],r13 - vaesenc xmm13,xmm13,xmm15 - mov QWORD[((72+8))+rsp],r12 - vpxor xmm4,xmm4,xmm3 - vmovdqu xmm3,XMMWORD[((112-32))+r9] - vaesenc xmm14,xmm14,xmm15 - - vmovups xmm15,XMMWORD[((96-128))+rcx] - vpxor xmm6,xmm6,xmm5 - vpclmulqdq xmm5,xmm8,xmm3,0x10 - vaesenc xmm9,xmm9,xmm15 - vpxor xmm6,xmm6,xmm1 - vpclmulqdq xmm1,xmm8,xmm3,0x01 - vaesenc xmm10,xmm10,xmm15 - movbe r13,QWORD[40+r14] 
- vpxor xmm7,xmm7,xmm2 - vpclmulqdq xmm2,xmm8,xmm3,0x00 - vaesenc xmm11,xmm11,xmm15 - movbe r12,QWORD[32+r14] - vpclmulqdq xmm8,xmm8,xmm3,0x11 - vaesenc xmm12,xmm12,xmm15 - mov QWORD[((80+8))+rsp],r13 - vaesenc xmm13,xmm13,xmm15 - mov QWORD[((88+8))+rsp],r12 - vpxor xmm6,xmm6,xmm5 - vaesenc xmm14,xmm14,xmm15 - vpxor xmm6,xmm6,xmm1 - - vmovups xmm15,XMMWORD[((112-128))+rcx] - vpslldq xmm5,xmm6,8 - vpxor xmm4,xmm4,xmm2 - vmovdqu xmm3,XMMWORD[16+r11] - - vaesenc xmm9,xmm9,xmm15 - vpxor xmm7,xmm7,xmm8 - vaesenc xmm10,xmm10,xmm15 - vpxor xmm4,xmm4,xmm5 - movbe r13,QWORD[24+r14] - vaesenc xmm11,xmm11,xmm15 - movbe r12,QWORD[16+r14] - vpalignr xmm0,xmm4,xmm4,8 - vpclmulqdq xmm4,xmm4,xmm3,0x10 - mov QWORD[((96+8))+rsp],r13 - vaesenc xmm12,xmm12,xmm15 - mov QWORD[((104+8))+rsp],r12 - vaesenc xmm13,xmm13,xmm15 - vmovups xmm1,XMMWORD[((128-128))+rcx] - vaesenc xmm14,xmm14,xmm15 - - vaesenc xmm9,xmm9,xmm1 - vmovups xmm15,XMMWORD[((144-128))+rcx] - vaesenc xmm10,xmm10,xmm1 - vpsrldq xmm6,xmm6,8 - vaesenc xmm11,xmm11,xmm1 - vpxor xmm7,xmm7,xmm6 - vaesenc xmm12,xmm12,xmm1 - vpxor xmm4,xmm4,xmm0 - movbe r13,QWORD[8+r14] - vaesenc xmm13,xmm13,xmm1 - movbe r12,QWORD[r14] - vaesenc xmm14,xmm14,xmm1 - vmovups xmm1,XMMWORD[((160-128))+rcx] - cmp ebp,11 - jb NEAR $L$enc_tail - - vaesenc xmm9,xmm9,xmm15 - vaesenc xmm10,xmm10,xmm15 - vaesenc xmm11,xmm11,xmm15 - vaesenc xmm12,xmm12,xmm15 - vaesenc xmm13,xmm13,xmm15 - vaesenc xmm14,xmm14,xmm15 - - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc xmm13,xmm13,xmm1 - vmovups xmm15,XMMWORD[((176-128))+rcx] - vaesenc xmm14,xmm14,xmm1 - vmovups xmm1,XMMWORD[((192-128))+rcx] - je NEAR $L$enc_tail - - vaesenc xmm9,xmm9,xmm15 - vaesenc xmm10,xmm10,xmm15 - vaesenc xmm11,xmm11,xmm15 - vaesenc xmm12,xmm12,xmm15 - vaesenc xmm13,xmm13,xmm15 - vaesenc xmm14,xmm14,xmm15 - - vaesenc xmm9,xmm9,xmm1 - vaesenc xmm10,xmm10,xmm1 - vaesenc xmm11,xmm11,xmm1 - vaesenc xmm12,xmm12,xmm1 - vaesenc 
xmm13,xmm13,xmm1 - vmovups xmm15,XMMWORD[((208-128))+rcx] - vaesenc xmm14,xmm14,xmm1 - vmovups xmm1,XMMWORD[((224-128))+rcx] - jmp NEAR $L$enc_tail - -ALIGN 32 -$L$handle_ctr32: - vmovdqu xmm0,XMMWORD[r11] - vpshufb xmm6,xmm1,xmm0 - vmovdqu xmm5,XMMWORD[48+r11] - vpaddd xmm10,xmm6,XMMWORD[64+r11] - vpaddd xmm11,xmm6,xmm5 - vmovdqu xmm3,XMMWORD[((0-32))+r9] - vpaddd xmm12,xmm10,xmm5 - vpshufb xmm10,xmm10,xmm0 - vpaddd xmm13,xmm11,xmm5 - vpshufb xmm11,xmm11,xmm0 - vpxor xmm10,xmm10,xmm15 - vpaddd xmm14,xmm12,xmm5 - vpshufb xmm12,xmm12,xmm0 - vpxor xmm11,xmm11,xmm15 - vpaddd xmm1,xmm13,xmm5 - vpshufb xmm13,xmm13,xmm0 - vpshufb xmm14,xmm14,xmm0 - vpshufb xmm1,xmm1,xmm0 - jmp NEAR $L$resume_ctr32 - -ALIGN 32 -$L$enc_tail: - vaesenc xmm9,xmm9,xmm15 - vmovdqu XMMWORD[(16+8)+rsp],xmm7 - vpalignr xmm8,xmm4,xmm4,8 - vaesenc xmm10,xmm10,xmm15 - vpclmulqdq xmm4,xmm4,xmm3,0x10 - vpxor xmm2,xmm1,XMMWORD[rdi] - vaesenc xmm11,xmm11,xmm15 - vpxor xmm0,xmm1,XMMWORD[16+rdi] - vaesenc xmm12,xmm12,xmm15 - vpxor xmm5,xmm1,XMMWORD[32+rdi] - vaesenc xmm13,xmm13,xmm15 - vpxor xmm6,xmm1,XMMWORD[48+rdi] - vaesenc xmm14,xmm14,xmm15 - vpxor xmm7,xmm1,XMMWORD[64+rdi] - vpxor xmm3,xmm1,XMMWORD[80+rdi] - vmovdqu xmm1,XMMWORD[r8] - - vaesenclast xmm9,xmm9,xmm2 - vmovdqu xmm2,XMMWORD[32+r11] - vaesenclast xmm10,xmm10,xmm0 - vpaddb xmm0,xmm1,xmm2 - mov QWORD[((112+8))+rsp],r13 - lea rdi,[96+rdi] - vaesenclast xmm11,xmm11,xmm5 - vpaddb xmm5,xmm0,xmm2 - mov QWORD[((120+8))+rsp],r12 - lea rsi,[96+rsi] - vmovdqu xmm15,XMMWORD[((0-128))+rcx] - vaesenclast xmm12,xmm12,xmm6 - vpaddb xmm6,xmm5,xmm2 - vaesenclast xmm13,xmm13,xmm7 - vpaddb xmm7,xmm6,xmm2 - vaesenclast xmm14,xmm14,xmm3 - vpaddb xmm3,xmm7,xmm2 - - add r10,0x60 - sub rdx,0x6 - jc NEAR $L$6x_done - - vmovups XMMWORD[(-96)+rsi],xmm9 - vpxor xmm9,xmm1,xmm15 - vmovups XMMWORD[(-80)+rsi],xmm10 - vmovdqa xmm10,xmm0 - vmovups XMMWORD[(-64)+rsi],xmm11 - vmovdqa xmm11,xmm5 - vmovups XMMWORD[(-48)+rsi],xmm12 - vmovdqa xmm12,xmm6 - vmovups 
XMMWORD[(-32)+rsi],xmm13 - vmovdqa xmm13,xmm7 - vmovups XMMWORD[(-16)+rsi],xmm14 - vmovdqa xmm14,xmm3 - vmovdqu xmm7,XMMWORD[((32+8))+rsp] - jmp NEAR $L$oop6x - -$L$6x_done: - vpxor xmm8,xmm8,XMMWORD[((16+8))+rsp] - vpxor xmm8,xmm8,xmm4 - - DB 0F3h,0C3h ;repret - - -global aesni_gcm_decrypt - -ALIGN 32 -aesni_gcm_decrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesni_gcm_decrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - xor r10,r10 - - - - cmp rdx,0x60 - jb NEAR $L$gcm_dec_abort - - lea rax,[rsp] - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - lea rsp,[((-168))+rsp] - movaps XMMWORD[(-216)+rax],xmm6 - movaps XMMWORD[(-200)+rax],xmm7 - movaps XMMWORD[(-184)+rax],xmm8 - movaps XMMWORD[(-168)+rax],xmm9 - movaps XMMWORD[(-152)+rax],xmm10 - movaps XMMWORD[(-136)+rax],xmm11 - movaps XMMWORD[(-120)+rax],xmm12 - movaps XMMWORD[(-104)+rax],xmm13 - movaps XMMWORD[(-88)+rax],xmm14 - movaps XMMWORD[(-72)+rax],xmm15 -$L$gcm_dec_body: - vzeroupper - - vmovdqu xmm1,XMMWORD[r8] - add rsp,-128 - mov ebx,DWORD[12+r8] - lea r11,[$L$bswap_mask] - lea r14,[((-128))+rcx] - mov r15,0xf80 - vmovdqu xmm8,XMMWORD[r9] - and rsp,-128 - vmovdqu xmm0,XMMWORD[r11] - lea rcx,[128+rcx] - lea r9,[((32+32))+r9] - mov ebp,DWORD[((240-128))+rcx] - vpshufb xmm8,xmm8,xmm0 - - and r14,r15 - and r15,rsp - sub r15,r14 - jc NEAR $L$dec_no_key_aliasing - cmp r15,768 - jnc NEAR $L$dec_no_key_aliasing - sub rsp,r15 -$L$dec_no_key_aliasing: - - vmovdqu xmm7,XMMWORD[80+rdi] - lea r14,[rdi] - vmovdqu xmm4,XMMWORD[64+rdi] - - - - - - - - lea r15,[((-192))+rdx*1+rdi] - - vmovdqu xmm5,XMMWORD[48+rdi] - shr rdx,4 - xor r10,r10 - vmovdqu xmm6,XMMWORD[32+rdi] - vpshufb xmm7,xmm7,xmm0 - vmovdqu xmm2,XMMWORD[16+rdi] - vpshufb xmm4,xmm4,xmm0 - vmovdqu xmm3,XMMWORD[rdi] - vpshufb xmm5,xmm5,xmm0 - vmovdqu XMMWORD[48+rsp],xmm4 - vpshufb xmm6,xmm6,xmm0 - vmovdqu 
XMMWORD[64+rsp],xmm5 - vpshufb xmm2,xmm2,xmm0 - vmovdqu XMMWORD[80+rsp],xmm6 - vpshufb xmm3,xmm3,xmm0 - vmovdqu XMMWORD[96+rsp],xmm2 - vmovdqu XMMWORD[112+rsp],xmm3 - - call _aesni_ctr32_ghash_6x - - vmovups XMMWORD[(-96)+rsi],xmm9 - vmovups XMMWORD[(-80)+rsi],xmm10 - vmovups XMMWORD[(-64)+rsi],xmm11 - vmovups XMMWORD[(-48)+rsi],xmm12 - vmovups XMMWORD[(-32)+rsi],xmm13 - vmovups XMMWORD[(-16)+rsi],xmm14 - - vpshufb xmm8,xmm8,XMMWORD[r11] - vmovdqu XMMWORD[(-64)+r9],xmm8 - - vzeroupper - movaps xmm6,XMMWORD[((-216))+rax] - movaps xmm7,XMMWORD[((-200))+rax] - movaps xmm8,XMMWORD[((-184))+rax] - movaps xmm9,XMMWORD[((-168))+rax] - movaps xmm10,XMMWORD[((-152))+rax] - movaps xmm11,XMMWORD[((-136))+rax] - movaps xmm12,XMMWORD[((-120))+rax] - movaps xmm13,XMMWORD[((-104))+rax] - movaps xmm14,XMMWORD[((-88))+rax] - movaps xmm15,XMMWORD[((-72))+rax] - mov r15,QWORD[((-48))+rax] - - mov r14,QWORD[((-40))+rax] - - mov r13,QWORD[((-32))+rax] - - mov r12,QWORD[((-24))+rax] - - mov rbp,QWORD[((-16))+rax] - - mov rbx,QWORD[((-8))+rax] - - lea rsp,[rax] - -$L$gcm_dec_abort: - mov rax,r10 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesni_gcm_decrypt: - -ALIGN 32 -_aesni_ctr32_6x: - - vmovdqu xmm4,XMMWORD[((0-128))+rcx] - vmovdqu xmm2,XMMWORD[32+r11] - lea r13,[((-1))+rbp] - vmovups xmm15,XMMWORD[((16-128))+rcx] - lea r12,[((32-128))+rcx] - vpxor xmm9,xmm1,xmm4 - add ebx,100663296 - jc NEAR $L$handle_ctr32_2 - vpaddb xmm10,xmm1,xmm2 - vpaddb xmm11,xmm10,xmm2 - vpxor xmm10,xmm10,xmm4 - vpaddb xmm12,xmm11,xmm2 - vpxor xmm11,xmm11,xmm4 - vpaddb xmm13,xmm12,xmm2 - vpxor xmm12,xmm12,xmm4 - vpaddb xmm14,xmm13,xmm2 - vpxor xmm13,xmm13,xmm4 - vpaddb xmm1,xmm14,xmm2 - vpxor xmm14,xmm14,xmm4 - jmp NEAR $L$oop_ctr32 - -ALIGN 16 -$L$oop_ctr32: - vaesenc xmm9,xmm9,xmm15 - vaesenc xmm10,xmm10,xmm15 - vaesenc xmm11,xmm11,xmm15 - vaesenc xmm12,xmm12,xmm15 - vaesenc xmm13,xmm13,xmm15 - vaesenc xmm14,xmm14,xmm15 - vmovups xmm15,XMMWORD[r12] - 
lea r12,[16+r12] - dec r13d - jnz NEAR $L$oop_ctr32 - - vmovdqu xmm3,XMMWORD[r12] - vaesenc xmm9,xmm9,xmm15 - vpxor xmm4,xmm3,XMMWORD[rdi] - vaesenc xmm10,xmm10,xmm15 - vpxor xmm5,xmm3,XMMWORD[16+rdi] - vaesenc xmm11,xmm11,xmm15 - vpxor xmm6,xmm3,XMMWORD[32+rdi] - vaesenc xmm12,xmm12,xmm15 - vpxor xmm8,xmm3,XMMWORD[48+rdi] - vaesenc xmm13,xmm13,xmm15 - vpxor xmm2,xmm3,XMMWORD[64+rdi] - vaesenc xmm14,xmm14,xmm15 - vpxor xmm3,xmm3,XMMWORD[80+rdi] - lea rdi,[96+rdi] - - vaesenclast xmm9,xmm9,xmm4 - vaesenclast xmm10,xmm10,xmm5 - vaesenclast xmm11,xmm11,xmm6 - vaesenclast xmm12,xmm12,xmm8 - vaesenclast xmm13,xmm13,xmm2 - vaesenclast xmm14,xmm14,xmm3 - vmovups XMMWORD[rsi],xmm9 - vmovups XMMWORD[16+rsi],xmm10 - vmovups XMMWORD[32+rsi],xmm11 - vmovups XMMWORD[48+rsi],xmm12 - vmovups XMMWORD[64+rsi],xmm13 - vmovups XMMWORD[80+rsi],xmm14 - lea rsi,[96+rsi] - - DB 0F3h,0C3h ;repret -ALIGN 32 -$L$handle_ctr32_2: - vpshufb xmm6,xmm1,xmm0 - vmovdqu xmm5,XMMWORD[48+r11] - vpaddd xmm10,xmm6,XMMWORD[64+r11] - vpaddd xmm11,xmm6,xmm5 - vpaddd xmm12,xmm10,xmm5 - vpshufb xmm10,xmm10,xmm0 - vpaddd xmm13,xmm11,xmm5 - vpshufb xmm11,xmm11,xmm0 - vpxor xmm10,xmm10,xmm4 - vpaddd xmm14,xmm12,xmm5 - vpshufb xmm12,xmm12,xmm0 - vpxor xmm11,xmm11,xmm4 - vpaddd xmm1,xmm13,xmm5 - vpshufb xmm13,xmm13,xmm0 - vpxor xmm12,xmm12,xmm4 - vpshufb xmm14,xmm14,xmm0 - vpxor xmm13,xmm13,xmm4 - vpshufb xmm1,xmm1,xmm0 - vpxor xmm14,xmm14,xmm4 - jmp NEAR $L$oop_ctr32 - - - -global aesni_gcm_encrypt - -ALIGN 32 -aesni_gcm_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aesni_gcm_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - -%ifdef BORINGSSL_DISPATCH_TEST -EXTERN BORINGSSL_function_hit - mov BYTE[((BORINGSSL_function_hit+2))],1 -%endif - xor r10,r10 - - - - - cmp rdx,0x60*3 - jb NEAR $L$gcm_enc_abort - - lea rax,[rsp] - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push 
r15 - - lea rsp,[((-168))+rsp] - movaps XMMWORD[(-216)+rax],xmm6 - movaps XMMWORD[(-200)+rax],xmm7 - movaps XMMWORD[(-184)+rax],xmm8 - movaps XMMWORD[(-168)+rax],xmm9 - movaps XMMWORD[(-152)+rax],xmm10 - movaps XMMWORD[(-136)+rax],xmm11 - movaps XMMWORD[(-120)+rax],xmm12 - movaps XMMWORD[(-104)+rax],xmm13 - movaps XMMWORD[(-88)+rax],xmm14 - movaps XMMWORD[(-72)+rax],xmm15 -$L$gcm_enc_body: - vzeroupper - - vmovdqu xmm1,XMMWORD[r8] - add rsp,-128 - mov ebx,DWORD[12+r8] - lea r11,[$L$bswap_mask] - lea r14,[((-128))+rcx] - mov r15,0xf80 - lea rcx,[128+rcx] - vmovdqu xmm0,XMMWORD[r11] - and rsp,-128 - mov ebp,DWORD[((240-128))+rcx] - - and r14,r15 - and r15,rsp - sub r15,r14 - jc NEAR $L$enc_no_key_aliasing - cmp r15,768 - jnc NEAR $L$enc_no_key_aliasing - sub rsp,r15 -$L$enc_no_key_aliasing: - - lea r14,[rsi] - - - - - - - - - lea r15,[((-192))+rdx*1+rsi] - - shr rdx,4 - - call _aesni_ctr32_6x - vpshufb xmm8,xmm9,xmm0 - vpshufb xmm2,xmm10,xmm0 - vmovdqu XMMWORD[112+rsp],xmm8 - vpshufb xmm4,xmm11,xmm0 - vmovdqu XMMWORD[96+rsp],xmm2 - vpshufb xmm5,xmm12,xmm0 - vmovdqu XMMWORD[80+rsp],xmm4 - vpshufb xmm6,xmm13,xmm0 - vmovdqu XMMWORD[64+rsp],xmm5 - vpshufb xmm7,xmm14,xmm0 - vmovdqu XMMWORD[48+rsp],xmm6 - - call _aesni_ctr32_6x - - vmovdqu xmm8,XMMWORD[r9] - lea r9,[((32+32))+r9] - sub rdx,12 - mov r10,0x60*2 - vpshufb xmm8,xmm8,xmm0 - - call _aesni_ctr32_ghash_6x - vmovdqu xmm7,XMMWORD[32+rsp] - vmovdqu xmm0,XMMWORD[r11] - vmovdqu xmm3,XMMWORD[((0-32))+r9] - vpunpckhqdq xmm1,xmm7,xmm7 - vmovdqu xmm15,XMMWORD[((32-32))+r9] - vmovups XMMWORD[(-96)+rsi],xmm9 - vpshufb xmm9,xmm9,xmm0 - vpxor xmm1,xmm1,xmm7 - vmovups XMMWORD[(-80)+rsi],xmm10 - vpshufb xmm10,xmm10,xmm0 - vmovups XMMWORD[(-64)+rsi],xmm11 - vpshufb xmm11,xmm11,xmm0 - vmovups XMMWORD[(-48)+rsi],xmm12 - vpshufb xmm12,xmm12,xmm0 - vmovups XMMWORD[(-32)+rsi],xmm13 - vpshufb xmm13,xmm13,xmm0 - vmovups XMMWORD[(-16)+rsi],xmm14 - vpshufb xmm14,xmm14,xmm0 - vmovdqu XMMWORD[16+rsp],xmm9 - vmovdqu xmm6,XMMWORD[48+rsp] - 
vmovdqu xmm0,XMMWORD[((16-32))+r9] - vpunpckhqdq xmm2,xmm6,xmm6 - vpclmulqdq xmm5,xmm7,xmm3,0x00 - vpxor xmm2,xmm2,xmm6 - vpclmulqdq xmm7,xmm7,xmm3,0x11 - vpclmulqdq xmm1,xmm1,xmm15,0x00 - - vmovdqu xmm9,XMMWORD[64+rsp] - vpclmulqdq xmm4,xmm6,xmm0,0x00 - vmovdqu xmm3,XMMWORD[((48-32))+r9] - vpxor xmm4,xmm4,xmm5 - vpunpckhqdq xmm5,xmm9,xmm9 - vpclmulqdq xmm6,xmm6,xmm0,0x11 - vpxor xmm5,xmm5,xmm9 - vpxor xmm6,xmm6,xmm7 - vpclmulqdq xmm2,xmm2,xmm15,0x10 - vmovdqu xmm15,XMMWORD[((80-32))+r9] - vpxor xmm2,xmm2,xmm1 - - vmovdqu xmm1,XMMWORD[80+rsp] - vpclmulqdq xmm7,xmm9,xmm3,0x00 - vmovdqu xmm0,XMMWORD[((64-32))+r9] - vpxor xmm7,xmm7,xmm4 - vpunpckhqdq xmm4,xmm1,xmm1 - vpclmulqdq xmm9,xmm9,xmm3,0x11 - vpxor xmm4,xmm4,xmm1 - vpxor xmm9,xmm9,xmm6 - vpclmulqdq xmm5,xmm5,xmm15,0x00 - vpxor xmm5,xmm5,xmm2 - - vmovdqu xmm2,XMMWORD[96+rsp] - vpclmulqdq xmm6,xmm1,xmm0,0x00 - vmovdqu xmm3,XMMWORD[((96-32))+r9] - vpxor xmm6,xmm6,xmm7 - vpunpckhqdq xmm7,xmm2,xmm2 - vpclmulqdq xmm1,xmm1,xmm0,0x11 - vpxor xmm7,xmm7,xmm2 - vpxor xmm1,xmm1,xmm9 - vpclmulqdq xmm4,xmm4,xmm15,0x10 - vmovdqu xmm15,XMMWORD[((128-32))+r9] - vpxor xmm4,xmm4,xmm5 - - vpxor xmm8,xmm8,XMMWORD[112+rsp] - vpclmulqdq xmm5,xmm2,xmm3,0x00 - vmovdqu xmm0,XMMWORD[((112-32))+r9] - vpunpckhqdq xmm9,xmm8,xmm8 - vpxor xmm5,xmm5,xmm6 - vpclmulqdq xmm2,xmm2,xmm3,0x11 - vpxor xmm9,xmm9,xmm8 - vpxor xmm2,xmm2,xmm1 - vpclmulqdq xmm7,xmm7,xmm15,0x00 - vpxor xmm4,xmm7,xmm4 - - vpclmulqdq xmm6,xmm8,xmm0,0x00 - vmovdqu xmm3,XMMWORD[((0-32))+r9] - vpunpckhqdq xmm1,xmm14,xmm14 - vpclmulqdq xmm8,xmm8,xmm0,0x11 - vpxor xmm1,xmm1,xmm14 - vpxor xmm5,xmm6,xmm5 - vpclmulqdq xmm9,xmm9,xmm15,0x10 - vmovdqu xmm15,XMMWORD[((32-32))+r9] - vpxor xmm7,xmm8,xmm2 - vpxor xmm6,xmm9,xmm4 - - vmovdqu xmm0,XMMWORD[((16-32))+r9] - vpxor xmm9,xmm7,xmm5 - vpclmulqdq xmm4,xmm14,xmm3,0x00 - vpxor xmm6,xmm6,xmm9 - vpunpckhqdq xmm2,xmm13,xmm13 - vpclmulqdq xmm14,xmm14,xmm3,0x11 - vpxor xmm2,xmm2,xmm13 - vpslldq xmm9,xmm6,8 - vpclmulqdq xmm1,xmm1,xmm15,0x00 - 
vpxor xmm8,xmm5,xmm9 - vpsrldq xmm6,xmm6,8 - vpxor xmm7,xmm7,xmm6 - - vpclmulqdq xmm5,xmm13,xmm0,0x00 - vmovdqu xmm3,XMMWORD[((48-32))+r9] - vpxor xmm5,xmm5,xmm4 - vpunpckhqdq xmm9,xmm12,xmm12 - vpclmulqdq xmm13,xmm13,xmm0,0x11 - vpxor xmm9,xmm9,xmm12 - vpxor xmm13,xmm13,xmm14 - vpalignr xmm14,xmm8,xmm8,8 - vpclmulqdq xmm2,xmm2,xmm15,0x10 - vmovdqu xmm15,XMMWORD[((80-32))+r9] - vpxor xmm2,xmm2,xmm1 - - vpclmulqdq xmm4,xmm12,xmm3,0x00 - vmovdqu xmm0,XMMWORD[((64-32))+r9] - vpxor xmm4,xmm4,xmm5 - vpunpckhqdq xmm1,xmm11,xmm11 - vpclmulqdq xmm12,xmm12,xmm3,0x11 - vpxor xmm1,xmm1,xmm11 - vpxor xmm12,xmm12,xmm13 - vxorps xmm7,xmm7,XMMWORD[16+rsp] - vpclmulqdq xmm9,xmm9,xmm15,0x00 - vpxor xmm9,xmm9,xmm2 - - vpclmulqdq xmm8,xmm8,XMMWORD[16+r11],0x10 - vxorps xmm8,xmm8,xmm14 - - vpclmulqdq xmm5,xmm11,xmm0,0x00 - vmovdqu xmm3,XMMWORD[((96-32))+r9] - vpxor xmm5,xmm5,xmm4 - vpunpckhqdq xmm2,xmm10,xmm10 - vpclmulqdq xmm11,xmm11,xmm0,0x11 - vpxor xmm2,xmm2,xmm10 - vpalignr xmm14,xmm8,xmm8,8 - vpxor xmm11,xmm11,xmm12 - vpclmulqdq xmm1,xmm1,xmm15,0x10 - vmovdqu xmm15,XMMWORD[((128-32))+r9] - vpxor xmm1,xmm1,xmm9 - - vxorps xmm14,xmm14,xmm7 - vpclmulqdq xmm8,xmm8,XMMWORD[16+r11],0x10 - vxorps xmm8,xmm8,xmm14 - - vpclmulqdq xmm4,xmm10,xmm3,0x00 - vmovdqu xmm0,XMMWORD[((112-32))+r9] - vpxor xmm4,xmm4,xmm5 - vpunpckhqdq xmm9,xmm8,xmm8 - vpclmulqdq xmm10,xmm10,xmm3,0x11 - vpxor xmm9,xmm9,xmm8 - vpxor xmm10,xmm10,xmm11 - vpclmulqdq xmm2,xmm2,xmm15,0x00 - vpxor xmm2,xmm2,xmm1 - - vpclmulqdq xmm5,xmm8,xmm0,0x00 - vpclmulqdq xmm7,xmm8,xmm0,0x11 - vpxor xmm5,xmm5,xmm4 - vpclmulqdq xmm6,xmm9,xmm15,0x10 - vpxor xmm7,xmm7,xmm10 - vpxor xmm6,xmm6,xmm2 - - vpxor xmm4,xmm7,xmm5 - vpxor xmm6,xmm6,xmm4 - vpslldq xmm1,xmm6,8 - vmovdqu xmm3,XMMWORD[16+r11] - vpsrldq xmm6,xmm6,8 - vpxor xmm8,xmm5,xmm1 - vpxor xmm7,xmm7,xmm6 - - vpalignr xmm2,xmm8,xmm8,8 - vpclmulqdq xmm8,xmm8,xmm3,0x10 - vpxor xmm8,xmm8,xmm2 - - vpalignr xmm2,xmm8,xmm8,8 - vpclmulqdq xmm8,xmm8,xmm3,0x10 - vpxor xmm2,xmm2,xmm7 - vpxor 
xmm8,xmm8,xmm2 - vpshufb xmm8,xmm8,XMMWORD[r11] - vmovdqu XMMWORD[(-64)+r9],xmm8 - - vzeroupper - movaps xmm6,XMMWORD[((-216))+rax] - movaps xmm7,XMMWORD[((-200))+rax] - movaps xmm8,XMMWORD[((-184))+rax] - movaps xmm9,XMMWORD[((-168))+rax] - movaps xmm10,XMMWORD[((-152))+rax] - movaps xmm11,XMMWORD[((-136))+rax] - movaps xmm12,XMMWORD[((-120))+rax] - movaps xmm13,XMMWORD[((-104))+rax] - movaps xmm14,XMMWORD[((-88))+rax] - movaps xmm15,XMMWORD[((-72))+rax] - mov r15,QWORD[((-48))+rax] - - mov r14,QWORD[((-40))+rax] - - mov r13,QWORD[((-32))+rax] - - mov r12,QWORD[((-24))+rax] - - mov rbp,QWORD[((-16))+rax] - - mov rbx,QWORD[((-8))+rax] - - lea rsp,[rax] - -$L$gcm_enc_abort: - mov rax,r10 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aesni_gcm_encrypt: -ALIGN 64 -$L$bswap_mask: -DB 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -$L$poly: -DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -$L$one_msb: -DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -$L$two_lsb: -DB 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -$L$one_lsb: -DB 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -DB 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108 -DB 101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82 -DB 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 -DB 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -gcm_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov rax,QWORD[120+r8] - - mov r15,QWORD[((-48))+rax] - mov r14,QWORD[((-40))+rax] - mov r13,QWORD[((-32))+rax] - mov r12,QWORD[((-24))+rax] - mov rbp,QWORD[((-16))+rax] - mov rbx,QWORD[((-8))+rax] - 
mov QWORD[240+r8],r15 - mov QWORD[232+r8],r14 - mov QWORD[224+r8],r13 - mov QWORD[216+r8],r12 - mov QWORD[160+r8],rbp - mov QWORD[144+r8],rbx - - lea rsi,[((-216))+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_aesni_gcm_decrypt wrt ..imagebase - DD $L$SEH_end_aesni_gcm_decrypt wrt ..imagebase - DD $L$SEH_gcm_dec_info wrt ..imagebase - - DD $L$SEH_begin_aesni_gcm_encrypt wrt ..imagebase - DD $L$SEH_end_aesni_gcm_encrypt wrt ..imagebase - DD $L$SEH_gcm_enc_info wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_gcm_dec_info: -DB 9,0,0,0 - DD gcm_se_handler wrt ..imagebase - DD $L$gcm_dec_body wrt ..imagebase,$L$gcm_dec_abort wrt ..imagebase -$L$SEH_gcm_enc_info: -DB 9,0,0,0 - DD gcm_se_handler wrt ..imagebase - DD $L$gcm_enc_body wrt ..imagebase,$L$gcm_enc_abort wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm deleted file mode 100644 index 342c1523ee..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm +++ /dev/null @@ -1,2806 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -EXTERN OPENSSL_ia32cap_P -global aes_hw_encrypt - -ALIGN 16 -aes_hw_encrypt: - -%ifdef BORINGSSL_DISPATCH_TEST -EXTERN BORINGSSL_function_hit - mov BYTE[((BORINGSSL_function_hit+1))],1 -%endif - movups xmm2,XMMWORD[rcx] - mov eax,DWORD[240+r8] - movups xmm0,XMMWORD[r8] - movups xmm1,XMMWORD[16+r8] - lea r8,[32+r8] - xorps xmm2,xmm0 -$L$oop_enc1_1: -DB 102,15,56,220,209 - dec eax - movups xmm1,XMMWORD[r8] - lea r8,[16+r8] - jnz NEAR $L$oop_enc1_1 -DB 102,15,56,221,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movups XMMWORD[rdx],xmm2 - pxor xmm2,xmm2 - DB 0F3h,0C3h ;repret - - - -global aes_hw_decrypt - -ALIGN 16 -aes_hw_decrypt: - - movups xmm2,XMMWORD[rcx] - mov eax,DWORD[240+r8] - movups xmm0,XMMWORD[r8] - movups xmm1,XMMWORD[16+r8] - lea r8,[32+r8] - xorps xmm2,xmm0 -$L$oop_dec1_2: -DB 102,15,56,222,209 - dec eax - movups xmm1,XMMWORD[r8] - lea r8,[16+r8] - jnz NEAR $L$oop_dec1_2 -DB 102,15,56,223,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movups XMMWORD[rdx],xmm2 - pxor xmm2,xmm2 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_encrypt2: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax - add rax,16 - -$L$enc_loop2: -DB 102,15,56,220,209 -DB 102,15,56,220,217 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,220,208 -DB 102,15,56,220,216 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$enc_loop2 - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,221,208 -DB 102,15,56,221,216 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_decrypt2: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax - add rax,16 - -$L$dec_loop2: -DB 102,15,56,222,209 
-DB 102,15,56,222,217 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,222,208 -DB 102,15,56,222,216 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$dec_loop2 - -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,223,208 -DB 102,15,56,223,216 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_encrypt3: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - xorps xmm4,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax - add rax,16 - -$L$enc_loop3: -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$enc_loop3 - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,221,208 -DB 102,15,56,221,216 -DB 102,15,56,221,224 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_decrypt3: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - xorps xmm4,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax - add rax,16 - -$L$dec_loop3: -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$dec_loop3 - -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,223,208 -DB 102,15,56,223,216 -DB 102,15,56,223,224 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_encrypt4: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - xorps xmm4,xmm0 - xorps xmm5,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax -DB 0x0f,0x1f,0x00 - add rax,16 - -$L$enc_loop4: -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 - movups 
xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 -DB 102,15,56,220,232 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$enc_loop4 - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,221,208 -DB 102,15,56,221,216 -DB 102,15,56,221,224 -DB 102,15,56,221,232 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_decrypt4: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - xorps xmm4,xmm0 - xorps xmm5,xmm0 - movups xmm0,XMMWORD[32+rcx] - lea rcx,[32+rax*1+rcx] - neg rax -DB 0x0f,0x1f,0x00 - add rax,16 - -$L$dec_loop4: -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$dec_loop4 - -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,223,208 -DB 102,15,56,223,216 -DB 102,15,56,223,224 -DB 102,15,56,223,232 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_encrypt6: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 -DB 102,15,56,220,209 - lea rcx,[32+rax*1+rcx] - neg rax -DB 102,15,56,220,217 - pxor xmm5,xmm0 - pxor xmm6,xmm0 -DB 102,15,56,220,225 - pxor xmm7,xmm0 - movups xmm0,XMMWORD[rax*1+rcx] - add rax,16 - jmp NEAR $L$enc_loop6_enter -ALIGN 16 -$L$enc_loop6: -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -$L$enc_loop6_enter: -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 -DB 102,15,56,220,232 -DB 102,15,56,220,240 -DB 102,15,56,220,248 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$enc_loop6 - -DB 102,15,56,220,209 
-DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,15,56,221,208 -DB 102,15,56,221,216 -DB 102,15,56,221,224 -DB 102,15,56,221,232 -DB 102,15,56,221,240 -DB 102,15,56,221,248 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_decrypt6: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - pxor xmm3,xmm0 - pxor xmm4,xmm0 -DB 102,15,56,222,209 - lea rcx,[32+rax*1+rcx] - neg rax -DB 102,15,56,222,217 - pxor xmm5,xmm0 - pxor xmm6,xmm0 -DB 102,15,56,222,225 - pxor xmm7,xmm0 - movups xmm0,XMMWORD[rax*1+rcx] - add rax,16 - jmp NEAR $L$dec_loop6_enter -ALIGN 16 -$L$dec_loop6: -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -$L$dec_loop6_enter: -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$dec_loop6 - -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,15,56,223,208 -DB 102,15,56,223,216 -DB 102,15,56,223,224 -DB 102,15,56,223,232 -DB 102,15,56,223,240 -DB 102,15,56,223,248 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_encrypt8: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - pxor xmm4,xmm0 - pxor xmm5,xmm0 - pxor xmm6,xmm0 - lea rcx,[32+rax*1+rcx] - neg rax -DB 102,15,56,220,209 - pxor xmm7,xmm0 - pxor xmm8,xmm0 -DB 102,15,56,220,217 - pxor xmm9,xmm0 - movups xmm0,XMMWORD[rax*1+rcx] - add rax,16 - jmp NEAR $L$enc_loop8_inner -ALIGN 16 -$L$enc_loop8: -DB 102,15,56,220,209 -DB 102,15,56,220,217 -$L$enc_loop8_inner: -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 
-$L$enc_loop8_enter: - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 -DB 102,15,56,220,232 -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$enc_loop8 - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 -DB 102,15,56,221,208 -DB 102,15,56,221,216 -DB 102,15,56,221,224 -DB 102,15,56,221,232 -DB 102,15,56,221,240 -DB 102,15,56,221,248 -DB 102,68,15,56,221,192 -DB 102,68,15,56,221,200 - DB 0F3h,0C3h ;repret - - - -ALIGN 16 -_aesni_decrypt8: - - movups xmm0,XMMWORD[rcx] - shl eax,4 - movups xmm1,XMMWORD[16+rcx] - xorps xmm2,xmm0 - xorps xmm3,xmm0 - pxor xmm4,xmm0 - pxor xmm5,xmm0 - pxor xmm6,xmm0 - lea rcx,[32+rax*1+rcx] - neg rax -DB 102,15,56,222,209 - pxor xmm7,xmm0 - pxor xmm8,xmm0 -DB 102,15,56,222,217 - pxor xmm9,xmm0 - movups xmm0,XMMWORD[rax*1+rcx] - add rax,16 - jmp NEAR $L$dec_loop8_inner -ALIGN 16 -$L$dec_loop8: -DB 102,15,56,222,209 -DB 102,15,56,222,217 -$L$dec_loop8_inner: -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 -$L$dec_loop8_enter: - movups xmm1,XMMWORD[rax*1+rcx] - add rax,32 -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((-16))+rax*1+rcx] - jnz NEAR $L$dec_loop8 - -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 -DB 102,15,56,223,208 -DB 102,15,56,223,216 -DB 102,15,56,223,224 -DB 102,15,56,223,232 -DB 102,15,56,223,240 -DB 102,15,56,223,248 -DB 102,68,15,56,223,192 -DB 
102,68,15,56,223,200 - DB 0F3h,0C3h ;repret - - -global aes_hw_ecb_encrypt - -ALIGN 16 -aes_hw_ecb_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_hw_ecb_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - lea rsp,[((-88))+rsp] - movaps XMMWORD[rsp],xmm6 - movaps XMMWORD[16+rsp],xmm7 - movaps XMMWORD[32+rsp],xmm8 - movaps XMMWORD[48+rsp],xmm9 -$L$ecb_enc_body: - and rdx,-16 - jz NEAR $L$ecb_ret - - mov eax,DWORD[240+rcx] - movups xmm0,XMMWORD[rcx] - mov r11,rcx - mov r10d,eax - test r8d,r8d - jz NEAR $L$ecb_decrypt - - cmp rdx,0x80 - jb NEAR $L$ecb_enc_tail - - movdqu xmm2,XMMWORD[rdi] - movdqu xmm3,XMMWORD[16+rdi] - movdqu xmm4,XMMWORD[32+rdi] - movdqu xmm5,XMMWORD[48+rdi] - movdqu xmm6,XMMWORD[64+rdi] - movdqu xmm7,XMMWORD[80+rdi] - movdqu xmm8,XMMWORD[96+rdi] - movdqu xmm9,XMMWORD[112+rdi] - lea rdi,[128+rdi] - sub rdx,0x80 - jmp NEAR $L$ecb_enc_loop8_enter -ALIGN 16 -$L$ecb_enc_loop8: - movups XMMWORD[rsi],xmm2 - mov rcx,r11 - movdqu xmm2,XMMWORD[rdi] - mov eax,r10d - movups XMMWORD[16+rsi],xmm3 - movdqu xmm3,XMMWORD[16+rdi] - movups XMMWORD[32+rsi],xmm4 - movdqu xmm4,XMMWORD[32+rdi] - movups XMMWORD[48+rsi],xmm5 - movdqu xmm5,XMMWORD[48+rdi] - movups XMMWORD[64+rsi],xmm6 - movdqu xmm6,XMMWORD[64+rdi] - movups XMMWORD[80+rsi],xmm7 - movdqu xmm7,XMMWORD[80+rdi] - movups XMMWORD[96+rsi],xmm8 - movdqu xmm8,XMMWORD[96+rdi] - movups XMMWORD[112+rsi],xmm9 - lea rsi,[128+rsi] - movdqu xmm9,XMMWORD[112+rdi] - lea rdi,[128+rdi] -$L$ecb_enc_loop8_enter: - - call _aesni_encrypt8 - - sub rdx,0x80 - jnc NEAR $L$ecb_enc_loop8 - - movups XMMWORD[rsi],xmm2 - mov rcx,r11 - movups XMMWORD[16+rsi],xmm3 - mov eax,r10d - movups XMMWORD[32+rsi],xmm4 - movups XMMWORD[48+rsi],xmm5 - movups XMMWORD[64+rsi],xmm6 - movups XMMWORD[80+rsi],xmm7 - movups XMMWORD[96+rsi],xmm8 - movups XMMWORD[112+rsi],xmm9 - lea rsi,[128+rsi] - add rdx,0x80 - jz NEAR $L$ecb_ret - -$L$ecb_enc_tail: - movups 
xmm2,XMMWORD[rdi] - cmp rdx,0x20 - jb NEAR $L$ecb_enc_one - movups xmm3,XMMWORD[16+rdi] - je NEAR $L$ecb_enc_two - movups xmm4,XMMWORD[32+rdi] - cmp rdx,0x40 - jb NEAR $L$ecb_enc_three - movups xmm5,XMMWORD[48+rdi] - je NEAR $L$ecb_enc_four - movups xmm6,XMMWORD[64+rdi] - cmp rdx,0x60 - jb NEAR $L$ecb_enc_five - movups xmm7,XMMWORD[80+rdi] - je NEAR $L$ecb_enc_six - movdqu xmm8,XMMWORD[96+rdi] - xorps xmm9,xmm9 - call _aesni_encrypt8 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - movups XMMWORD[32+rsi],xmm4 - movups XMMWORD[48+rsi],xmm5 - movups XMMWORD[64+rsi],xmm6 - movups XMMWORD[80+rsi],xmm7 - movups XMMWORD[96+rsi],xmm8 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_one: - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - lea rcx,[32+rcx] - xorps xmm2,xmm0 -$L$oop_enc1_3: -DB 102,15,56,220,209 - dec eax - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_enc1_3 -DB 102,15,56,221,209 - movups XMMWORD[rsi],xmm2 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_two: - call _aesni_encrypt2 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_three: - call _aesni_encrypt3 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - movups XMMWORD[32+rsi],xmm4 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_four: - call _aesni_encrypt4 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - movups XMMWORD[32+rsi],xmm4 - movups XMMWORD[48+rsi],xmm5 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_five: - xorps xmm7,xmm7 - call _aesni_encrypt6 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - movups XMMWORD[32+rsi],xmm4 - movups XMMWORD[48+rsi],xmm5 - movups XMMWORD[64+rsi],xmm6 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_enc_six: - call _aesni_encrypt6 - movups XMMWORD[rsi],xmm2 - movups XMMWORD[16+rsi],xmm3 - movups XMMWORD[32+rsi],xmm4 - movups XMMWORD[48+rsi],xmm5 - movups XMMWORD[64+rsi],xmm6 - movups XMMWORD[80+rsi],xmm7 - jmp NEAR $L$ecb_ret - -ALIGN 16 -$L$ecb_decrypt: - cmp rdx,0x80 - jb NEAR 
$L$ecb_dec_tail - - movdqu xmm2,XMMWORD[rdi] - movdqu xmm3,XMMWORD[16+rdi] - movdqu xmm4,XMMWORD[32+rdi] - movdqu xmm5,XMMWORD[48+rdi] - movdqu xmm6,XMMWORD[64+rdi] - movdqu xmm7,XMMWORD[80+rdi] - movdqu xmm8,XMMWORD[96+rdi] - movdqu xmm9,XMMWORD[112+rdi] - lea rdi,[128+rdi] - sub rdx,0x80 - jmp NEAR $L$ecb_dec_loop8_enter -ALIGN 16 -$L$ecb_dec_loop8: - movups XMMWORD[rsi],xmm2 - mov rcx,r11 - movdqu xmm2,XMMWORD[rdi] - mov eax,r10d - movups XMMWORD[16+rsi],xmm3 - movdqu xmm3,XMMWORD[16+rdi] - movups XMMWORD[32+rsi],xmm4 - movdqu xmm4,XMMWORD[32+rdi] - movups XMMWORD[48+rsi],xmm5 - movdqu xmm5,XMMWORD[48+rdi] - movups XMMWORD[64+rsi],xmm6 - movdqu xmm6,XMMWORD[64+rdi] - movups XMMWORD[80+rsi],xmm7 - movdqu xmm7,XMMWORD[80+rdi] - movups XMMWORD[96+rsi],xmm8 - movdqu xmm8,XMMWORD[96+rdi] - movups XMMWORD[112+rsi],xmm9 - lea rsi,[128+rsi] - movdqu xmm9,XMMWORD[112+rdi] - lea rdi,[128+rdi] -$L$ecb_dec_loop8_enter: - - call _aesni_decrypt8 - - movups xmm0,XMMWORD[r11] - sub rdx,0x80 - jnc NEAR $L$ecb_dec_loop8 - - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - mov rcx,r11 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - mov eax,r10d - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movups XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - movups XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - movups XMMWORD[80+rsi],xmm7 - pxor xmm7,xmm7 - movups XMMWORD[96+rsi],xmm8 - pxor xmm8,xmm8 - movups XMMWORD[112+rsi],xmm9 - pxor xmm9,xmm9 - lea rsi,[128+rsi] - add rdx,0x80 - jz NEAR $L$ecb_ret - -$L$ecb_dec_tail: - movups xmm2,XMMWORD[rdi] - cmp rdx,0x20 - jb NEAR $L$ecb_dec_one - movups xmm3,XMMWORD[16+rdi] - je NEAR $L$ecb_dec_two - movups xmm4,XMMWORD[32+rdi] - cmp rdx,0x40 - jb NEAR $L$ecb_dec_three - movups xmm5,XMMWORD[48+rdi] - je NEAR $L$ecb_dec_four - movups xmm6,XMMWORD[64+rdi] - cmp rdx,0x60 - jb NEAR $L$ecb_dec_five - movups xmm7,XMMWORD[80+rdi] - je NEAR $L$ecb_dec_six - movups xmm8,XMMWORD[96+rdi] - movups xmm0,XMMWORD[rcx] - xorps xmm9,xmm9 - call _aesni_decrypt8 - movups 
XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movups XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - movups XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - movups XMMWORD[80+rsi],xmm7 - pxor xmm7,xmm7 - movups XMMWORD[96+rsi],xmm8 - pxor xmm8,xmm8 - pxor xmm9,xmm9 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_one: - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - lea rcx,[32+rcx] - xorps xmm2,xmm0 -$L$oop_dec1_4: -DB 102,15,56,222,209 - dec eax - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_dec1_4 -DB 102,15,56,223,209 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_two: - call _aesni_decrypt2 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_three: - call _aesni_decrypt3 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_four: - call _aesni_decrypt4 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movups XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_five: - xorps xmm7,xmm7 - call _aesni_decrypt6 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movups XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - movups XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - jmp NEAR $L$ecb_ret -ALIGN 16 -$L$ecb_dec_six: - call _aesni_decrypt6 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - movups XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movups XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movups XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - movups XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - movups XMMWORD[80+rsi],xmm7 - pxor xmm7,xmm7 - 
-$L$ecb_ret: - xorps xmm0,xmm0 - pxor xmm1,xmm1 - movaps xmm6,XMMWORD[rsp] - movaps XMMWORD[rsp],xmm0 - movaps xmm7,XMMWORD[16+rsp] - movaps XMMWORD[16+rsp],xmm0 - movaps xmm8,XMMWORD[32+rsp] - movaps XMMWORD[32+rsp],xmm0 - movaps xmm9,XMMWORD[48+rsp] - movaps XMMWORD[48+rsp],xmm0 - lea rsp,[88+rsp] -$L$ecb_enc_ret: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_hw_ecb_encrypt: -global aes_hw_ctr32_encrypt_blocks - -ALIGN 16 -aes_hw_ctr32_encrypt_blocks: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_hw_ctr32_encrypt_blocks: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - -%ifdef BORINGSSL_DISPATCH_TEST - mov BYTE[BORINGSSL_function_hit],1 -%endif - cmp rdx,1 - jne NEAR $L$ctr32_bulk - - - - movups xmm2,XMMWORD[r8] - movups xmm3,XMMWORD[rdi] - mov edx,DWORD[240+rcx] - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - lea rcx,[32+rcx] - xorps xmm2,xmm0 -$L$oop_enc1_5: -DB 102,15,56,220,209 - dec edx - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_enc1_5 -DB 102,15,56,221,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - xorps xmm2,xmm3 - pxor xmm3,xmm3 - movups XMMWORD[rsi],xmm2 - xorps xmm2,xmm2 - jmp NEAR $L$ctr32_epilogue - -ALIGN 16 -$L$ctr32_bulk: - lea r11,[rsp] - - push rbp - - sub rsp,288 - and rsp,-16 - movaps XMMWORD[(-168)+r11],xmm6 - movaps XMMWORD[(-152)+r11],xmm7 - movaps XMMWORD[(-136)+r11],xmm8 - movaps XMMWORD[(-120)+r11],xmm9 - movaps XMMWORD[(-104)+r11],xmm10 - movaps XMMWORD[(-88)+r11],xmm11 - movaps XMMWORD[(-72)+r11],xmm12 - movaps XMMWORD[(-56)+r11],xmm13 - movaps XMMWORD[(-40)+r11],xmm14 - movaps XMMWORD[(-24)+r11],xmm15 -$L$ctr32_body: - - - - - movdqu xmm2,XMMWORD[r8] - movdqu xmm0,XMMWORD[rcx] - mov r8d,DWORD[12+r8] - pxor xmm2,xmm0 - mov ebp,DWORD[12+rcx] - movdqa XMMWORD[rsp],xmm2 - bswap r8d - movdqa xmm3,xmm2 - movdqa xmm4,xmm2 - movdqa xmm5,xmm2 - movdqa XMMWORD[64+rsp],xmm2 - movdqa 
XMMWORD[80+rsp],xmm2 - movdqa XMMWORD[96+rsp],xmm2 - mov r10,rdx - movdqa XMMWORD[112+rsp],xmm2 - - lea rax,[1+r8] - lea rdx,[2+r8] - bswap eax - bswap edx - xor eax,ebp - xor edx,ebp -DB 102,15,58,34,216,3 - lea rax,[3+r8] - movdqa XMMWORD[16+rsp],xmm3 -DB 102,15,58,34,226,3 - bswap eax - mov rdx,r10 - lea r10,[4+r8] - movdqa XMMWORD[32+rsp],xmm4 - xor eax,ebp - bswap r10d -DB 102,15,58,34,232,3 - xor r10d,ebp - movdqa XMMWORD[48+rsp],xmm5 - lea r9,[5+r8] - mov DWORD[((64+12))+rsp],r10d - bswap r9d - lea r10,[6+r8] - mov eax,DWORD[240+rcx] - xor r9d,ebp - bswap r10d - mov DWORD[((80+12))+rsp],r9d - xor r10d,ebp - lea r9,[7+r8] - mov DWORD[((96+12))+rsp],r10d - bswap r9d - lea r10,[OPENSSL_ia32cap_P] - mov r10d,DWORD[4+r10] - xor r9d,ebp - and r10d,71303168 - mov DWORD[((112+12))+rsp],r9d - - movups xmm1,XMMWORD[16+rcx] - - movdqa xmm6,XMMWORD[64+rsp] - movdqa xmm7,XMMWORD[80+rsp] - - cmp rdx,8 - jb NEAR $L$ctr32_tail - - sub rdx,6 - cmp r10d,4194304 - je NEAR $L$ctr32_6x - - lea rcx,[128+rcx] - sub rdx,2 - jmp NEAR $L$ctr32_loop8 - -ALIGN 16 -$L$ctr32_6x: - shl eax,4 - mov r10d,48 - bswap ebp - lea rcx,[32+rax*1+rcx] - sub r10,rax - jmp NEAR $L$ctr32_loop6 - -ALIGN 16 -$L$ctr32_loop6: - add r8d,6 - movups xmm0,XMMWORD[((-48))+r10*1+rcx] -DB 102,15,56,220,209 - mov eax,r8d - xor eax,ebp -DB 102,15,56,220,217 -DB 0x0f,0x38,0xf1,0x44,0x24,12 - lea eax,[1+r8] -DB 102,15,56,220,225 - xor eax,ebp -DB 0x0f,0x38,0xf1,0x44,0x24,28 -DB 102,15,56,220,233 - lea eax,[2+r8] - xor eax,ebp -DB 102,15,56,220,241 -DB 0x0f,0x38,0xf1,0x44,0x24,44 - lea eax,[3+r8] -DB 102,15,56,220,249 - movups xmm1,XMMWORD[((-32))+r10*1+rcx] - xor eax,ebp - -DB 102,15,56,220,208 -DB 0x0f,0x38,0xf1,0x44,0x24,60 - lea eax,[4+r8] -DB 102,15,56,220,216 - xor eax,ebp -DB 0x0f,0x38,0xf1,0x44,0x24,76 -DB 102,15,56,220,224 - lea eax,[5+r8] - xor eax,ebp -DB 102,15,56,220,232 -DB 0x0f,0x38,0xf1,0x44,0x24,92 - mov rax,r10 -DB 102,15,56,220,240 -DB 102,15,56,220,248 - movups xmm0,XMMWORD[((-16))+r10*1+rcx] - - 
call $L$enc_loop6 - - movdqu xmm8,XMMWORD[rdi] - movdqu xmm9,XMMWORD[16+rdi] - movdqu xmm10,XMMWORD[32+rdi] - movdqu xmm11,XMMWORD[48+rdi] - movdqu xmm12,XMMWORD[64+rdi] - movdqu xmm13,XMMWORD[80+rdi] - lea rdi,[96+rdi] - movups xmm1,XMMWORD[((-64))+r10*1+rcx] - pxor xmm8,xmm2 - movaps xmm2,XMMWORD[rsp] - pxor xmm9,xmm3 - movaps xmm3,XMMWORD[16+rsp] - pxor xmm10,xmm4 - movaps xmm4,XMMWORD[32+rsp] - pxor xmm11,xmm5 - movaps xmm5,XMMWORD[48+rsp] - pxor xmm12,xmm6 - movaps xmm6,XMMWORD[64+rsp] - pxor xmm13,xmm7 - movaps xmm7,XMMWORD[80+rsp] - movdqu XMMWORD[rsi],xmm8 - movdqu XMMWORD[16+rsi],xmm9 - movdqu XMMWORD[32+rsi],xmm10 - movdqu XMMWORD[48+rsi],xmm11 - movdqu XMMWORD[64+rsi],xmm12 - movdqu XMMWORD[80+rsi],xmm13 - lea rsi,[96+rsi] - - sub rdx,6 - jnc NEAR $L$ctr32_loop6 - - add rdx,6 - jz NEAR $L$ctr32_done - - lea eax,[((-48))+r10] - lea rcx,[((-80))+r10*1+rcx] - neg eax - shr eax,4 - jmp NEAR $L$ctr32_tail - -ALIGN 32 -$L$ctr32_loop8: - add r8d,8 - movdqa xmm8,XMMWORD[96+rsp] -DB 102,15,56,220,209 - mov r9d,r8d - movdqa xmm9,XMMWORD[112+rsp] -DB 102,15,56,220,217 - bswap r9d - movups xmm0,XMMWORD[((32-128))+rcx] -DB 102,15,56,220,225 - xor r9d,ebp - nop -DB 102,15,56,220,233 - mov DWORD[((0+12))+rsp],r9d - lea r9,[1+r8] -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((48-128))+rcx] - bswap r9d -DB 102,15,56,220,208 -DB 102,15,56,220,216 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,224 -DB 102,15,56,220,232 - mov DWORD[((16+12))+rsp],r9d - lea r9,[2+r8] -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((64-128))+rcx] - bswap r9d -DB 102,15,56,220,209 -DB 102,15,56,220,217 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,225 -DB 102,15,56,220,233 - mov DWORD[((32+12))+rsp],r9d - lea r9,[3+r8] -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((80-128))+rcx] - bswap 
r9d -DB 102,15,56,220,208 -DB 102,15,56,220,216 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,224 -DB 102,15,56,220,232 - mov DWORD[((48+12))+rsp],r9d - lea r9,[4+r8] -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((96-128))+rcx] - bswap r9d -DB 102,15,56,220,209 -DB 102,15,56,220,217 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,225 -DB 102,15,56,220,233 - mov DWORD[((64+12))+rsp],r9d - lea r9,[5+r8] -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((112-128))+rcx] - bswap r9d -DB 102,15,56,220,208 -DB 102,15,56,220,216 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,224 -DB 102,15,56,220,232 - mov DWORD[((80+12))+rsp],r9d - lea r9,[6+r8] -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((128-128))+rcx] - bswap r9d -DB 102,15,56,220,209 -DB 102,15,56,220,217 - xor r9d,ebp -DB 0x66,0x90 -DB 102,15,56,220,225 -DB 102,15,56,220,233 - mov DWORD[((96+12))+rsp],r9d - lea r9,[7+r8] -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((144-128))+rcx] - bswap r9d -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 - xor r9d,ebp - movdqu xmm10,XMMWORD[rdi] -DB 102,15,56,220,232 - mov DWORD[((112+12))+rsp],r9d - cmp eax,11 -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((160-128))+rcx] - - jb NEAR $L$ctr32_enc_done - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((176-128))+rcx] - -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 -DB 102,15,56,220,232 -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups 
xmm0,XMMWORD[((192-128))+rcx] - je NEAR $L$ctr32_enc_done - -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movups xmm1,XMMWORD[((208-128))+rcx] - -DB 102,15,56,220,208 -DB 102,15,56,220,216 -DB 102,15,56,220,224 -DB 102,15,56,220,232 -DB 102,15,56,220,240 -DB 102,15,56,220,248 -DB 102,68,15,56,220,192 -DB 102,68,15,56,220,200 - movups xmm0,XMMWORD[((224-128))+rcx] - jmp NEAR $L$ctr32_enc_done - -ALIGN 16 -$L$ctr32_enc_done: - movdqu xmm11,XMMWORD[16+rdi] - pxor xmm10,xmm0 - movdqu xmm12,XMMWORD[32+rdi] - pxor xmm11,xmm0 - movdqu xmm13,XMMWORD[48+rdi] - pxor xmm12,xmm0 - movdqu xmm14,XMMWORD[64+rdi] - pxor xmm13,xmm0 - movdqu xmm15,XMMWORD[80+rdi] - pxor xmm14,xmm0 - pxor xmm15,xmm0 -DB 102,15,56,220,209 -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 -DB 102,15,56,220,241 -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 -DB 102,68,15,56,220,201 - movdqu xmm1,XMMWORD[96+rdi] - lea rdi,[128+rdi] - -DB 102,65,15,56,221,210 - pxor xmm1,xmm0 - movdqu xmm10,XMMWORD[((112-128))+rdi] -DB 102,65,15,56,221,219 - pxor xmm10,xmm0 - movdqa xmm11,XMMWORD[rsp] -DB 102,65,15,56,221,228 -DB 102,65,15,56,221,237 - movdqa xmm12,XMMWORD[16+rsp] - movdqa xmm13,XMMWORD[32+rsp] -DB 102,65,15,56,221,246 -DB 102,65,15,56,221,255 - movdqa xmm14,XMMWORD[48+rsp] - movdqa xmm15,XMMWORD[64+rsp] -DB 102,68,15,56,221,193 - movdqa xmm0,XMMWORD[80+rsp] - movups xmm1,XMMWORD[((16-128))+rcx] -DB 102,69,15,56,221,202 - - movups XMMWORD[rsi],xmm2 - movdqa xmm2,xmm11 - movups XMMWORD[16+rsi],xmm3 - movdqa xmm3,xmm12 - movups XMMWORD[32+rsi],xmm4 - movdqa xmm4,xmm13 - movups XMMWORD[48+rsi],xmm5 - movdqa xmm5,xmm14 - movups XMMWORD[64+rsi],xmm6 - movdqa xmm6,xmm15 - movups XMMWORD[80+rsi],xmm7 - movdqa xmm7,xmm0 - movups XMMWORD[96+rsi],xmm8 - movups XMMWORD[112+rsi],xmm9 - lea rsi,[128+rsi] - - sub rdx,8 - jnc NEAR $L$ctr32_loop8 - - add rdx,8 - jz NEAR 
$L$ctr32_done - lea rcx,[((-128))+rcx] - -$L$ctr32_tail: - - - lea rcx,[16+rcx] - cmp rdx,4 - jb NEAR $L$ctr32_loop3 - je NEAR $L$ctr32_loop4 - - - shl eax,4 - movdqa xmm8,XMMWORD[96+rsp] - pxor xmm9,xmm9 - - movups xmm0,XMMWORD[16+rcx] -DB 102,15,56,220,209 -DB 102,15,56,220,217 - lea rcx,[((32-16))+rax*1+rcx] - neg rax -DB 102,15,56,220,225 - add rax,16 - movups xmm10,XMMWORD[rdi] -DB 102,15,56,220,233 -DB 102,15,56,220,241 - movups xmm11,XMMWORD[16+rdi] - movups xmm12,XMMWORD[32+rdi] -DB 102,15,56,220,249 -DB 102,68,15,56,220,193 - - call $L$enc_loop8_enter - - movdqu xmm13,XMMWORD[48+rdi] - pxor xmm2,xmm10 - movdqu xmm10,XMMWORD[64+rdi] - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm6,xmm10 - movdqu XMMWORD[48+rsi],xmm5 - movdqu XMMWORD[64+rsi],xmm6 - cmp rdx,6 - jb NEAR $L$ctr32_done - - movups xmm11,XMMWORD[80+rdi] - xorps xmm7,xmm11 - movups XMMWORD[80+rsi],xmm7 - je NEAR $L$ctr32_done - - movups xmm12,XMMWORD[96+rdi] - xorps xmm8,xmm12 - movups XMMWORD[96+rsi],xmm8 - jmp NEAR $L$ctr32_done - -ALIGN 32 -$L$ctr32_loop4: -DB 102,15,56,220,209 - lea rcx,[16+rcx] - dec eax -DB 102,15,56,220,217 -DB 102,15,56,220,225 -DB 102,15,56,220,233 - movups xmm1,XMMWORD[rcx] - jnz NEAR $L$ctr32_loop4 -DB 102,15,56,221,209 -DB 102,15,56,221,217 - movups xmm10,XMMWORD[rdi] - movups xmm11,XMMWORD[16+rdi] -DB 102,15,56,221,225 -DB 102,15,56,221,233 - movups xmm12,XMMWORD[32+rdi] - movups xmm13,XMMWORD[48+rdi] - - xorps xmm2,xmm10 - movups XMMWORD[rsi],xmm2 - xorps xmm3,xmm11 - movups XMMWORD[16+rsi],xmm3 - pxor xmm4,xmm12 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm5,xmm13 - movdqu XMMWORD[48+rsi],xmm5 - jmp NEAR $L$ctr32_done - -ALIGN 32 -$L$ctr32_loop3: -DB 102,15,56,220,209 - lea rcx,[16+rcx] - dec eax -DB 102,15,56,220,217 -DB 102,15,56,220,225 - movups xmm1,XMMWORD[rcx] - jnz NEAR $L$ctr32_loop3 -DB 102,15,56,221,209 -DB 102,15,56,221,217 -DB 102,15,56,221,225 - - movups 
xmm10,XMMWORD[rdi] - xorps xmm2,xmm10 - movups XMMWORD[rsi],xmm2 - cmp rdx,2 - jb NEAR $L$ctr32_done - - movups xmm11,XMMWORD[16+rdi] - xorps xmm3,xmm11 - movups XMMWORD[16+rsi],xmm3 - je NEAR $L$ctr32_done - - movups xmm12,XMMWORD[32+rdi] - xorps xmm4,xmm12 - movups XMMWORD[32+rsi],xmm4 - -$L$ctr32_done: - xorps xmm0,xmm0 - xor ebp,ebp - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - movaps xmm6,XMMWORD[((-168))+r11] - movaps XMMWORD[(-168)+r11],xmm0 - movaps xmm7,XMMWORD[((-152))+r11] - movaps XMMWORD[(-152)+r11],xmm0 - movaps xmm8,XMMWORD[((-136))+r11] - movaps XMMWORD[(-136)+r11],xmm0 - movaps xmm9,XMMWORD[((-120))+r11] - movaps XMMWORD[(-120)+r11],xmm0 - movaps xmm10,XMMWORD[((-104))+r11] - movaps XMMWORD[(-104)+r11],xmm0 - movaps xmm11,XMMWORD[((-88))+r11] - movaps XMMWORD[(-88)+r11],xmm0 - movaps xmm12,XMMWORD[((-72))+r11] - movaps XMMWORD[(-72)+r11],xmm0 - movaps xmm13,XMMWORD[((-56))+r11] - movaps XMMWORD[(-56)+r11],xmm0 - movaps xmm14,XMMWORD[((-40))+r11] - movaps XMMWORD[(-40)+r11],xmm0 - movaps xmm15,XMMWORD[((-24))+r11] - movaps XMMWORD[(-24)+r11],xmm0 - movaps XMMWORD[rsp],xmm0 - movaps XMMWORD[16+rsp],xmm0 - movaps XMMWORD[32+rsp],xmm0 - movaps XMMWORD[48+rsp],xmm0 - movaps XMMWORD[64+rsp],xmm0 - movaps XMMWORD[80+rsp],xmm0 - movaps XMMWORD[96+rsp],xmm0 - movaps XMMWORD[112+rsp],xmm0 - mov rbp,QWORD[((-8))+r11] - - lea rsp,[r11] - -$L$ctr32_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_hw_ctr32_encrypt_blocks: -global aes_hw_cbc_encrypt - -ALIGN 16 -aes_hw_cbc_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_aes_hw_cbc_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - test rdx,rdx - jz NEAR $L$cbc_ret - - mov r10d,DWORD[240+rcx] - mov r11,rcx - test r9d,r9d - jz NEAR $L$cbc_decrypt - - movups xmm2,XMMWORD[r8] - mov eax,r10d - cmp 
rdx,16 - jb NEAR $L$cbc_enc_tail - sub rdx,16 - jmp NEAR $L$cbc_enc_loop -ALIGN 16 -$L$cbc_enc_loop: - movups xmm3,XMMWORD[rdi] - lea rdi,[16+rdi] - - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - xorps xmm3,xmm0 - lea rcx,[32+rcx] - xorps xmm2,xmm3 -$L$oop_enc1_6: -DB 102,15,56,220,209 - dec eax - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_enc1_6 -DB 102,15,56,221,209 - mov eax,r10d - mov rcx,r11 - movups XMMWORD[rsi],xmm2 - lea rsi,[16+rsi] - sub rdx,16 - jnc NEAR $L$cbc_enc_loop - add rdx,16 - jnz NEAR $L$cbc_enc_tail - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movups XMMWORD[r8],xmm2 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - jmp NEAR $L$cbc_ret - -$L$cbc_enc_tail: - mov rcx,rdx - xchg rsi,rdi - DD 0x9066A4F3 - mov ecx,16 - sub rcx,rdx - xor eax,eax - DD 0x9066AAF3 - lea rdi,[((-16))+rdi] - mov eax,r10d - mov rsi,rdi - mov rcx,r11 - xor rdx,rdx - jmp NEAR $L$cbc_enc_loop - -ALIGN 16 -$L$cbc_decrypt: - cmp rdx,16 - jne NEAR $L$cbc_decrypt_bulk - - - - movdqu xmm2,XMMWORD[rdi] - movdqu xmm3,XMMWORD[r8] - movdqa xmm4,xmm2 - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - lea rcx,[32+rcx] - xorps xmm2,xmm0 -$L$oop_dec1_7: -DB 102,15,56,222,209 - dec r10d - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_dec1_7 -DB 102,15,56,223,209 - pxor xmm0,xmm0 - pxor xmm1,xmm1 - movdqu XMMWORD[r8],xmm4 - xorps xmm2,xmm3 - pxor xmm3,xmm3 - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - jmp NEAR $L$cbc_ret -ALIGN 16 -$L$cbc_decrypt_bulk: - lea r11,[rsp] - - push rbp - - sub rsp,176 - and rsp,-16 - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$cbc_decrypt_body: - mov rbp,rcx - movups xmm10,XMMWORD[r8] - mov eax,r10d - cmp rdx,0x50 - jbe NEAR $L$cbc_dec_tail - - movups 
xmm0,XMMWORD[rcx] - movdqu xmm2,XMMWORD[rdi] - movdqu xmm3,XMMWORD[16+rdi] - movdqa xmm11,xmm2 - movdqu xmm4,XMMWORD[32+rdi] - movdqa xmm12,xmm3 - movdqu xmm5,XMMWORD[48+rdi] - movdqa xmm13,xmm4 - movdqu xmm6,XMMWORD[64+rdi] - movdqa xmm14,xmm5 - movdqu xmm7,XMMWORD[80+rdi] - movdqa xmm15,xmm6 - lea r9,[OPENSSL_ia32cap_P] - mov r9d,DWORD[4+r9] - cmp rdx,0x70 - jbe NEAR $L$cbc_dec_six_or_seven - - and r9d,71303168 - sub rdx,0x50 - cmp r9d,4194304 - je NEAR $L$cbc_dec_loop6_enter - sub rdx,0x20 - lea rcx,[112+rcx] - jmp NEAR $L$cbc_dec_loop8_enter -ALIGN 16 -$L$cbc_dec_loop8: - movups XMMWORD[rsi],xmm9 - lea rsi,[16+rsi] -$L$cbc_dec_loop8_enter: - movdqu xmm8,XMMWORD[96+rdi] - pxor xmm2,xmm0 - movdqu xmm9,XMMWORD[112+rdi] - pxor xmm3,xmm0 - movups xmm1,XMMWORD[((16-112))+rcx] - pxor xmm4,xmm0 - mov rbp,-1 - cmp rdx,0x70 - pxor xmm5,xmm0 - pxor xmm6,xmm0 - pxor xmm7,xmm0 - pxor xmm8,xmm0 - -DB 102,15,56,222,209 - pxor xmm9,xmm0 - movups xmm0,XMMWORD[((32-112))+rcx] -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 - adc rbp,0 - and rbp,128 -DB 102,68,15,56,222,201 - add rbp,rdi - movups xmm1,XMMWORD[((48-112))+rcx] -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((64-112))+rcx] - nop -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movups xmm1,XMMWORD[((80-112))+rcx] - nop -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((96-112))+rcx] - nop -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 
102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movups xmm1,XMMWORD[((112-112))+rcx] - nop -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((128-112))+rcx] - nop -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movups xmm1,XMMWORD[((144-112))+rcx] - cmp eax,11 -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((160-112))+rcx] - jb NEAR $L$cbc_dec_done -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movups xmm1,XMMWORD[((176-112))+rcx] - nop -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((192-112))+rcx] - je NEAR $L$cbc_dec_done -DB 102,15,56,222,209 -DB 102,15,56,222,217 -DB 102,15,56,222,225 -DB 102,15,56,222,233 -DB 102,15,56,222,241 -DB 102,15,56,222,249 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movups xmm1,XMMWORD[((208-112))+rcx] - nop -DB 102,15,56,222,208 -DB 102,15,56,222,216 -DB 102,15,56,222,224 -DB 102,15,56,222,232 -DB 102,15,56,222,240 -DB 102,15,56,222,248 -DB 102,68,15,56,222,192 -DB 102,68,15,56,222,200 - movups xmm0,XMMWORD[((224-112))+rcx] - jmp NEAR $L$cbc_dec_done -ALIGN 16 -$L$cbc_dec_done: -DB 102,15,56,222,209 -DB 102,15,56,222,217 - pxor xmm10,xmm0 - pxor xmm11,xmm0 -DB 102,15,56,222,225 -DB 102,15,56,222,233 - pxor xmm12,xmm0 - pxor xmm13,xmm0 -DB 
102,15,56,222,241 -DB 102,15,56,222,249 - pxor xmm14,xmm0 - pxor xmm15,xmm0 -DB 102,68,15,56,222,193 -DB 102,68,15,56,222,201 - movdqu xmm1,XMMWORD[80+rdi] - -DB 102,65,15,56,223,210 - movdqu xmm10,XMMWORD[96+rdi] - pxor xmm1,xmm0 -DB 102,65,15,56,223,219 - pxor xmm10,xmm0 - movdqu xmm0,XMMWORD[112+rdi] -DB 102,65,15,56,223,228 - lea rdi,[128+rdi] - movdqu xmm11,XMMWORD[rbp] -DB 102,65,15,56,223,237 -DB 102,65,15,56,223,246 - movdqu xmm12,XMMWORD[16+rbp] - movdqu xmm13,XMMWORD[32+rbp] -DB 102,65,15,56,223,255 -DB 102,68,15,56,223,193 - movdqu xmm14,XMMWORD[48+rbp] - movdqu xmm15,XMMWORD[64+rbp] -DB 102,69,15,56,223,202 - movdqa xmm10,xmm0 - movdqu xmm1,XMMWORD[80+rbp] - movups xmm0,XMMWORD[((-112))+rcx] - - movups XMMWORD[rsi],xmm2 - movdqa xmm2,xmm11 - movups XMMWORD[16+rsi],xmm3 - movdqa xmm3,xmm12 - movups XMMWORD[32+rsi],xmm4 - movdqa xmm4,xmm13 - movups XMMWORD[48+rsi],xmm5 - movdqa xmm5,xmm14 - movups XMMWORD[64+rsi],xmm6 - movdqa xmm6,xmm15 - movups XMMWORD[80+rsi],xmm7 - movdqa xmm7,xmm1 - movups XMMWORD[96+rsi],xmm8 - lea rsi,[112+rsi] - - sub rdx,0x80 - ja NEAR $L$cbc_dec_loop8 - - movaps xmm2,xmm9 - lea rcx,[((-112))+rcx] - add rdx,0x70 - jle NEAR $L$cbc_dec_clear_tail_collected - movups XMMWORD[rsi],xmm9 - lea rsi,[16+rsi] - cmp rdx,0x50 - jbe NEAR $L$cbc_dec_tail - - movaps xmm2,xmm11 -$L$cbc_dec_six_or_seven: - cmp rdx,0x60 - ja NEAR $L$cbc_dec_seven - - movaps xmm8,xmm7 - call _aesni_decrypt6 - pxor xmm2,xmm10 - movaps xmm10,xmm8 - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - pxor xmm6,xmm14 - movdqu XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - pxor xmm7,xmm15 - movdqu XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - lea rsi,[80+rsi] - movdqa xmm2,xmm7 - pxor xmm7,xmm7 - jmp NEAR $L$cbc_dec_tail_collected - -ALIGN 16 -$L$cbc_dec_seven: - movups xmm8,XMMWORD[96+rdi] - xorps xmm9,xmm9 - call _aesni_decrypt8 - movups xmm9,XMMWORD[80+rdi] - 
pxor xmm2,xmm10 - movups xmm10,XMMWORD[96+rdi] - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - pxor xmm6,xmm14 - movdqu XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - pxor xmm7,xmm15 - movdqu XMMWORD[64+rsi],xmm6 - pxor xmm6,xmm6 - pxor xmm8,xmm9 - movdqu XMMWORD[80+rsi],xmm7 - pxor xmm7,xmm7 - lea rsi,[96+rsi] - movdqa xmm2,xmm8 - pxor xmm8,xmm8 - pxor xmm9,xmm9 - jmp NEAR $L$cbc_dec_tail_collected - -ALIGN 16 -$L$cbc_dec_loop6: - movups XMMWORD[rsi],xmm7 - lea rsi,[16+rsi] - movdqu xmm2,XMMWORD[rdi] - movdqu xmm3,XMMWORD[16+rdi] - movdqa xmm11,xmm2 - movdqu xmm4,XMMWORD[32+rdi] - movdqa xmm12,xmm3 - movdqu xmm5,XMMWORD[48+rdi] - movdqa xmm13,xmm4 - movdqu xmm6,XMMWORD[64+rdi] - movdqa xmm14,xmm5 - movdqu xmm7,XMMWORD[80+rdi] - movdqa xmm15,xmm6 -$L$cbc_dec_loop6_enter: - lea rdi,[96+rdi] - movdqa xmm8,xmm7 - - call _aesni_decrypt6 - - pxor xmm2,xmm10 - movdqa xmm10,xmm8 - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm6,xmm14 - mov rcx,rbp - movdqu XMMWORD[48+rsi],xmm5 - pxor xmm7,xmm15 - mov eax,r10d - movdqu XMMWORD[64+rsi],xmm6 - lea rsi,[80+rsi] - sub rdx,0x60 - ja NEAR $L$cbc_dec_loop6 - - movdqa xmm2,xmm7 - add rdx,0x50 - jle NEAR $L$cbc_dec_clear_tail_collected - movups XMMWORD[rsi],xmm7 - lea rsi,[16+rsi] - -$L$cbc_dec_tail: - movups xmm2,XMMWORD[rdi] - sub rdx,0x10 - jbe NEAR $L$cbc_dec_one - - movups xmm3,XMMWORD[16+rdi] - movaps xmm11,xmm2 - sub rdx,0x10 - jbe NEAR $L$cbc_dec_two - - movups xmm4,XMMWORD[32+rdi] - movaps xmm12,xmm3 - sub rdx,0x10 - jbe NEAR $L$cbc_dec_three - - movups xmm5,XMMWORD[48+rdi] - movaps xmm13,xmm4 - sub rdx,0x10 - jbe NEAR $L$cbc_dec_four - - movups xmm6,XMMWORD[64+rdi] - movaps xmm14,xmm5 - movaps xmm15,xmm6 - xorps xmm7,xmm7 - call _aesni_decrypt6 - pxor xmm2,xmm10 - movaps xmm10,xmm15 - pxor xmm3,xmm11 
- movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - pxor xmm6,xmm14 - movdqu XMMWORD[48+rsi],xmm5 - pxor xmm5,xmm5 - lea rsi,[64+rsi] - movdqa xmm2,xmm6 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - sub rdx,0x10 - jmp NEAR $L$cbc_dec_tail_collected - -ALIGN 16 -$L$cbc_dec_one: - movaps xmm11,xmm2 - movups xmm0,XMMWORD[rcx] - movups xmm1,XMMWORD[16+rcx] - lea rcx,[32+rcx] - xorps xmm2,xmm0 -$L$oop_dec1_8: -DB 102,15,56,222,209 - dec eax - movups xmm1,XMMWORD[rcx] - lea rcx,[16+rcx] - jnz NEAR $L$oop_dec1_8 -DB 102,15,56,223,209 - xorps xmm2,xmm10 - movaps xmm10,xmm11 - jmp NEAR $L$cbc_dec_tail_collected -ALIGN 16 -$L$cbc_dec_two: - movaps xmm12,xmm3 - call _aesni_decrypt2 - pxor xmm2,xmm10 - movaps xmm10,xmm12 - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - movdqa xmm2,xmm3 - pxor xmm3,xmm3 - lea rsi,[16+rsi] - jmp NEAR $L$cbc_dec_tail_collected -ALIGN 16 -$L$cbc_dec_three: - movaps xmm13,xmm4 - call _aesni_decrypt3 - pxor xmm2,xmm10 - movaps xmm10,xmm13 - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - movdqa xmm2,xmm4 - pxor xmm4,xmm4 - lea rsi,[32+rsi] - jmp NEAR $L$cbc_dec_tail_collected -ALIGN 16 -$L$cbc_dec_four: - movaps xmm14,xmm5 - call _aesni_decrypt4 - pxor xmm2,xmm10 - movaps xmm10,xmm14 - pxor xmm3,xmm11 - movdqu XMMWORD[rsi],xmm2 - pxor xmm4,xmm12 - movdqu XMMWORD[16+rsi],xmm3 - pxor xmm3,xmm3 - pxor xmm5,xmm13 - movdqu XMMWORD[32+rsi],xmm4 - pxor xmm4,xmm4 - movdqa xmm2,xmm5 - pxor xmm5,xmm5 - lea rsi,[48+rsi] - jmp NEAR $L$cbc_dec_tail_collected - -ALIGN 16 -$L$cbc_dec_clear_tail_collected: - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 -$L$cbc_dec_tail_collected: - movups XMMWORD[r8],xmm10 - and rdx,15 - jnz NEAR $L$cbc_dec_tail_partial - movups XMMWORD[rsi],xmm2 - pxor xmm2,xmm2 - jmp NEAR $L$cbc_dec_ret -ALIGN 16 -$L$cbc_dec_tail_partial: - movaps XMMWORD[rsp],xmm2 - pxor xmm2,xmm2 - mov 
rcx,16 - mov rdi,rsi - sub rcx,rdx - lea rsi,[rsp] - DD 0x9066A4F3 - movdqa XMMWORD[rsp],xmm2 - -$L$cbc_dec_ret: - xorps xmm0,xmm0 - pxor xmm1,xmm1 - movaps xmm6,XMMWORD[16+rsp] - movaps XMMWORD[16+rsp],xmm0 - movaps xmm7,XMMWORD[32+rsp] - movaps XMMWORD[32+rsp],xmm0 - movaps xmm8,XMMWORD[48+rsp] - movaps XMMWORD[48+rsp],xmm0 - movaps xmm9,XMMWORD[64+rsp] - movaps XMMWORD[64+rsp],xmm0 - movaps xmm10,XMMWORD[80+rsp] - movaps XMMWORD[80+rsp],xmm0 - movaps xmm11,XMMWORD[96+rsp] - movaps XMMWORD[96+rsp],xmm0 - movaps xmm12,XMMWORD[112+rsp] - movaps XMMWORD[112+rsp],xmm0 - movaps xmm13,XMMWORD[128+rsp] - movaps XMMWORD[128+rsp],xmm0 - movaps xmm14,XMMWORD[144+rsp] - movaps XMMWORD[144+rsp],xmm0 - movaps xmm15,XMMWORD[160+rsp] - movaps XMMWORD[160+rsp],xmm0 - mov rbp,QWORD[((-8))+r11] - - lea rsp,[r11] - -$L$cbc_ret: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_aes_hw_cbc_encrypt: -global aes_hw_set_decrypt_key - -ALIGN 16 -aes_hw_set_decrypt_key: - -DB 0x48,0x83,0xEC,0x08 - - call __aesni_set_encrypt_key - shl edx,4 - test eax,eax - jnz NEAR $L$dec_key_ret - lea rcx,[16+rdx*1+r8] - - movups xmm0,XMMWORD[r8] - movups xmm1,XMMWORD[rcx] - movups XMMWORD[rcx],xmm0 - movups XMMWORD[r8],xmm1 - lea r8,[16+r8] - lea rcx,[((-16))+rcx] - -$L$dec_key_inverse: - movups xmm0,XMMWORD[r8] - movups xmm1,XMMWORD[rcx] -DB 102,15,56,219,192 -DB 102,15,56,219,201 - lea r8,[16+r8] - lea rcx,[((-16))+rcx] - movups XMMWORD[16+rcx],xmm0 - movups XMMWORD[(-16)+r8],xmm1 - cmp rcx,r8 - ja NEAR $L$dec_key_inverse - - movups xmm0,XMMWORD[r8] -DB 102,15,56,219,192 - pxor xmm1,xmm1 - movups XMMWORD[rcx],xmm0 - pxor xmm0,xmm0 -$L$dec_key_ret: - add rsp,8 - - DB 0F3h,0C3h ;repret - -$L$SEH_end_set_decrypt_key: - -global aes_hw_set_encrypt_key - -ALIGN 16 -aes_hw_set_encrypt_key: -__aesni_set_encrypt_key: - -%ifdef BORINGSSL_DISPATCH_TEST - mov BYTE[((BORINGSSL_function_hit+3))],1 -%endif -DB 0x48,0x83,0xEC,0x08 - - mov rax,-1 - test rcx,rcx - jz 
NEAR $L$enc_key_ret - test r8,r8 - jz NEAR $L$enc_key_ret - - movups xmm0,XMMWORD[rcx] - xorps xmm4,xmm4 - lea r10,[OPENSSL_ia32cap_P] - mov r10d,DWORD[4+r10] - and r10d,268437504 - lea rax,[16+r8] - cmp edx,256 - je NEAR $L$14rounds - cmp edx,192 - je NEAR $L$12rounds - cmp edx,128 - jne NEAR $L$bad_keybits - -$L$10rounds: - mov edx,9 - cmp r10d,268435456 - je NEAR $L$10rounds_alt - - movups XMMWORD[r8],xmm0 -DB 102,15,58,223,200,1 - call $L$key_expansion_128_cold -DB 102,15,58,223,200,2 - call $L$key_expansion_128 -DB 102,15,58,223,200,4 - call $L$key_expansion_128 -DB 102,15,58,223,200,8 - call $L$key_expansion_128 -DB 102,15,58,223,200,16 - call $L$key_expansion_128 -DB 102,15,58,223,200,32 - call $L$key_expansion_128 -DB 102,15,58,223,200,64 - call $L$key_expansion_128 -DB 102,15,58,223,200,128 - call $L$key_expansion_128 -DB 102,15,58,223,200,27 - call $L$key_expansion_128 -DB 102,15,58,223,200,54 - call $L$key_expansion_128 - movups XMMWORD[rax],xmm0 - mov DWORD[80+rax],edx - xor eax,eax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$10rounds_alt: - movdqa xmm5,XMMWORD[$L$key_rotate] - mov r10d,8 - movdqa xmm4,XMMWORD[$L$key_rcon1] - movdqa xmm2,xmm0 - movdqu XMMWORD[r8],xmm0 - jmp NEAR $L$oop_key128 - -ALIGN 16 -$L$oop_key128: -DB 102,15,56,0,197 -DB 102,15,56,221,196 - pslld xmm4,1 - lea rax,[16+rax] - - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm2,xmm3 - - pxor xmm0,xmm2 - movdqu XMMWORD[(-16)+rax],xmm0 - movdqa xmm2,xmm0 - - dec r10d - jnz NEAR $L$oop_key128 - - movdqa xmm4,XMMWORD[$L$key_rcon1b] - -DB 102,15,56,0,197 -DB 102,15,56,221,196 - pslld xmm4,1 - - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm2,xmm3 - - pxor xmm0,xmm2 - movdqu XMMWORD[rax],xmm0 - - movdqa xmm2,xmm0 -DB 102,15,56,0,197 -DB 102,15,56,221,196 - - movdqa xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor xmm3,xmm2 - pslldq xmm2,4 - pxor 
xmm2,xmm3 - - pxor xmm0,xmm2 - movdqu XMMWORD[16+rax],xmm0 - - mov DWORD[96+rax],edx - xor eax,eax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$12rounds: - movq xmm2,QWORD[16+rcx] - mov edx,11 - cmp r10d,268435456 - je NEAR $L$12rounds_alt - - movups XMMWORD[r8],xmm0 -DB 102,15,58,223,202,1 - call $L$key_expansion_192a_cold -DB 102,15,58,223,202,2 - call $L$key_expansion_192b -DB 102,15,58,223,202,4 - call $L$key_expansion_192a -DB 102,15,58,223,202,8 - call $L$key_expansion_192b -DB 102,15,58,223,202,16 - call $L$key_expansion_192a -DB 102,15,58,223,202,32 - call $L$key_expansion_192b -DB 102,15,58,223,202,64 - call $L$key_expansion_192a -DB 102,15,58,223,202,128 - call $L$key_expansion_192b - movups XMMWORD[rax],xmm0 - mov DWORD[48+rax],edx - xor rax,rax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$12rounds_alt: - movdqa xmm5,XMMWORD[$L$key_rotate192] - movdqa xmm4,XMMWORD[$L$key_rcon1] - mov r10d,8 - movdqu XMMWORD[r8],xmm0 - jmp NEAR $L$oop_key192 - -ALIGN 16 -$L$oop_key192: - movq QWORD[rax],xmm2 - movdqa xmm1,xmm2 -DB 102,15,56,0,213 -DB 102,15,56,221,212 - pslld xmm4,1 - lea rax,[24+rax] - - movdqa xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm0,xmm3 - - pshufd xmm3,xmm0,0xff - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - - pxor xmm0,xmm2 - pxor xmm2,xmm3 - movdqu XMMWORD[(-16)+rax],xmm0 - - dec r10d - jnz NEAR $L$oop_key192 - - mov DWORD[32+rax],edx - xor eax,eax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$14rounds: - movups xmm2,XMMWORD[16+rcx] - mov edx,13 - lea rax,[16+rax] - cmp r10d,268435456 - je NEAR $L$14rounds_alt - - movups XMMWORD[r8],xmm0 - movups XMMWORD[16+r8],xmm2 -DB 102,15,58,223,202,1 - call $L$key_expansion_256a_cold -DB 102,15,58,223,200,1 - call $L$key_expansion_256b -DB 102,15,58,223,202,2 - call $L$key_expansion_256a -DB 102,15,58,223,200,2 - call $L$key_expansion_256b -DB 102,15,58,223,202,4 - call $L$key_expansion_256a -DB 102,15,58,223,200,4 - call $L$key_expansion_256b -DB 
102,15,58,223,202,8 - call $L$key_expansion_256a -DB 102,15,58,223,200,8 - call $L$key_expansion_256b -DB 102,15,58,223,202,16 - call $L$key_expansion_256a -DB 102,15,58,223,200,16 - call $L$key_expansion_256b -DB 102,15,58,223,202,32 - call $L$key_expansion_256a -DB 102,15,58,223,200,32 - call $L$key_expansion_256b -DB 102,15,58,223,202,64 - call $L$key_expansion_256a - movups XMMWORD[rax],xmm0 - mov DWORD[16+rax],edx - xor rax,rax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$14rounds_alt: - movdqa xmm5,XMMWORD[$L$key_rotate] - movdqa xmm4,XMMWORD[$L$key_rcon1] - mov r10d,7 - movdqu XMMWORD[r8],xmm0 - movdqa xmm1,xmm2 - movdqu XMMWORD[16+r8],xmm2 - jmp NEAR $L$oop_key256 - -ALIGN 16 -$L$oop_key256: -DB 102,15,56,0,213 -DB 102,15,56,221,212 - - movdqa xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm3,xmm0 - pslldq xmm0,4 - pxor xmm0,xmm3 - pslld xmm4,1 - - pxor xmm0,xmm2 - movdqu XMMWORD[rax],xmm0 - - dec r10d - jz NEAR $L$done_key256 - - pshufd xmm2,xmm0,0xff - pxor xmm3,xmm3 -DB 102,15,56,221,211 - - movdqa xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm3,xmm1 - pslldq xmm1,4 - pxor xmm1,xmm3 - - pxor xmm2,xmm1 - movdqu XMMWORD[16+rax],xmm2 - lea rax,[32+rax] - movdqa xmm1,xmm2 - - jmp NEAR $L$oop_key256 - -$L$done_key256: - mov DWORD[16+rax],edx - xor eax,eax - jmp NEAR $L$enc_key_ret - -ALIGN 16 -$L$bad_keybits: - mov rax,-2 -$L$enc_key_ret: - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - add rsp,8 - - DB 0F3h,0C3h ;repret - -$L$SEH_end_set_encrypt_key: - -ALIGN 16 -$L$key_expansion_128: - movups XMMWORD[rax],xmm0 - lea rax,[16+rax] -$L$key_expansion_128_cold: - shufps xmm4,xmm0,16 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - xorps xmm0,xmm4 - shufps xmm1,xmm1,255 - xorps xmm0,xmm1 - DB 0F3h,0C3h ;repret - -ALIGN 16 -$L$key_expansion_192a: - movups XMMWORD[rax],xmm0 - lea rax,[16+rax] -$L$key_expansion_192a_cold: - movaps xmm5,xmm2 -$L$key_expansion_192b_warm: - shufps 
xmm4,xmm0,16 - movdqa xmm3,xmm2 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - pslldq xmm3,4 - xorps xmm0,xmm4 - pshufd xmm1,xmm1,85 - pxor xmm2,xmm3 - pxor xmm0,xmm1 - pshufd xmm3,xmm0,255 - pxor xmm2,xmm3 - DB 0F3h,0C3h ;repret - -ALIGN 16 -$L$key_expansion_192b: - movaps xmm3,xmm0 - shufps xmm5,xmm0,68 - movups XMMWORD[rax],xmm5 - shufps xmm3,xmm2,78 - movups XMMWORD[16+rax],xmm3 - lea rax,[32+rax] - jmp NEAR $L$key_expansion_192b_warm - -ALIGN 16 -$L$key_expansion_256a: - movups XMMWORD[rax],xmm2 - lea rax,[16+rax] -$L$key_expansion_256a_cold: - shufps xmm4,xmm0,16 - xorps xmm0,xmm4 - shufps xmm4,xmm0,140 - xorps xmm0,xmm4 - shufps xmm1,xmm1,255 - xorps xmm0,xmm1 - DB 0F3h,0C3h ;repret - -ALIGN 16 -$L$key_expansion_256b: - movups XMMWORD[rax],xmm0 - lea rax,[16+rax] - - shufps xmm4,xmm2,16 - xorps xmm2,xmm4 - shufps xmm4,xmm2,140 - xorps xmm2,xmm4 - shufps xmm1,xmm1,170 - xorps xmm2,xmm1 - DB 0F3h,0C3h ;repret - - -ALIGN 64 -$L$bswap_mask: -DB 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -$L$increment32: - DD 6,6,6,0 -$L$increment64: - DD 1,0,0,0 -$L$xts_magic: - DD 0x87,0,1,0 -$L$increment1: -DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 -$L$key_rotate: - DD 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d -$L$key_rotate192: - DD 0x04070605,0x04070605,0x04070605,0x04070605 -$L$key_rcon1: - DD 1,1,1,1 -$L$key_rcon1b: - DD 0x1b,0x1b,0x1b,0x1b - -DB 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 -DB 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 -DB 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 -DB 115,108,46,111,114,103,62,0 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -ecb_ccm64_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp 
rbx,r10 - jae NEAR $L$common_seh_tail - - lea rsi,[rax] - lea rdi,[512+r8] - mov ecx,8 - DD 0xa548f3fc - lea rax,[88+rax] - - jmp NEAR $L$common_seh_tail - - - -ALIGN 16 -ctr_xts_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov rax,QWORD[208+r8] - - lea rsi,[((-168))+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - - mov rbp,QWORD[((-8))+rax] - mov QWORD[160+r8],rbp - jmp NEAR $L$common_seh_tail - - - -ALIGN 16 -cbc_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[152+r8] - mov rbx,QWORD[248+r8] - - lea r10,[$L$cbc_decrypt_bulk] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[120+r8] - - lea r10,[$L$cbc_decrypt_body] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - lea r10,[$L$cbc_ret] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rsi,[16+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - - mov rax,QWORD[208+r8] - - mov rbp,QWORD[((-8))+rax] - mov QWORD[160+r8],rbp - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop 
rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_aes_hw_ecb_encrypt wrt ..imagebase - DD $L$SEH_end_aes_hw_ecb_encrypt wrt ..imagebase - DD $L$SEH_info_ecb wrt ..imagebase - - DD $L$SEH_begin_aes_hw_ctr32_encrypt_blocks wrt ..imagebase - DD $L$SEH_end_aes_hw_ctr32_encrypt_blocks wrt ..imagebase - DD $L$SEH_info_ctr32 wrt ..imagebase - DD $L$SEH_begin_aes_hw_cbc_encrypt wrt ..imagebase - DD $L$SEH_end_aes_hw_cbc_encrypt wrt ..imagebase - DD $L$SEH_info_cbc wrt ..imagebase - - DD aes_hw_set_decrypt_key wrt ..imagebase - DD $L$SEH_end_set_decrypt_key wrt ..imagebase - DD $L$SEH_info_key wrt ..imagebase - - DD aes_hw_set_encrypt_key wrt ..imagebase - DD $L$SEH_end_set_encrypt_key wrt ..imagebase - DD $L$SEH_info_key wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_ecb: -DB 9,0,0,0 - DD ecb_ccm64_se_handler wrt ..imagebase - DD $L$ecb_enc_body wrt ..imagebase,$L$ecb_enc_ret wrt ..imagebase -$L$SEH_info_ctr32: -DB 9,0,0,0 - DD ctr_xts_se_handler wrt ..imagebase - DD $L$ctr32_body wrt ..imagebase,$L$ctr32_epilogue wrt ..imagebase -$L$SEH_info_cbc: -DB 9,0,0,0 - DD cbc_se_handler wrt ..imagebase -$L$SEH_info_key: -DB 0x01,0x04,0x01,0x00 -DB 0x04,0x02,0x00,0x00 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm deleted file mode 100644 index 434ba10ed6..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm +++ /dev/null @@ -1,495 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - - - - - -global gcm_gmult_ssse3 -ALIGN 16 -gcm_gmult_ssse3: - -$L$gmult_seh_begin: - sub rsp,40 -$L$gmult_seh_allocstack: - movdqa XMMWORD[rsp],xmm6 -$L$gmult_seh_save_xmm6: - movdqa XMMWORD[16+rsp],xmm10 -$L$gmult_seh_save_xmm10: -$L$gmult_seh_prolog_end: - movdqu xmm0,XMMWORD[rcx] - movdqa xmm10,XMMWORD[$L$reverse_bytes] - movdqa xmm2,XMMWORD[$L$low4_mask] - - -DB 102,65,15,56,0,194 - - - movdqa xmm1,xmm2 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm2 - - - - - pxor xmm2,xmm2 - pxor xmm3,xmm3 - mov rax,5 -$L$oop_row_1: - movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - - - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_1 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov rax,5 -$L$oop_row_2: - movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - - - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_2 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov rax,6 -$L$oop_row_3: - movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - 
- - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_3 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - -DB 102,65,15,56,0,210 - movdqu XMMWORD[rcx],xmm2 - - - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - movdqa xmm6,XMMWORD[rsp] - movdqa xmm10,XMMWORD[16+rsp] - add rsp,40 - DB 0F3h,0C3h ;repret -$L$gmult_seh_end: - - - - - - - - -global gcm_ghash_ssse3 -ALIGN 16 -gcm_ghash_ssse3: -$L$ghash_seh_begin: - - sub rsp,56 -$L$ghash_seh_allocstack: - movdqa XMMWORD[rsp],xmm6 -$L$ghash_seh_save_xmm6: - movdqa XMMWORD[16+rsp],xmm10 -$L$ghash_seh_save_xmm10: - movdqa XMMWORD[32+rsp],xmm11 -$L$ghash_seh_save_xmm11: -$L$ghash_seh_prolog_end: - movdqu xmm0,XMMWORD[rcx] - movdqa xmm10,XMMWORD[$L$reverse_bytes] - movdqa xmm11,XMMWORD[$L$low4_mask] - - - and r9,-16 - - - -DB 102,65,15,56,0,194 - - - pxor xmm3,xmm3 -$L$oop_ghash: - - movdqu xmm1,XMMWORD[r8] -DB 102,65,15,56,0,202 - pxor xmm0,xmm1 - - - movdqa xmm1,xmm11 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm11 - - - - - pxor xmm2,xmm2 - - mov rax,5 -$L$oop_row_4: - movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - - - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_4 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov rax,5 -$L$oop_row_5: - 
movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - - - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_5 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - mov rax,6 -$L$oop_row_6: - movdqa xmm4,XMMWORD[rdx] - lea rdx,[16+rdx] - - - movdqa xmm6,xmm2 -DB 102,15,58,15,243,1 - movdqa xmm3,xmm6 - psrldq xmm2,1 - - - - - movdqa xmm5,xmm4 -DB 102,15,56,0,224 -DB 102,15,56,0,233 - - - pxor xmm2,xmm5 - - - - movdqa xmm5,xmm4 - psllq xmm5,60 - movdqa xmm6,xmm5 - pslldq xmm6,8 - pxor xmm3,xmm6 - - - psrldq xmm5,8 - pxor xmm2,xmm5 - psrlq xmm4,4 - pxor xmm2,xmm4 - - sub rax,1 - jnz NEAR $L$oop_row_6 - - - - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,1 - pxor xmm2,xmm3 - psrlq xmm3,5 - pxor xmm2,xmm3 - pxor xmm3,xmm3 - movdqa xmm0,xmm2 - - - lea rdx,[((-256))+rdx] - - - lea r8,[16+r8] - sub r9,16 - jnz NEAR $L$oop_ghash - - -DB 102,65,15,56,0,194 - movdqu XMMWORD[rcx],xmm0 - - - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - movdqa xmm6,XMMWORD[rsp] - movdqa xmm10,XMMWORD[16+rsp] - movdqa xmm11,XMMWORD[32+rsp] - add rsp,56 - DB 0F3h,0C3h ;repret -$L$ghash_seh_end: - - - -ALIGN 16 - - -$L$reverse_bytes: -DB 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 - -$L$low4_mask: - DQ 0x0f0f0f0f0f0f0f0f,0x0f0f0f0f0f0f0f0f -section .pdata rdata align=4 -ALIGN 4 - DD $L$gmult_seh_begin wrt ..imagebase - DD $L$gmult_seh_end wrt ..imagebase - DD $L$gmult_seh_info wrt ..imagebase - - DD $L$ghash_seh_begin wrt ..imagebase - DD $L$ghash_seh_end wrt ..imagebase - DD $L$ghash_seh_info wrt ..imagebase - -section .xdata rdata 
align=8 -ALIGN 8 -$L$gmult_seh_info: -DB 1 -DB $L$gmult_seh_prolog_end-$L$gmult_seh_begin -DB 5 -DB 0 - -DB $L$gmult_seh_save_xmm10-$L$gmult_seh_begin -DB 168 - DW 1 - -DB $L$gmult_seh_save_xmm6-$L$gmult_seh_begin -DB 104 - DW 0 - -DB $L$gmult_seh_allocstack-$L$gmult_seh_begin -DB 66 - -ALIGN 8 -$L$ghash_seh_info: -DB 1 -DB $L$ghash_seh_prolog_end-$L$ghash_seh_begin -DB 7 -DB 0 - -DB $L$ghash_seh_save_xmm11-$L$ghash_seh_begin -DB 184 - DW 2 - -DB $L$ghash_seh_save_xmm10-$L$ghash_seh_begin -DB 168 - DW 1 - -DB $L$ghash_seh_save_xmm6-$L$ghash_seh_begin -DB 104 - DW 0 - -DB $L$ghash_seh_allocstack-$L$ghash_seh_begin -DB 98 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm deleted file mode 100644 index fdf914f284..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm +++ /dev/null @@ -1,2078 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -EXTERN OPENSSL_ia32cap_P - -global gcm_gmult_4bit - -ALIGN 16 -gcm_gmult_4bit: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_gcm_gmult_4bit: - mov rdi,rcx - mov rsi,rdx - - - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,280 - -$L$gmult_prologue: - - movzx r8,BYTE[15+rdi] - lea r11,[$L$rem_4bit] - xor rax,rax - xor rbx,rbx - mov al,r8b - mov bl,r8b - shl al,4 - mov rcx,14 - mov r8,QWORD[8+rax*1+rsi] - mov r9,QWORD[rax*1+rsi] - and bl,0xf0 - mov rdx,r8 - jmp NEAR $L$oop1 - -ALIGN 16 -$L$oop1: - shr r8,4 - and rdx,0xf - mov r10,r9 - mov al,BYTE[rcx*1+rdi] - shr r9,4 - xor r8,QWORD[8+rbx*1+rsi] - shl r10,60 - xor r9,QWORD[rbx*1+rsi] - mov bl,al - xor r9,QWORD[rdx*8+r11] - mov rdx,r8 - shl al,4 - xor r8,r10 - dec rcx - js NEAR $L$break1 - - shr r8,4 - and rdx,0xf - mov r10,r9 - shr r9,4 - xor r8,QWORD[8+rax*1+rsi] - shl r10,60 - xor r9,QWORD[rax*1+rsi] - and bl,0xf0 - xor r9,QWORD[rdx*8+r11] - mov rdx,r8 - xor r8,r10 - jmp NEAR $L$oop1 - -ALIGN 16 -$L$break1: - shr r8,4 - and rdx,0xf - mov r10,r9 - shr r9,4 - xor r8,QWORD[8+rax*1+rsi] - shl r10,60 - xor r9,QWORD[rax*1+rsi] - and bl,0xf0 - xor r9,QWORD[rdx*8+r11] - mov rdx,r8 - xor r8,r10 - - shr r8,4 - and rdx,0xf - mov r10,r9 - shr r9,4 - xor r8,QWORD[8+rbx*1+rsi] - shl r10,60 - xor r9,QWORD[rbx*1+rsi] - xor r8,r10 - xor r9,QWORD[rdx*8+r11] - - bswap r8 - bswap r9 - mov QWORD[8+rdi],r8 - mov QWORD[rdi],r9 - - lea rsi,[((280+48))+rsp] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$gmult_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_gcm_gmult_4bit: -global gcm_ghash_4bit - -ALIGN 16 -gcm_ghash_4bit: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp 
-$L$SEH_begin_gcm_ghash_4bit: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - - - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,280 - -$L$ghash_prologue: - mov r14,rdx - mov r15,rcx - sub rsi,-128 - lea rbp,[((16+128))+rsp] - xor edx,edx - mov r8,QWORD[((0+0-128))+rsi] - mov rax,QWORD[((0+8-128))+rsi] - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov r9,QWORD[((16+0-128))+rsi] - shl dl,4 - mov rbx,QWORD[((16+8-128))+rsi] - shl r10,60 - mov BYTE[rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[rbp],r8 - mov r8,QWORD[((32+0-128))+rsi] - shl dl,4 - mov QWORD[((0-128))+rbp],rax - mov rax,QWORD[((32+8-128))+rsi] - shl r10,60 - mov BYTE[1+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[8+rbp],r9 - mov r9,QWORD[((48+0-128))+rsi] - shl dl,4 - mov QWORD[((8-128))+rbp],rbx - mov rbx,QWORD[((48+8-128))+rsi] - shl r10,60 - mov BYTE[2+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[16+rbp],r8 - mov r8,QWORD[((64+0-128))+rsi] - shl dl,4 - mov QWORD[((16-128))+rbp],rax - mov rax,QWORD[((64+8-128))+rsi] - shl r10,60 - mov BYTE[3+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[24+rbp],r9 - mov r9,QWORD[((80+0-128))+rsi] - shl dl,4 - mov QWORD[((24-128))+rbp],rbx - mov rbx,QWORD[((80+8-128))+rsi] - shl r10,60 - mov BYTE[4+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[32+rbp],r8 - mov r8,QWORD[((96+0-128))+rsi] - shl dl,4 - mov QWORD[((32-128))+rbp],rax - mov rax,QWORD[((96+8-128))+rsi] - shl r10,60 - mov BYTE[5+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[40+rbp],r9 - mov r9,QWORD[((112+0-128))+rsi] - shl dl,4 - mov QWORD[((40-128))+rbp],rbx - mov rbx,QWORD[((112+8-128))+rsi] - shl r10,60 - mov BYTE[6+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[48+rbp],r8 - mov r8,QWORD[((128+0-128))+rsi] - shl 
dl,4 - mov QWORD[((48-128))+rbp],rax - mov rax,QWORD[((128+8-128))+rsi] - shl r10,60 - mov BYTE[7+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[56+rbp],r9 - mov r9,QWORD[((144+0-128))+rsi] - shl dl,4 - mov QWORD[((56-128))+rbp],rbx - mov rbx,QWORD[((144+8-128))+rsi] - shl r10,60 - mov BYTE[8+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[64+rbp],r8 - mov r8,QWORD[((160+0-128))+rsi] - shl dl,4 - mov QWORD[((64-128))+rbp],rax - mov rax,QWORD[((160+8-128))+rsi] - shl r10,60 - mov BYTE[9+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[72+rbp],r9 - mov r9,QWORD[((176+0-128))+rsi] - shl dl,4 - mov QWORD[((72-128))+rbp],rbx - mov rbx,QWORD[((176+8-128))+rsi] - shl r10,60 - mov BYTE[10+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[80+rbp],r8 - mov r8,QWORD[((192+0-128))+rsi] - shl dl,4 - mov QWORD[((80-128))+rbp],rax - mov rax,QWORD[((192+8-128))+rsi] - shl r10,60 - mov BYTE[11+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[88+rbp],r9 - mov r9,QWORD[((208+0-128))+rsi] - shl dl,4 - mov QWORD[((88-128))+rbp],rbx - mov rbx,QWORD[((208+8-128))+rsi] - shl r10,60 - mov BYTE[12+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[96+rbp],r8 - mov r8,QWORD[((224+0-128))+rsi] - shl dl,4 - mov QWORD[((96-128))+rbp],rax - mov rax,QWORD[((224+8-128))+rsi] - shl r10,60 - mov BYTE[13+rsp],dl - or rbx,r10 - mov dl,al - shr rax,4 - mov r10,r8 - shr r8,4 - mov QWORD[104+rbp],r9 - mov r9,QWORD[((240+0-128))+rsi] - shl dl,4 - mov QWORD[((104-128))+rbp],rbx - mov rbx,QWORD[((240+8-128))+rsi] - shl r10,60 - mov BYTE[14+rsp],dl - or rax,r10 - mov dl,bl - shr rbx,4 - mov r10,r9 - shr r9,4 - mov QWORD[112+rbp],r8 - shl dl,4 - mov QWORD[((112-128))+rbp],rax - shl r10,60 - mov BYTE[15+rsp],dl - or rbx,r10 - mov QWORD[120+rbp],r9 - mov QWORD[((120-128))+rbp],rbx - add rsi,-128 - mov r8,QWORD[8+rdi] - mov 
r9,QWORD[rdi] - add r15,r14 - lea r11,[$L$rem_8bit] - jmp NEAR $L$outer_loop -ALIGN 16 -$L$outer_loop: - xor r9,QWORD[r14] - mov rdx,QWORD[8+r14] - lea r14,[16+r14] - xor rdx,r8 - mov QWORD[rdi],r9 - mov QWORD[8+rdi],rdx - shr rdx,32 - xor rax,rax - rol edx,8 - mov al,dl - movzx ebx,dl - shl al,4 - shr ebx,4 - rol edx,8 - mov r8,QWORD[8+rax*1+rsi] - mov r9,QWORD[rax*1+rsi] - mov al,dl - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - xor r12,r8 - mov r10,r9 - shr r8,8 - movzx r12,r12b - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - mov edx,DWORD[8+rdi] - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor 
r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - mov edx,DWORD[4+rdi] - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor 
r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - mov edx,DWORD[rdi] - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - shr ecx,4 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r12,WORD[r12*2+r11] - movzx ebx,dl - shl al,4 - movzx r13,BYTE[rcx*1+rsp] - shr ebx,4 - shl r12,48 - xor r13,r8 - mov r10,r9 - xor r9,r12 - shr r8,8 - movzx r13,r13b - shr r9,8 - xor r8,QWORD[((-128))+rcx*8+rbp] - shl r10,56 - xor r9,QWORD[rcx*8+rbp] - rol edx,8 - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - mov al,dl - xor r8,r10 - movzx r13,WORD[r13*2+r11] - movzx ecx,dl - shl al,4 - movzx r12,BYTE[rbx*1+rsp] - and ecx,240 - shl r13,48 - xor r12,r8 - mov r10,r9 - xor r9,r13 - shr r8,8 - movzx r12,r12b - mov edx,DWORD[((-4))+rdi] - shr r9,8 - xor r8,QWORD[((-128))+rbx*8+rbp] - shl r10,56 - xor r9,QWORD[rbx*8+rbp] - movzx r12,WORD[r12*2+r11] - xor r8,QWORD[8+rax*1+rsi] - xor r9,QWORD[rax*1+rsi] - shl r12,48 - xor r8,r10 - xor r9,r12 - movzx r13,r8b - shr r8,4 - mov r10,r9 - shl r13b,4 - shr r9,4 - xor r8,QWORD[8+rcx*1+rsi] - movzx r13,WORD[r13*2+r11] - shl r10,60 - xor r9,QWORD[rcx*1+rsi] - xor r8,r10 - shl r13,48 - bswap r8 - xor r9,r13 - bswap r9 - cmp r14,r15 - jb NEAR $L$outer_loop - mov QWORD[8+rdi],r8 - 
mov QWORD[rdi],r9 - - lea rsi,[((280+48))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$ghash_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_gcm_ghash_4bit: -global gcm_init_clmul - -ALIGN 16 -gcm_init_clmul: - -$L$_init_clmul: -$L$SEH_begin_gcm_init_clmul: - -DB 0x48,0x83,0xec,0x18 -DB 0x0f,0x29,0x34,0x24 - movdqu xmm2,XMMWORD[rdx] - pshufd xmm2,xmm2,78 - - - pshufd xmm4,xmm2,255 - movdqa xmm3,xmm2 - psllq xmm2,1 - pxor xmm5,xmm5 - psrlq xmm3,63 - pcmpgtd xmm5,xmm4 - pslldq xmm3,8 - por xmm2,xmm3 - - - pand xmm5,XMMWORD[$L$0x1c2_polynomial] - pxor xmm2,xmm5 - - - pshufd xmm6,xmm2,78 - movdqa xmm0,xmm2 - pxor xmm6,xmm2 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -DB 102,15,58,68,194,0 -DB 102,15,58,68,202,17 -DB 102,15,58,68,222,0 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - pshufd xmm3,xmm2,78 - pshufd xmm4,xmm0,78 - pxor xmm3,xmm2 - movdqu XMMWORD[rcx],xmm2 - pxor xmm4,xmm0 - movdqu XMMWORD[16+rcx],xmm0 -DB 102,15,58,15,227,8 - movdqu XMMWORD[32+rcx],xmm4 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -DB 102,15,58,68,194,0 -DB 102,15,58,68,202,17 -DB 102,15,58,68,222,0 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq 
xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - movdqa xmm5,xmm0 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -DB 102,15,58,68,194,0 -DB 102,15,58,68,202,17 -DB 102,15,58,68,222,0 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - pshufd xmm3,xmm5,78 - pshufd xmm4,xmm0,78 - pxor xmm3,xmm5 - movdqu XMMWORD[48+rcx],xmm5 - pxor xmm4,xmm0 - movdqu XMMWORD[64+rcx],xmm0 -DB 102,15,58,15,227,8 - movdqu XMMWORD[80+rcx],xmm4 - movaps xmm6,XMMWORD[rsp] - lea rsp,[24+rsp] -$L$SEH_end_gcm_init_clmul: - DB 0F3h,0C3h ;repret - - -global gcm_gmult_clmul - -ALIGN 16 -gcm_gmult_clmul: - -$L$_gmult_clmul: - movdqu xmm0,XMMWORD[rcx] - movdqa xmm5,XMMWORD[$L$bswap_mask] - movdqu xmm2,XMMWORD[rdx] - movdqu xmm4,XMMWORD[32+rdx] -DB 102,15,56,0,197 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -DB 102,15,58,68,194,0 -DB 102,15,58,68,202,17 -DB 102,15,58,68,220,0 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 -DB 102,15,56,0,197 - movdqu XMMWORD[rcx],xmm0 - DB 0F3h,0C3h 
;repret - - -global gcm_ghash_clmul - -ALIGN 32 -gcm_ghash_clmul: - -$L$_ghash_clmul: - lea rax,[((-136))+rsp] -$L$SEH_begin_gcm_ghash_clmul: - -DB 0x48,0x8d,0x60,0xe0 -DB 0x0f,0x29,0x70,0xe0 -DB 0x0f,0x29,0x78,0xf0 -DB 0x44,0x0f,0x29,0x00 -DB 0x44,0x0f,0x29,0x48,0x10 -DB 0x44,0x0f,0x29,0x50,0x20 -DB 0x44,0x0f,0x29,0x58,0x30 -DB 0x44,0x0f,0x29,0x60,0x40 -DB 0x44,0x0f,0x29,0x68,0x50 -DB 0x44,0x0f,0x29,0x70,0x60 -DB 0x44,0x0f,0x29,0x78,0x70 - movdqa xmm10,XMMWORD[$L$bswap_mask] - - movdqu xmm0,XMMWORD[rcx] - movdqu xmm2,XMMWORD[rdx] - movdqu xmm7,XMMWORD[32+rdx] -DB 102,65,15,56,0,194 - - sub r9,0x10 - jz NEAR $L$odd_tail - - movdqu xmm6,XMMWORD[16+rdx] - lea rax,[OPENSSL_ia32cap_P] - mov eax,DWORD[4+rax] - cmp r9,0x30 - jb NEAR $L$skip4x - - and eax,71303168 - cmp eax,4194304 - je NEAR $L$skip4x - - sub r9,0x30 - mov rax,0xA040608020C0E000 - movdqu xmm14,XMMWORD[48+rdx] - movdqu xmm15,XMMWORD[64+rdx] - - - - - movdqu xmm3,XMMWORD[48+r8] - movdqu xmm11,XMMWORD[32+r8] -DB 102,65,15,56,0,218 -DB 102,69,15,56,0,218 - movdqa xmm5,xmm3 - pshufd xmm4,xmm3,78 - pxor xmm4,xmm3 -DB 102,15,58,68,218,0 -DB 102,15,58,68,234,17 -DB 102,15,58,68,231,0 - - movdqa xmm13,xmm11 - pshufd xmm12,xmm11,78 - pxor xmm12,xmm11 -DB 102,68,15,58,68,222,0 -DB 102,68,15,58,68,238,17 -DB 102,68,15,58,68,231,16 - xorps xmm3,xmm11 - xorps xmm5,xmm13 - movups xmm7,XMMWORD[80+rdx] - xorps xmm4,xmm12 - - movdqu xmm11,XMMWORD[16+r8] - movdqu xmm8,XMMWORD[r8] -DB 102,69,15,56,0,218 -DB 102,69,15,56,0,194 - movdqa xmm13,xmm11 - pshufd xmm12,xmm11,78 - pxor xmm0,xmm8 - pxor xmm12,xmm11 -DB 102,69,15,58,68,222,0 - movdqa xmm1,xmm0 - pshufd xmm8,xmm0,78 - pxor xmm8,xmm0 -DB 102,69,15,58,68,238,17 -DB 102,68,15,58,68,231,0 - xorps xmm3,xmm11 - xorps xmm5,xmm13 - - lea r8,[64+r8] - sub r9,0x40 - jc NEAR $L$tail4x - - jmp NEAR $L$mod4_loop -ALIGN 32 -$L$mod4_loop: -DB 102,65,15,58,68,199,0 - xorps xmm4,xmm12 - movdqu xmm11,XMMWORD[48+r8] -DB 102,69,15,56,0,218 -DB 102,65,15,58,68,207,17 - xorps xmm0,xmm3 - 
movdqu xmm3,XMMWORD[32+r8] - movdqa xmm13,xmm11 -DB 102,68,15,58,68,199,16 - pshufd xmm12,xmm11,78 - xorps xmm1,xmm5 - pxor xmm12,xmm11 -DB 102,65,15,56,0,218 - movups xmm7,XMMWORD[32+rdx] - xorps xmm8,xmm4 -DB 102,68,15,58,68,218,0 - pshufd xmm4,xmm3,78 - - pxor xmm8,xmm0 - movdqa xmm5,xmm3 - pxor xmm8,xmm1 - pxor xmm4,xmm3 - movdqa xmm9,xmm8 -DB 102,68,15,58,68,234,17 - pslldq xmm8,8 - psrldq xmm9,8 - pxor xmm0,xmm8 - movdqa xmm8,XMMWORD[$L$7_mask] - pxor xmm1,xmm9 -DB 102,76,15,110,200 - - pand xmm8,xmm0 -DB 102,69,15,56,0,200 - pxor xmm9,xmm0 -DB 102,68,15,58,68,231,0 - psllq xmm9,57 - movdqa xmm8,xmm9 - pslldq xmm9,8 -DB 102,15,58,68,222,0 - psrldq xmm8,8 - pxor xmm0,xmm9 - pxor xmm1,xmm8 - movdqu xmm8,XMMWORD[r8] - - movdqa xmm9,xmm0 - psrlq xmm0,1 -DB 102,15,58,68,238,17 - xorps xmm3,xmm11 - movdqu xmm11,XMMWORD[16+r8] -DB 102,69,15,56,0,218 -DB 102,15,58,68,231,16 - xorps xmm5,xmm13 - movups xmm7,XMMWORD[80+rdx] -DB 102,69,15,56,0,194 - pxor xmm1,xmm9 - pxor xmm9,xmm0 - psrlq xmm0,5 - - movdqa xmm13,xmm11 - pxor xmm4,xmm12 - pshufd xmm12,xmm11,78 - pxor xmm0,xmm9 - pxor xmm1,xmm8 - pxor xmm12,xmm11 -DB 102,69,15,58,68,222,0 - psrlq xmm0,1 - pxor xmm0,xmm1 - movdqa xmm1,xmm0 -DB 102,69,15,58,68,238,17 - xorps xmm3,xmm11 - pshufd xmm8,xmm0,78 - pxor xmm8,xmm0 - -DB 102,68,15,58,68,231,0 - xorps xmm5,xmm13 - - lea r8,[64+r8] - sub r9,0x40 - jnc NEAR $L$mod4_loop - -$L$tail4x: -DB 102,65,15,58,68,199,0 -DB 102,65,15,58,68,207,17 -DB 102,68,15,58,68,199,16 - xorps xmm4,xmm12 - xorps xmm0,xmm3 - xorps xmm1,xmm5 - pxor xmm1,xmm0 - pxor xmm8,xmm4 - - pxor xmm8,xmm1 - pxor xmm1,xmm0 - - movdqa xmm9,xmm8 - psrldq xmm8,8 - pslldq xmm9,8 - pxor xmm1,xmm8 - pxor xmm0,xmm9 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor 
xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - add r9,0x40 - jz NEAR $L$done - movdqu xmm7,XMMWORD[32+rdx] - sub r9,0x10 - jz NEAR $L$odd_tail -$L$skip4x: - - - - - - movdqu xmm8,XMMWORD[r8] - movdqu xmm3,XMMWORD[16+r8] -DB 102,69,15,56,0,194 -DB 102,65,15,56,0,218 - pxor xmm0,xmm8 - - movdqa xmm5,xmm3 - pshufd xmm4,xmm3,78 - pxor xmm4,xmm3 -DB 102,15,58,68,218,0 -DB 102,15,58,68,234,17 -DB 102,15,58,68,231,0 - - lea r8,[32+r8] - nop - sub r9,0x20 - jbe NEAR $L$even_tail - nop - jmp NEAR $L$mod_loop - -ALIGN 32 -$L$mod_loop: - movdqa xmm1,xmm0 - movdqa xmm8,xmm4 - pshufd xmm4,xmm0,78 - pxor xmm4,xmm0 - -DB 102,15,58,68,198,0 -DB 102,15,58,68,206,17 -DB 102,15,58,68,231,16 - - pxor xmm0,xmm3 - pxor xmm1,xmm5 - movdqu xmm9,XMMWORD[r8] - pxor xmm8,xmm0 -DB 102,69,15,56,0,202 - movdqu xmm3,XMMWORD[16+r8] - - pxor xmm8,xmm1 - pxor xmm1,xmm9 - pxor xmm4,xmm8 -DB 102,65,15,56,0,218 - movdqa xmm8,xmm4 - psrldq xmm8,8 - pslldq xmm4,8 - pxor xmm1,xmm8 - pxor xmm0,xmm4 - - movdqa xmm5,xmm3 - - movdqa xmm9,xmm0 - movdqa xmm8,xmm0 - psllq xmm0,5 - pxor xmm8,xmm0 -DB 102,15,58,68,218,0 - psllq xmm0,1 - pxor xmm0,xmm8 - psllq xmm0,57 - movdqa xmm8,xmm0 - pslldq xmm0,8 - psrldq xmm8,8 - pxor xmm0,xmm9 - pshufd xmm4,xmm5,78 - pxor xmm1,xmm8 - pxor xmm4,xmm5 - - movdqa xmm9,xmm0 - psrlq xmm0,1 -DB 102,15,58,68,234,17 - pxor xmm1,xmm9 - pxor xmm9,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm9 - lea r8,[32+r8] - psrlq xmm0,1 -DB 102,15,58,68,231,0 - pxor xmm0,xmm1 - - sub r9,0x20 - ja NEAR $L$mod_loop - -$L$even_tail: - movdqa xmm1,xmm0 - movdqa xmm8,xmm4 - pshufd xmm4,xmm0,78 - pxor xmm4,xmm0 - -DB 102,15,58,68,198,0 -DB 102,15,58,68,206,17 -DB 102,15,58,68,231,16 - - pxor xmm0,xmm3 - pxor xmm1,xmm5 - pxor xmm8,xmm0 - pxor xmm8,xmm1 - pxor xmm4,xmm8 - movdqa xmm8,xmm4 - psrldq xmm8,8 - pslldq xmm4,8 - pxor xmm1,xmm8 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq 
xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 - test r9,r9 - jnz NEAR $L$done - -$L$odd_tail: - movdqu xmm8,XMMWORD[r8] -DB 102,69,15,56,0,194 - pxor xmm0,xmm8 - movdqa xmm1,xmm0 - pshufd xmm3,xmm0,78 - pxor xmm3,xmm0 -DB 102,15,58,68,194,0 -DB 102,15,58,68,202,17 -DB 102,15,58,68,223,0 - pxor xmm3,xmm0 - pxor xmm3,xmm1 - - movdqa xmm4,xmm3 - psrldq xmm3,8 - pslldq xmm4,8 - pxor xmm1,xmm3 - pxor xmm0,xmm4 - - movdqa xmm4,xmm0 - movdqa xmm3,xmm0 - psllq xmm0,5 - pxor xmm3,xmm0 - psllq xmm0,1 - pxor xmm0,xmm3 - psllq xmm0,57 - movdqa xmm3,xmm0 - pslldq xmm0,8 - psrldq xmm3,8 - pxor xmm0,xmm4 - pxor xmm1,xmm3 - - - movdqa xmm4,xmm0 - psrlq xmm0,1 - pxor xmm1,xmm4 - pxor xmm4,xmm0 - psrlq xmm0,5 - pxor xmm0,xmm4 - psrlq xmm0,1 - pxor xmm0,xmm1 -$L$done: -DB 102,65,15,56,0,194 - movdqu XMMWORD[rcx],xmm0 - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea rsp,[168+rsp] -$L$SEH_end_gcm_ghash_clmul: - DB 0F3h,0C3h ;repret - - -global gcm_init_avx - -ALIGN 32 -gcm_init_avx: - -$L$SEH_begin_gcm_init_avx: - -DB 0x48,0x83,0xec,0x18 -DB 0x0f,0x29,0x34,0x24 - vzeroupper - - vmovdqu xmm2,XMMWORD[rdx] - vpshufd xmm2,xmm2,78 - - - vpshufd xmm4,xmm2,255 - vpsrlq xmm3,xmm2,63 - vpsllq xmm2,xmm2,1 - vpxor xmm5,xmm5,xmm5 - vpcmpgtd xmm5,xmm5,xmm4 - vpslldq xmm3,xmm3,8 - vpor xmm2,xmm2,xmm3 - - - vpand xmm5,xmm5,XMMWORD[$L$0x1c2_polynomial] - vpxor xmm2,xmm2,xmm5 - - vpunpckhqdq xmm6,xmm2,xmm2 - vmovdqa xmm0,xmm2 - vpxor xmm6,xmm6,xmm2 - mov r10,4 - jmp NEAR $L$init_start_avx -ALIGN 32 -$L$init_loop_avx: - vpalignr xmm5,xmm4,xmm3,8 - vmovdqu XMMWORD[(-16)+rcx],xmm5 - vpunpckhqdq xmm3,xmm0,xmm0 - vpxor 
xmm3,xmm3,xmm0 - vpclmulqdq xmm1,xmm0,xmm2,0x11 - vpclmulqdq xmm0,xmm0,xmm2,0x00 - vpclmulqdq xmm3,xmm3,xmm6,0x00 - vpxor xmm4,xmm1,xmm0 - vpxor xmm3,xmm3,xmm4 - - vpslldq xmm4,xmm3,8 - vpsrldq xmm3,xmm3,8 - vpxor xmm0,xmm0,xmm4 - vpxor xmm1,xmm1,xmm3 - vpsllq xmm3,xmm0,57 - vpsllq xmm4,xmm0,62 - vpxor xmm4,xmm4,xmm3 - vpsllq xmm3,xmm0,63 - vpxor xmm4,xmm4,xmm3 - vpslldq xmm3,xmm4,8 - vpsrldq xmm4,xmm4,8 - vpxor xmm0,xmm0,xmm3 - vpxor xmm1,xmm1,xmm4 - - vpsrlq xmm4,xmm0,1 - vpxor xmm1,xmm1,xmm0 - vpxor xmm0,xmm0,xmm4 - vpsrlq xmm4,xmm4,5 - vpxor xmm0,xmm0,xmm4 - vpsrlq xmm0,xmm0,1 - vpxor xmm0,xmm0,xmm1 -$L$init_start_avx: - vmovdqa xmm5,xmm0 - vpunpckhqdq xmm3,xmm0,xmm0 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm1,xmm0,xmm2,0x11 - vpclmulqdq xmm0,xmm0,xmm2,0x00 - vpclmulqdq xmm3,xmm3,xmm6,0x00 - vpxor xmm4,xmm1,xmm0 - vpxor xmm3,xmm3,xmm4 - - vpslldq xmm4,xmm3,8 - vpsrldq xmm3,xmm3,8 - vpxor xmm0,xmm0,xmm4 - vpxor xmm1,xmm1,xmm3 - vpsllq xmm3,xmm0,57 - vpsllq xmm4,xmm0,62 - vpxor xmm4,xmm4,xmm3 - vpsllq xmm3,xmm0,63 - vpxor xmm4,xmm4,xmm3 - vpslldq xmm3,xmm4,8 - vpsrldq xmm4,xmm4,8 - vpxor xmm0,xmm0,xmm3 - vpxor xmm1,xmm1,xmm4 - - vpsrlq xmm4,xmm0,1 - vpxor xmm1,xmm1,xmm0 - vpxor xmm0,xmm0,xmm4 - vpsrlq xmm4,xmm4,5 - vpxor xmm0,xmm0,xmm4 - vpsrlq xmm0,xmm0,1 - vpxor xmm0,xmm0,xmm1 - vpshufd xmm3,xmm5,78 - vpshufd xmm4,xmm0,78 - vpxor xmm3,xmm3,xmm5 - vmovdqu XMMWORD[rcx],xmm5 - vpxor xmm4,xmm4,xmm0 - vmovdqu XMMWORD[16+rcx],xmm0 - lea rcx,[48+rcx] - sub r10,1 - jnz NEAR $L$init_loop_avx - - vpalignr xmm5,xmm3,xmm4,8 - vmovdqu XMMWORD[(-16)+rcx],xmm5 - - vzeroupper - movaps xmm6,XMMWORD[rsp] - lea rsp,[24+rsp] -$L$SEH_end_gcm_init_avx: - DB 0F3h,0C3h ;repret - - -global gcm_gmult_avx - -ALIGN 32 -gcm_gmult_avx: - - jmp NEAR $L$_gmult_clmul - - -global gcm_ghash_avx - -ALIGN 32 -gcm_ghash_avx: - - lea rax,[((-136))+rsp] -$L$SEH_begin_gcm_ghash_avx: - -DB 0x48,0x8d,0x60,0xe0 -DB 0x0f,0x29,0x70,0xe0 -DB 0x0f,0x29,0x78,0xf0 -DB 0x44,0x0f,0x29,0x00 -DB 
0x44,0x0f,0x29,0x48,0x10 -DB 0x44,0x0f,0x29,0x50,0x20 -DB 0x44,0x0f,0x29,0x58,0x30 -DB 0x44,0x0f,0x29,0x60,0x40 -DB 0x44,0x0f,0x29,0x68,0x50 -DB 0x44,0x0f,0x29,0x70,0x60 -DB 0x44,0x0f,0x29,0x78,0x70 - vzeroupper - - vmovdqu xmm10,XMMWORD[rcx] - lea r10,[$L$0x1c2_polynomial] - lea rdx,[64+rdx] - vmovdqu xmm13,XMMWORD[$L$bswap_mask] - vpshufb xmm10,xmm10,xmm13 - cmp r9,0x80 - jb NEAR $L$short_avx - sub r9,0x80 - - vmovdqu xmm14,XMMWORD[112+r8] - vmovdqu xmm6,XMMWORD[((0-64))+rdx] - vpshufb xmm14,xmm14,xmm13 - vmovdqu xmm7,XMMWORD[((32-64))+rdx] - - vpunpckhqdq xmm9,xmm14,xmm14 - vmovdqu xmm15,XMMWORD[96+r8] - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpxor xmm9,xmm9,xmm14 - vpshufb xmm15,xmm15,xmm13 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((16-64))+rdx] - vpunpckhqdq xmm8,xmm15,xmm15 - vmovdqu xmm14,XMMWORD[80+r8] - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vpxor xmm8,xmm8,xmm15 - - vpshufb xmm14,xmm14,xmm13 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpunpckhqdq xmm9,xmm14,xmm14 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((48-64))+rdx] - vpxor xmm9,xmm9,xmm14 - vmovdqu xmm15,XMMWORD[64+r8] - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((80-64))+rdx] - - vpshufb xmm15,xmm15,xmm13 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpxor xmm4,xmm4,xmm1 - vpunpckhqdq xmm8,xmm15,xmm15 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((64-64))+rdx] - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vpxor xmm8,xmm8,xmm15 - - vmovdqu xmm14,XMMWORD[48+r8] - vpxor xmm0,xmm0,xmm3 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpxor xmm1,xmm1,xmm4 - vpshufb xmm14,xmm14,xmm13 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((96-64))+rdx] - vpxor xmm2,xmm2,xmm5 - vpunpckhqdq xmm9,xmm14,xmm14 - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((128-64))+rdx] - vpxor xmm9,xmm9,xmm14 - - vmovdqu xmm15,XMMWORD[32+r8] - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpxor xmm4,xmm4,xmm1 - vpshufb xmm15,xmm15,xmm13 - vpclmulqdq 
xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((112-64))+rdx] - vpxor xmm5,xmm5,xmm2 - vpunpckhqdq xmm8,xmm15,xmm15 - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vpxor xmm8,xmm8,xmm15 - - vmovdqu xmm14,XMMWORD[16+r8] - vpxor xmm0,xmm0,xmm3 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpxor xmm1,xmm1,xmm4 - vpshufb xmm14,xmm14,xmm13 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((144-64))+rdx] - vpxor xmm2,xmm2,xmm5 - vpunpckhqdq xmm9,xmm14,xmm14 - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((176-64))+rdx] - vpxor xmm9,xmm9,xmm14 - - vmovdqu xmm15,XMMWORD[r8] - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpxor xmm4,xmm4,xmm1 - vpshufb xmm15,xmm15,xmm13 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((160-64))+rdx] - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm9,xmm7,0x10 - - lea r8,[128+r8] - cmp r9,0x80 - jb NEAR $L$tail_avx - - vpxor xmm15,xmm15,xmm10 - sub r9,0x80 - jmp NEAR $L$oop8x_avx - -ALIGN 32 -$L$oop8x_avx: - vpunpckhqdq xmm8,xmm15,xmm15 - vmovdqu xmm14,XMMWORD[112+r8] - vpxor xmm3,xmm3,xmm0 - vpxor xmm8,xmm8,xmm15 - vpclmulqdq xmm10,xmm15,xmm6,0x00 - vpshufb xmm14,xmm14,xmm13 - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm11,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((0-64))+rdx] - vpunpckhqdq xmm9,xmm14,xmm14 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm12,xmm8,xmm7,0x00 - vmovdqu xmm7,XMMWORD[((32-64))+rdx] - vpxor xmm9,xmm9,xmm14 - - vmovdqu xmm15,XMMWORD[96+r8] - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpxor xmm10,xmm10,xmm3 - vpshufb xmm15,xmm15,xmm13 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vxorps xmm11,xmm11,xmm4 - vmovdqu xmm6,XMMWORD[((16-64))+rdx] - vpunpckhqdq xmm8,xmm15,xmm15 - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vpxor xmm12,xmm12,xmm5 - vxorps xmm8,xmm8,xmm15 - - vmovdqu xmm14,XMMWORD[80+r8] - vpxor xmm12,xmm12,xmm10 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpxor xmm12,xmm12,xmm11 - vpslldq xmm9,xmm12,8 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vpsrldq xmm12,xmm12,8 - vpxor xmm10,xmm10,xmm9 - vmovdqu xmm6,XMMWORD[((48-64))+rdx] - vpshufb 
xmm14,xmm14,xmm13 - vxorps xmm11,xmm11,xmm12 - vpxor xmm4,xmm4,xmm1 - vpunpckhqdq xmm9,xmm14,xmm14 - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((80-64))+rdx] - vpxor xmm9,xmm9,xmm14 - vpxor xmm5,xmm5,xmm2 - - vmovdqu xmm15,XMMWORD[64+r8] - vpalignr xmm12,xmm10,xmm10,8 - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpshufb xmm15,xmm15,xmm13 - vpxor xmm0,xmm0,xmm3 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((64-64))+rdx] - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vxorps xmm8,xmm8,xmm15 - vpxor xmm2,xmm2,xmm5 - - vmovdqu xmm14,XMMWORD[48+r8] - vpclmulqdq xmm10,xmm10,XMMWORD[r10],0x10 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpshufb xmm14,xmm14,xmm13 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((96-64))+rdx] - vpunpckhqdq xmm9,xmm14,xmm14 - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((128-64))+rdx] - vpxor xmm9,xmm9,xmm14 - vpxor xmm5,xmm5,xmm2 - - vmovdqu xmm15,XMMWORD[32+r8] - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpshufb xmm15,xmm15,xmm13 - vpxor xmm0,xmm0,xmm3 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((112-64))+rdx] - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm1,xmm1,xmm4 - vpclmulqdq xmm2,xmm9,xmm7,0x00 - vpxor xmm8,xmm8,xmm15 - vpxor xmm2,xmm2,xmm5 - vxorps xmm10,xmm10,xmm12 - - vmovdqu xmm14,XMMWORD[16+r8] - vpalignr xmm12,xmm10,xmm10,8 - vpclmulqdq xmm3,xmm15,xmm6,0x00 - vpshufb xmm14,xmm14,xmm13 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm4,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((144-64))+rdx] - vpclmulqdq xmm10,xmm10,XMMWORD[r10],0x10 - vxorps xmm12,xmm12,xmm11 - vpunpckhqdq xmm9,xmm14,xmm14 - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm5,xmm8,xmm7,0x10 - vmovdqu xmm7,XMMWORD[((176-64))+rdx] - vpxor xmm9,xmm9,xmm14 - vpxor xmm5,xmm5,xmm2 - - vmovdqu xmm15,XMMWORD[r8] - vpclmulqdq xmm0,xmm14,xmm6,0x00 - vpshufb xmm15,xmm15,xmm13 - vpclmulqdq xmm1,xmm14,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((160-64))+rdx] - vpxor xmm15,xmm15,xmm12 - vpclmulqdq 
xmm2,xmm9,xmm7,0x10 - vpxor xmm15,xmm15,xmm10 - - lea r8,[128+r8] - sub r9,0x80 - jnc NEAR $L$oop8x_avx - - add r9,0x80 - jmp NEAR $L$tail_no_xor_avx - -ALIGN 32 -$L$short_avx: - vmovdqu xmm14,XMMWORD[((-16))+r9*1+r8] - lea r8,[r9*1+r8] - vmovdqu xmm6,XMMWORD[((0-64))+rdx] - vmovdqu xmm7,XMMWORD[((32-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - - vmovdqa xmm3,xmm0 - vmovdqa xmm4,xmm1 - vmovdqa xmm5,xmm2 - sub r9,0x10 - jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-32))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((16-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vpsrldq xmm7,xmm7,8 - sub r9,0x10 - jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-48))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((48-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vmovdqu xmm7,XMMWORD[((80-64))+rdx] - sub r9,0x10 - jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-64))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((64-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vpsrldq xmm7,xmm7,8 - sub r9,0x10 - jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-80))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((96-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vmovdqu xmm7,XMMWORD[((128-64))+rdx] - sub r9,0x10 - 
jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-96))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((112-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vpsrldq xmm7,xmm7,8 - sub r9,0x10 - jz NEAR $L$tail_avx - - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vmovdqu xmm14,XMMWORD[((-112))+r8] - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vmovdqu xmm6,XMMWORD[((144-64))+rdx] - vpshufb xmm15,xmm14,xmm13 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - vmovq xmm7,QWORD[((184-64))+rdx] - sub r9,0x10 - jmp NEAR $L$tail_avx - -ALIGN 32 -$L$tail_avx: - vpxor xmm15,xmm15,xmm10 -$L$tail_no_xor_avx: - vpunpckhqdq xmm8,xmm15,xmm15 - vpxor xmm3,xmm3,xmm0 - vpclmulqdq xmm0,xmm15,xmm6,0x00 - vpxor xmm8,xmm8,xmm15 - vpxor xmm4,xmm4,xmm1 - vpclmulqdq xmm1,xmm15,xmm6,0x11 - vpxor xmm5,xmm5,xmm2 - vpclmulqdq xmm2,xmm8,xmm7,0x00 - - vmovdqu xmm12,XMMWORD[r10] - - vpxor xmm10,xmm3,xmm0 - vpxor xmm11,xmm4,xmm1 - vpxor xmm5,xmm5,xmm2 - - vpxor xmm5,xmm5,xmm10 - vpxor xmm5,xmm5,xmm11 - vpslldq xmm9,xmm5,8 - vpsrldq xmm5,xmm5,8 - vpxor xmm10,xmm10,xmm9 - vpxor xmm11,xmm11,xmm5 - - vpclmulqdq xmm9,xmm10,xmm12,0x10 - vpalignr xmm10,xmm10,xmm10,8 - vpxor xmm10,xmm10,xmm9 - - vpclmulqdq xmm9,xmm10,xmm12,0x10 - vpalignr xmm10,xmm10,xmm10,8 - vpxor xmm10,xmm10,xmm11 - vpxor xmm10,xmm10,xmm9 - - cmp r9,0 - jne NEAR $L$short_avx - - vpshufb xmm10,xmm10,xmm13 - vmovdqu XMMWORD[rcx],xmm10 - vzeroupper - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea 
rsp,[168+rsp] -$L$SEH_end_gcm_ghash_avx: - DB 0F3h,0C3h ;repret - - -ALIGN 64 -$L$bswap_mask: -DB 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 -$L$0x1c2_polynomial: -DB 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 -$L$7_mask: - DD 7,0,7,0 -$L$7_mask_poly: - DD 7,0,450,0 -ALIGN 64 - -$L$rem_4bit: - DD 0,0,0,471859200,0,943718400,0,610271232 - DD 0,1887436800,0,1822425088,0,1220542464,0,1423966208 - DD 0,3774873600,0,4246732800,0,3644850176,0,3311403008 - DD 0,2441084928,0,2376073216,0,2847932416,0,3051356160 - -$L$rem_8bit: - DW 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E - DW 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E - DW 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E - DW 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E - DW 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E - DW 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E - DW 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E - DW 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E - DW 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE - DW 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE - DW 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE - DW 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE - DW 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E - DW 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E - DW 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE - DW 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE - DW 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E - DW 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E - DW 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E - DW 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E - DW 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E - DW 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E - DW 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E - DW 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E - DW 
0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE - DW 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE - DW 0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE - DW 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE - DW 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E - DW 0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E - DW 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE - DW 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE - -DB 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52 -DB 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 -DB 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 -DB 114,103,62,0 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_prologue - - lea rax,[((48+280))+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$in_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov 
eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_gcm_gmult_4bit wrt ..imagebase - DD $L$SEH_end_gcm_gmult_4bit wrt ..imagebase - DD $L$SEH_info_gcm_gmult_4bit wrt ..imagebase - - DD $L$SEH_begin_gcm_ghash_4bit wrt ..imagebase - DD $L$SEH_end_gcm_ghash_4bit wrt ..imagebase - DD $L$SEH_info_gcm_ghash_4bit wrt ..imagebase - - DD $L$SEH_begin_gcm_init_clmul wrt ..imagebase - DD $L$SEH_end_gcm_init_clmul wrt ..imagebase - DD $L$SEH_info_gcm_init_clmul wrt ..imagebase - - DD $L$SEH_begin_gcm_ghash_clmul wrt ..imagebase - DD $L$SEH_end_gcm_ghash_clmul wrt ..imagebase - DD $L$SEH_info_gcm_ghash_clmul wrt ..imagebase - DD $L$SEH_begin_gcm_init_avx wrt ..imagebase - DD $L$SEH_end_gcm_init_avx wrt ..imagebase - DD $L$SEH_info_gcm_init_clmul wrt ..imagebase - - DD $L$SEH_begin_gcm_ghash_avx wrt ..imagebase - DD $L$SEH_end_gcm_ghash_avx wrt ..imagebase - DD $L$SEH_info_gcm_ghash_clmul wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_gcm_gmult_4bit: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$gmult_prologue wrt ..imagebase,$L$gmult_epilogue wrt ..imagebase -$L$SEH_info_gcm_ghash_4bit: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$ghash_prologue wrt ..imagebase,$L$ghash_epilogue wrt ..imagebase -$L$SEH_info_gcm_init_clmul: -DB 0x01,0x08,0x03,0x00 -DB 0x08,0x68,0x00,0x00 -DB 0x04,0x22,0x00,0x00 -$L$SEH_info_gcm_ghash_clmul: -DB 0x01,0x33,0x16,0x00 -DB 0x33,0xf8,0x09,0x00 -DB 0x2e,0xe8,0x08,0x00 -DB 0x29,0xd8,0x07,0x00 -DB 0x24,0xc8,0x06,0x00 -DB 0x1f,0xb8,0x05,0x00 -DB 0x1a,0xa8,0x04,0x00 -DB 0x15,0x98,0x03,0x00 -DB 0x10,0x88,0x02,0x00 -DB 0x0c,0x78,0x01,0x00 -DB 0x08,0x68,0x00,0x00 -DB 0x04,0x01,0x15,0x00 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/md5-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/md5-x86_64.asm deleted file mode 100644 index 
646201bb58..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/md5-x86_64.asm +++ /dev/null @@ -1,796 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -ALIGN 16 - -global md5_block_asm_data_order - -md5_block_asm_data_order: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_md5_block_asm_data_order: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push rbp - - push rbx - - push r12 - - push r14 - - push r15 - -$L$prologue: - - - - - mov rbp,rdi - shl rdx,6 - lea rdi,[rdx*1+rsi] - mov eax,DWORD[rbp] - mov ebx,DWORD[4+rbp] - mov ecx,DWORD[8+rbp] - mov edx,DWORD[12+rbp] - - - - - - - - cmp rsi,rdi - je NEAR $L$end - - -$L$loop: - mov r8d,eax - mov r9d,ebx - mov r14d,ecx - mov r15d,edx - mov r10d,DWORD[rsi] - mov r11d,edx - xor r11d,ecx - lea eax,[((-680876936))+r10*1+rax] - and r11d,ebx - xor r11d,edx - mov r10d,DWORD[4+rsi] - add eax,r11d - rol eax,7 - mov r11d,ecx - add eax,ebx - xor r11d,ebx - lea edx,[((-389564586))+r10*1+rdx] - and r11d,eax - xor r11d,ecx - mov r10d,DWORD[8+rsi] - add edx,r11d - rol edx,12 - mov r11d,ebx - add edx,eax - xor r11d,eax - lea ecx,[606105819+r10*1+rcx] - and r11d,edx - xor r11d,ebx - mov r10d,DWORD[12+rsi] - add ecx,r11d - rol ecx,17 - mov r11d,eax - add ecx,edx - xor r11d,edx - lea ebx,[((-1044525330))+r10*1+rbx] - and r11d,ecx - xor r11d,eax - mov r10d,DWORD[16+rsi] - add ebx,r11d - rol ebx,22 - mov r11d,edx - add ebx,ecx - xor r11d,ecx - lea eax,[((-176418897))+r10*1+rax] - and r11d,ebx - xor r11d,edx - mov r10d,DWORD[20+rsi] - add eax,r11d - rol eax,7 - mov r11d,ecx - add eax,ebx - xor r11d,ebx - lea edx,[1200080426+r10*1+rdx] - and r11d,eax - xor r11d,ecx - mov r10d,DWORD[24+rsi] - add edx,r11d - rol edx,12 - mov 
r11d,ebx - add edx,eax - xor r11d,eax - lea ecx,[((-1473231341))+r10*1+rcx] - and r11d,edx - xor r11d,ebx - mov r10d,DWORD[28+rsi] - add ecx,r11d - rol ecx,17 - mov r11d,eax - add ecx,edx - xor r11d,edx - lea ebx,[((-45705983))+r10*1+rbx] - and r11d,ecx - xor r11d,eax - mov r10d,DWORD[32+rsi] - add ebx,r11d - rol ebx,22 - mov r11d,edx - add ebx,ecx - xor r11d,ecx - lea eax,[1770035416+r10*1+rax] - and r11d,ebx - xor r11d,edx - mov r10d,DWORD[36+rsi] - add eax,r11d - rol eax,7 - mov r11d,ecx - add eax,ebx - xor r11d,ebx - lea edx,[((-1958414417))+r10*1+rdx] - and r11d,eax - xor r11d,ecx - mov r10d,DWORD[40+rsi] - add edx,r11d - rol edx,12 - mov r11d,ebx - add edx,eax - xor r11d,eax - lea ecx,[((-42063))+r10*1+rcx] - and r11d,edx - xor r11d,ebx - mov r10d,DWORD[44+rsi] - add ecx,r11d - rol ecx,17 - mov r11d,eax - add ecx,edx - xor r11d,edx - lea ebx,[((-1990404162))+r10*1+rbx] - and r11d,ecx - xor r11d,eax - mov r10d,DWORD[48+rsi] - add ebx,r11d - rol ebx,22 - mov r11d,edx - add ebx,ecx - xor r11d,ecx - lea eax,[1804603682+r10*1+rax] - and r11d,ebx - xor r11d,edx - mov r10d,DWORD[52+rsi] - add eax,r11d - rol eax,7 - mov r11d,ecx - add eax,ebx - xor r11d,ebx - lea edx,[((-40341101))+r10*1+rdx] - and r11d,eax - xor r11d,ecx - mov r10d,DWORD[56+rsi] - add edx,r11d - rol edx,12 - mov r11d,ebx - add edx,eax - xor r11d,eax - lea ecx,[((-1502002290))+r10*1+rcx] - and r11d,edx - xor r11d,ebx - mov r10d,DWORD[60+rsi] - add ecx,r11d - rol ecx,17 - mov r11d,eax - add ecx,edx - xor r11d,edx - lea ebx,[1236535329+r10*1+rbx] - and r11d,ecx - xor r11d,eax - mov r10d,DWORD[rsi] - add ebx,r11d - rol ebx,22 - mov r11d,edx - add ebx,ecx - mov r10d,DWORD[4+rsi] - mov r11d,edx - mov r12d,edx - not r11d - lea eax,[((-165796510))+r10*1+rax] - and r12d,ebx - and r11d,ecx - mov r10d,DWORD[24+rsi] - or r12d,r11d - mov r11d,ecx - add eax,r12d - mov r12d,ecx - rol eax,5 - add eax,ebx - not r11d - lea edx,[((-1069501632))+r10*1+rdx] - and r12d,eax - and r11d,ebx - mov r10d,DWORD[44+rsi] - or 
r12d,r11d - mov r11d,ebx - add edx,r12d - mov r12d,ebx - rol edx,9 - add edx,eax - not r11d - lea ecx,[643717713+r10*1+rcx] - and r12d,edx - and r11d,eax - mov r10d,DWORD[rsi] - or r12d,r11d - mov r11d,eax - add ecx,r12d - mov r12d,eax - rol ecx,14 - add ecx,edx - not r11d - lea ebx,[((-373897302))+r10*1+rbx] - and r12d,ecx - and r11d,edx - mov r10d,DWORD[20+rsi] - or r12d,r11d - mov r11d,edx - add ebx,r12d - mov r12d,edx - rol ebx,20 - add ebx,ecx - not r11d - lea eax,[((-701558691))+r10*1+rax] - and r12d,ebx - and r11d,ecx - mov r10d,DWORD[40+rsi] - or r12d,r11d - mov r11d,ecx - add eax,r12d - mov r12d,ecx - rol eax,5 - add eax,ebx - not r11d - lea edx,[38016083+r10*1+rdx] - and r12d,eax - and r11d,ebx - mov r10d,DWORD[60+rsi] - or r12d,r11d - mov r11d,ebx - add edx,r12d - mov r12d,ebx - rol edx,9 - add edx,eax - not r11d - lea ecx,[((-660478335))+r10*1+rcx] - and r12d,edx - and r11d,eax - mov r10d,DWORD[16+rsi] - or r12d,r11d - mov r11d,eax - add ecx,r12d - mov r12d,eax - rol ecx,14 - add ecx,edx - not r11d - lea ebx,[((-405537848))+r10*1+rbx] - and r12d,ecx - and r11d,edx - mov r10d,DWORD[36+rsi] - or r12d,r11d - mov r11d,edx - add ebx,r12d - mov r12d,edx - rol ebx,20 - add ebx,ecx - not r11d - lea eax,[568446438+r10*1+rax] - and r12d,ebx - and r11d,ecx - mov r10d,DWORD[56+rsi] - or r12d,r11d - mov r11d,ecx - add eax,r12d - mov r12d,ecx - rol eax,5 - add eax,ebx - not r11d - lea edx,[((-1019803690))+r10*1+rdx] - and r12d,eax - and r11d,ebx - mov r10d,DWORD[12+rsi] - or r12d,r11d - mov r11d,ebx - add edx,r12d - mov r12d,ebx - rol edx,9 - add edx,eax - not r11d - lea ecx,[((-187363961))+r10*1+rcx] - and r12d,edx - and r11d,eax - mov r10d,DWORD[32+rsi] - or r12d,r11d - mov r11d,eax - add ecx,r12d - mov r12d,eax - rol ecx,14 - add ecx,edx - not r11d - lea ebx,[1163531501+r10*1+rbx] - and r12d,ecx - and r11d,edx - mov r10d,DWORD[52+rsi] - or r12d,r11d - mov r11d,edx - add ebx,r12d - mov r12d,edx - rol ebx,20 - add ebx,ecx - not r11d - lea 
eax,[((-1444681467))+r10*1+rax] - and r12d,ebx - and r11d,ecx - mov r10d,DWORD[8+rsi] - or r12d,r11d - mov r11d,ecx - add eax,r12d - mov r12d,ecx - rol eax,5 - add eax,ebx - not r11d - lea edx,[((-51403784))+r10*1+rdx] - and r12d,eax - and r11d,ebx - mov r10d,DWORD[28+rsi] - or r12d,r11d - mov r11d,ebx - add edx,r12d - mov r12d,ebx - rol edx,9 - add edx,eax - not r11d - lea ecx,[1735328473+r10*1+rcx] - and r12d,edx - and r11d,eax - mov r10d,DWORD[48+rsi] - or r12d,r11d - mov r11d,eax - add ecx,r12d - mov r12d,eax - rol ecx,14 - add ecx,edx - not r11d - lea ebx,[((-1926607734))+r10*1+rbx] - and r12d,ecx - and r11d,edx - mov r10d,DWORD[rsi] - or r12d,r11d - mov r11d,edx - add ebx,r12d - mov r12d,edx - rol ebx,20 - add ebx,ecx - mov r10d,DWORD[20+rsi] - mov r11d,ecx - lea eax,[((-378558))+r10*1+rax] - mov r10d,DWORD[32+rsi] - xor r11d,edx - xor r11d,ebx - add eax,r11d - rol eax,4 - mov r11d,ebx - add eax,ebx - lea edx,[((-2022574463))+r10*1+rdx] - mov r10d,DWORD[44+rsi] - xor r11d,ecx - xor r11d,eax - add edx,r11d - rol edx,11 - mov r11d,eax - add edx,eax - lea ecx,[1839030562+r10*1+rcx] - mov r10d,DWORD[56+rsi] - xor r11d,ebx - xor r11d,edx - add ecx,r11d - rol ecx,16 - mov r11d,edx - add ecx,edx - lea ebx,[((-35309556))+r10*1+rbx] - mov r10d,DWORD[4+rsi] - xor r11d,eax - xor r11d,ecx - add ebx,r11d - rol ebx,23 - mov r11d,ecx - add ebx,ecx - lea eax,[((-1530992060))+r10*1+rax] - mov r10d,DWORD[16+rsi] - xor r11d,edx - xor r11d,ebx - add eax,r11d - rol eax,4 - mov r11d,ebx - add eax,ebx - lea edx,[1272893353+r10*1+rdx] - mov r10d,DWORD[28+rsi] - xor r11d,ecx - xor r11d,eax - add edx,r11d - rol edx,11 - mov r11d,eax - add edx,eax - lea ecx,[((-155497632))+r10*1+rcx] - mov r10d,DWORD[40+rsi] - xor r11d,ebx - xor r11d,edx - add ecx,r11d - rol ecx,16 - mov r11d,edx - add ecx,edx - lea ebx,[((-1094730640))+r10*1+rbx] - mov r10d,DWORD[52+rsi] - xor r11d,eax - xor r11d,ecx - add ebx,r11d - rol ebx,23 - mov r11d,ecx - add ebx,ecx - lea eax,[681279174+r10*1+rax] - mov 
r10d,DWORD[rsi] - xor r11d,edx - xor r11d,ebx - add eax,r11d - rol eax,4 - mov r11d,ebx - add eax,ebx - lea edx,[((-358537222))+r10*1+rdx] - mov r10d,DWORD[12+rsi] - xor r11d,ecx - xor r11d,eax - add edx,r11d - rol edx,11 - mov r11d,eax - add edx,eax - lea ecx,[((-722521979))+r10*1+rcx] - mov r10d,DWORD[24+rsi] - xor r11d,ebx - xor r11d,edx - add ecx,r11d - rol ecx,16 - mov r11d,edx - add ecx,edx - lea ebx,[76029189+r10*1+rbx] - mov r10d,DWORD[36+rsi] - xor r11d,eax - xor r11d,ecx - add ebx,r11d - rol ebx,23 - mov r11d,ecx - add ebx,ecx - lea eax,[((-640364487))+r10*1+rax] - mov r10d,DWORD[48+rsi] - xor r11d,edx - xor r11d,ebx - add eax,r11d - rol eax,4 - mov r11d,ebx - add eax,ebx - lea edx,[((-421815835))+r10*1+rdx] - mov r10d,DWORD[60+rsi] - xor r11d,ecx - xor r11d,eax - add edx,r11d - rol edx,11 - mov r11d,eax - add edx,eax - lea ecx,[530742520+r10*1+rcx] - mov r10d,DWORD[8+rsi] - xor r11d,ebx - xor r11d,edx - add ecx,r11d - rol ecx,16 - mov r11d,edx - add ecx,edx - lea ebx,[((-995338651))+r10*1+rbx] - mov r10d,DWORD[rsi] - xor r11d,eax - xor r11d,ecx - add ebx,r11d - rol ebx,23 - mov r11d,ecx - add ebx,ecx - mov r10d,DWORD[rsi] - mov r11d,0xffffffff - xor r11d,edx - lea eax,[((-198630844))+r10*1+rax] - or r11d,ebx - xor r11d,ecx - add eax,r11d - mov r10d,DWORD[28+rsi] - mov r11d,0xffffffff - rol eax,6 - xor r11d,ecx - add eax,ebx - lea edx,[1126891415+r10*1+rdx] - or r11d,eax - xor r11d,ebx - add edx,r11d - mov r10d,DWORD[56+rsi] - mov r11d,0xffffffff - rol edx,10 - xor r11d,ebx - add edx,eax - lea ecx,[((-1416354905))+r10*1+rcx] - or r11d,edx - xor r11d,eax - add ecx,r11d - mov r10d,DWORD[20+rsi] - mov r11d,0xffffffff - rol ecx,15 - xor r11d,eax - add ecx,edx - lea ebx,[((-57434055))+r10*1+rbx] - or r11d,ecx - xor r11d,edx - add ebx,r11d - mov r10d,DWORD[48+rsi] - mov r11d,0xffffffff - rol ebx,21 - xor r11d,edx - add ebx,ecx - lea eax,[1700485571+r10*1+rax] - or r11d,ebx - xor r11d,ecx - add eax,r11d - mov r10d,DWORD[12+rsi] - mov r11d,0xffffffff - rol eax,6 
- xor r11d,ecx - add eax,ebx - lea edx,[((-1894986606))+r10*1+rdx] - or r11d,eax - xor r11d,ebx - add edx,r11d - mov r10d,DWORD[40+rsi] - mov r11d,0xffffffff - rol edx,10 - xor r11d,ebx - add edx,eax - lea ecx,[((-1051523))+r10*1+rcx] - or r11d,edx - xor r11d,eax - add ecx,r11d - mov r10d,DWORD[4+rsi] - mov r11d,0xffffffff - rol ecx,15 - xor r11d,eax - add ecx,edx - lea ebx,[((-2054922799))+r10*1+rbx] - or r11d,ecx - xor r11d,edx - add ebx,r11d - mov r10d,DWORD[32+rsi] - mov r11d,0xffffffff - rol ebx,21 - xor r11d,edx - add ebx,ecx - lea eax,[1873313359+r10*1+rax] - or r11d,ebx - xor r11d,ecx - add eax,r11d - mov r10d,DWORD[60+rsi] - mov r11d,0xffffffff - rol eax,6 - xor r11d,ecx - add eax,ebx - lea edx,[((-30611744))+r10*1+rdx] - or r11d,eax - xor r11d,ebx - add edx,r11d - mov r10d,DWORD[24+rsi] - mov r11d,0xffffffff - rol edx,10 - xor r11d,ebx - add edx,eax - lea ecx,[((-1560198380))+r10*1+rcx] - or r11d,edx - xor r11d,eax - add ecx,r11d - mov r10d,DWORD[52+rsi] - mov r11d,0xffffffff - rol ecx,15 - xor r11d,eax - add ecx,edx - lea ebx,[1309151649+r10*1+rbx] - or r11d,ecx - xor r11d,edx - add ebx,r11d - mov r10d,DWORD[16+rsi] - mov r11d,0xffffffff - rol ebx,21 - xor r11d,edx - add ebx,ecx - lea eax,[((-145523070))+r10*1+rax] - or r11d,ebx - xor r11d,ecx - add eax,r11d - mov r10d,DWORD[44+rsi] - mov r11d,0xffffffff - rol eax,6 - xor r11d,ecx - add eax,ebx - lea edx,[((-1120210379))+r10*1+rdx] - or r11d,eax - xor r11d,ebx - add edx,r11d - mov r10d,DWORD[8+rsi] - mov r11d,0xffffffff - rol edx,10 - xor r11d,ebx - add edx,eax - lea ecx,[718787259+r10*1+rcx] - or r11d,edx - xor r11d,eax - add ecx,r11d - mov r10d,DWORD[36+rsi] - mov r11d,0xffffffff - rol ecx,15 - xor r11d,eax - add ecx,edx - lea ebx,[((-343485551))+r10*1+rbx] - or r11d,ecx - xor r11d,edx - add ebx,r11d - mov r10d,DWORD[rsi] - mov r11d,0xffffffff - rol ebx,21 - xor r11d,edx - add ebx,ecx - - add eax,r8d - add ebx,r9d - add ecx,r14d - add edx,r15d - - - add rsi,64 - cmp rsi,rdi - jb NEAR $L$loop - - 
-$L$end: - mov DWORD[rbp],eax - mov DWORD[4+rbp],ebx - mov DWORD[8+rbp],ecx - mov DWORD[12+rbp],edx - - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r12,QWORD[16+rsp] - - mov rbx,QWORD[24+rsp] - - mov rbp,QWORD[32+rsp] - - add rsp,40 - -$L$epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_md5_block_asm_data_order: -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - lea r10,[$L$prologue] - cmp rbx,r10 - jb NEAR $L$in_prologue - - mov rax,QWORD[152+r8] - - lea r10,[$L$epilogue] - cmp rbx,r10 - jae NEAR $L$in_prologue - - lea rax,[40+rax] - - mov rbp,QWORD[((-8))+rax] - mov rbx,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r14,QWORD[((-32))+rax] - mov r15,QWORD[((-40))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$in_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_md5_block_asm_data_order wrt ..imagebase - DD $L$SEH_end_md5_block_asm_data_order wrt ..imagebase - DD $L$SEH_info_md5_block_asm_data_order wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_md5_block_asm_data_order: -DB 9,0,0,0 - DD 
se_handler wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm deleted file mode 100644 index 215f5d2a49..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm +++ /dev/null @@ -1,4984 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -EXTERN OPENSSL_ia32cap_P - - -ALIGN 64 -$L$poly: - DQ 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 - -$L$One: - DD 1,1,1,1,1,1,1,1 -$L$Two: - DD 2,2,2,2,2,2,2,2 -$L$Three: - DD 3,3,3,3,3,3,3,3 -$L$ONE_mont: - DQ 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe - - -$L$ord: - DQ 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 -$L$ordK: - DQ 0xccd1c8aaee00bc4f - - - -global ecp_nistz256_neg - -ALIGN 32 -ecp_nistz256_neg: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_neg: - mov rdi,rcx - mov rsi,rdx - - - - push r12 - - push r13 - -$L$neg_body: - - xor r8,r8 - xor r9,r9 - xor r10,r10 - xor r11,r11 - xor r13,r13 - - sub r8,QWORD[rsi] - sbb r9,QWORD[8+rsi] - sbb r10,QWORD[16+rsi] - mov rax,r8 - sbb r11,QWORD[24+rsi] - lea rsi,[$L$poly] - mov rdx,r9 - sbb r13,0 - - add r8,QWORD[rsi] - mov rcx,r10 - adc r9,QWORD[8+rsi] - adc r10,QWORD[16+rsi] - mov r12,r11 - adc r11,QWORD[24+rsi] - test r13,r13 - - cmovz r8,rax - cmovz r9,rdx - mov QWORD[rdi],r8 - cmovz r10,rcx - mov QWORD[8+rdi],r9 - cmovz r11,r12 - mov QWORD[16+rdi],r10 - mov QWORD[24+rdi],r11 - - mov r13,QWORD[rsp] - - mov r12,QWORD[8+rsp] - - lea rsp,[16+rsp] - -$L$neg_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - 
mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_neg: - - - - - - -global ecp_nistz256_ord_mul_mont - -ALIGN 32 -ecp_nistz256_ord_mul_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_ord_mul_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$ecp_nistz256_ord_mul_montx - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$ord_mul_body: - - mov rax,QWORD[rdx] - mov rbx,rdx - lea r14,[$L$ord] - mov r15,QWORD[$L$ordK] - - - mov rcx,rax - mul QWORD[rsi] - mov r8,rax - mov rax,rcx - mov r9,rdx - - mul QWORD[8+rsi] - add r9,rax - mov rax,rcx - adc rdx,0 - mov r10,rdx - - mul QWORD[16+rsi] - add r10,rax - mov rax,rcx - adc rdx,0 - - mov r13,r8 - imul r8,r15 - - mov r11,rdx - mul QWORD[24+rsi] - add r11,rax - mov rax,r8 - adc rdx,0 - mov r12,rdx - - - mul QWORD[r14] - mov rbp,r8 - add r13,rax - mov rax,r8 - adc rdx,0 - mov rcx,rdx - - sub r10,r8 - sbb r8,0 - - mul QWORD[8+r14] - add r9,rcx - adc rdx,0 - add r9,rax - mov rax,rbp - adc r10,rdx - mov rdx,rbp - adc r8,0 - - shl rax,32 - shr rdx,32 - sub r11,rax - mov rax,QWORD[8+rbx] - sbb rbp,rdx - - add r11,r8 - adc r12,rbp - adc r13,0 - - - mov rcx,rax - mul QWORD[rsi] - add r9,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[8+rsi] - add r10,rbp - adc rdx,0 - add r10,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[16+rsi] - add r11,rbp - adc rdx,0 - add r11,rax - mov rax,rcx - adc rdx,0 - - mov rcx,r9 - imul r9,r15 - - mov rbp,rdx - mul QWORD[24+rsi] - add r12,rbp - adc rdx,0 - xor r8,r8 - add r12,rax - mov rax,r9 - adc r13,rdx - adc r8,0 - - - mul QWORD[r14] - mov rbp,r9 - add rcx,rax - mov rax,r9 - adc rcx,rdx - - sub r11,r9 - sbb r9,0 - - mul QWORD[8+r14] - add r10,rcx - adc rdx,0 - add r10,rax - mov rax,rbp - adc r11,rdx - mov rdx,rbp - adc r9,0 - - shl rax,32 - shr rdx,32 - sub r12,rax - 
mov rax,QWORD[16+rbx] - sbb rbp,rdx - - add r12,r9 - adc r13,rbp - adc r8,0 - - - mov rcx,rax - mul QWORD[rsi] - add r10,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[8+rsi] - add r11,rbp - adc rdx,0 - add r11,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[16+rsi] - add r12,rbp - adc rdx,0 - add r12,rax - mov rax,rcx - adc rdx,0 - - mov rcx,r10 - imul r10,r15 - - mov rbp,rdx - mul QWORD[24+rsi] - add r13,rbp - adc rdx,0 - xor r9,r9 - add r13,rax - mov rax,r10 - adc r8,rdx - adc r9,0 - - - mul QWORD[r14] - mov rbp,r10 - add rcx,rax - mov rax,r10 - adc rcx,rdx - - sub r12,r10 - sbb r10,0 - - mul QWORD[8+r14] - add r11,rcx - adc rdx,0 - add r11,rax - mov rax,rbp - adc r12,rdx - mov rdx,rbp - adc r10,0 - - shl rax,32 - shr rdx,32 - sub r13,rax - mov rax,QWORD[24+rbx] - sbb rbp,rdx - - add r13,r10 - adc r8,rbp - adc r9,0 - - - mov rcx,rax - mul QWORD[rsi] - add r11,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[8+rsi] - add r12,rbp - adc rdx,0 - add r12,rax - mov rax,rcx - adc rdx,0 - mov rbp,rdx - - mul QWORD[16+rsi] - add r13,rbp - adc rdx,0 - add r13,rax - mov rax,rcx - adc rdx,0 - - mov rcx,r11 - imul r11,r15 - - mov rbp,rdx - mul QWORD[24+rsi] - add r8,rbp - adc rdx,0 - xor r10,r10 - add r8,rax - mov rax,r11 - adc r9,rdx - adc r10,0 - - - mul QWORD[r14] - mov rbp,r11 - add rcx,rax - mov rax,r11 - adc rcx,rdx - - sub r13,r11 - sbb r11,0 - - mul QWORD[8+r14] - add r12,rcx - adc rdx,0 - add r12,rax - mov rax,rbp - adc r13,rdx - mov rdx,rbp - adc r11,0 - - shl rax,32 - shr rdx,32 - sub r8,rax - sbb rbp,rdx - - add r8,r11 - adc r9,rbp - adc r10,0 - - - mov rsi,r12 - sub r12,QWORD[r14] - mov r11,r13 - sbb r13,QWORD[8+r14] - mov rcx,r8 - sbb r8,QWORD[16+r14] - mov rbp,r9 - sbb r9,QWORD[24+r14] - sbb r10,0 - - cmovc r12,rsi - cmovc r13,r11 - cmovc r8,rcx - cmovc r9,rbp - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r13,QWORD[16+rsp] - - mov 
r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$ord_mul_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_ord_mul_mont: - - - - - - - -global ecp_nistz256_ord_sqr_mont - -ALIGN 32 -ecp_nistz256_ord_sqr_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_ord_sqr_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$ecp_nistz256_ord_sqr_montx - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$ord_sqr_body: - - mov r8,QWORD[rsi] - mov rax,QWORD[8+rsi] - mov r14,QWORD[16+rsi] - mov r15,QWORD[24+rsi] - lea rsi,[$L$ord] - mov rbx,rdx - jmp NEAR $L$oop_ord_sqr - -ALIGN 32 -$L$oop_ord_sqr: - - mov rbp,rax - mul r8 - mov r9,rax -DB 102,72,15,110,205 - mov rax,r14 - mov r10,rdx - - mul r8 - add r10,rax - mov rax,r15 -DB 102,73,15,110,214 - adc rdx,0 - mov r11,rdx - - mul r8 - add r11,rax - mov rax,r15 -DB 102,73,15,110,223 - adc rdx,0 - mov r12,rdx - - - mul r14 - mov r13,rax - mov rax,r14 - mov r14,rdx - - - mul rbp - add r11,rax - mov rax,r15 - adc rdx,0 - mov r15,rdx - - mul rbp - add r12,rax - adc rdx,0 - - add r12,r15 - adc r13,rdx - adc r14,0 - - - xor r15,r15 - mov rax,r8 - add r9,r9 - adc r10,r10 - adc r11,r11 - adc r12,r12 - adc r13,r13 - adc r14,r14 - adc r15,0 - - - mul rax - mov r8,rax -DB 102,72,15,126,200 - mov rbp,rdx - - mul rax - add r9,rbp - adc r10,rax -DB 102,72,15,126,208 - adc rdx,0 - mov rbp,rdx - - mul rax - add r11,rbp - adc r12,rax -DB 102,72,15,126,216 - adc rdx,0 - mov rbp,rdx - - mov rcx,r8 - imul r8,QWORD[32+rsi] - - mul rax - add r13,rbp - adc r14,rax - mov rax,QWORD[rsi] - adc r15,rdx - - - mul r8 - mov rbp,r8 - add rcx,rax - mov rax,QWORD[8+rsi] - adc rcx,rdx - - sub r10,r8 - sbb rbp,0 - - mul r8 - add r9,rcx - adc rdx,0 - add r9,rax - 
mov rax,r8 - adc r10,rdx - mov rdx,r8 - adc rbp,0 - - mov rcx,r9 - imul r9,QWORD[32+rsi] - - shl rax,32 - shr rdx,32 - sub r11,rax - mov rax,QWORD[rsi] - sbb r8,rdx - - add r11,rbp - adc r8,0 - - - mul r9 - mov rbp,r9 - add rcx,rax - mov rax,QWORD[8+rsi] - adc rcx,rdx - - sub r11,r9 - sbb rbp,0 - - mul r9 - add r10,rcx - adc rdx,0 - add r10,rax - mov rax,r9 - adc r11,rdx - mov rdx,r9 - adc rbp,0 - - mov rcx,r10 - imul r10,QWORD[32+rsi] - - shl rax,32 - shr rdx,32 - sub r8,rax - mov rax,QWORD[rsi] - sbb r9,rdx - - add r8,rbp - adc r9,0 - - - mul r10 - mov rbp,r10 - add rcx,rax - mov rax,QWORD[8+rsi] - adc rcx,rdx - - sub r8,r10 - sbb rbp,0 - - mul r10 - add r11,rcx - adc rdx,0 - add r11,rax - mov rax,r10 - adc r8,rdx - mov rdx,r10 - adc rbp,0 - - mov rcx,r11 - imul r11,QWORD[32+rsi] - - shl rax,32 - shr rdx,32 - sub r9,rax - mov rax,QWORD[rsi] - sbb r10,rdx - - add r9,rbp - adc r10,0 - - - mul r11 - mov rbp,r11 - add rcx,rax - mov rax,QWORD[8+rsi] - adc rcx,rdx - - sub r9,r11 - sbb rbp,0 - - mul r11 - add r8,rcx - adc rdx,0 - add r8,rax - mov rax,r11 - adc r9,rdx - mov rdx,r11 - adc rbp,0 - - shl rax,32 - shr rdx,32 - sub r10,rax - sbb r11,rdx - - add r10,rbp - adc r11,0 - - - xor rdx,rdx - add r8,r12 - adc r9,r13 - mov r12,r8 - adc r10,r14 - adc r11,r15 - mov rax,r9 - adc rdx,0 - - - sub r8,QWORD[rsi] - mov r14,r10 - sbb r9,QWORD[8+rsi] - sbb r10,QWORD[16+rsi] - mov r15,r11 - sbb r11,QWORD[24+rsi] - sbb rdx,0 - - cmovc r8,r12 - cmovnc rax,r9 - cmovnc r14,r10 - cmovnc r15,r11 - - dec rbx - jnz NEAR $L$oop_ord_sqr - - mov QWORD[rdi],r8 - mov QWORD[8+rdi],rax - pxor xmm1,xmm1 - mov QWORD[16+rdi],r14 - pxor xmm2,xmm2 - mov QWORD[24+rdi],r15 - pxor xmm3,xmm3 - - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r13,QWORD[16+rsp] - - mov r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$ord_sqr_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - 
-$L$SEH_end_ecp_nistz256_ord_sqr_mont: - - -ALIGN 32 -ecp_nistz256_ord_mul_montx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_ord_mul_montx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$ecp_nistz256_ord_mul_montx: - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$ord_mulx_body: - - mov rbx,rdx - mov rdx,QWORD[rdx] - mov r9,QWORD[rsi] - mov r10,QWORD[8+rsi] - mov r11,QWORD[16+rsi] - mov r12,QWORD[24+rsi] - lea rsi,[((-128))+rsi] - lea r14,[(($L$ord-128))] - mov r15,QWORD[$L$ordK] - - - mulx r9,r8,r9 - mulx r10,rcx,r10 - mulx r11,rbp,r11 - add r9,rcx - mulx r12,rcx,r12 - mov rdx,r8 - mulx rax,rdx,r15 - adc r10,rbp - adc r11,rcx - adc r12,0 - - - xor r13,r13 - mulx rbp,rcx,QWORD[((0+128))+r14] - adcx r8,rcx - adox r9,rbp - - mulx rbp,rcx,QWORD[((8+128))+r14] - adcx r9,rcx - adox r10,rbp - - mulx rbp,rcx,QWORD[((16+128))+r14] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((24+128))+r14] - mov rdx,QWORD[8+rbx] - adcx r11,rcx - adox r12,rbp - adcx r12,r8 - adox r13,r8 - adc r13,0 - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r9,rcx - adox r10,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r9 - mulx rax,rdx,r15 - adcx r12,rcx - adox r13,rbp - - adcx r13,r8 - adox r8,r8 - adc r8,0 - - - mulx rbp,rcx,QWORD[((0+128))+r14] - adcx r9,rcx - adox r10,rbp - - mulx rbp,rcx,QWORD[((8+128))+r14] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((16+128))+r14] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((24+128))+r14] - mov rdx,QWORD[16+rbx] - adcx r12,rcx - adox r13,rbp - adcx r13,r9 - adox r8,r9 - adc r8,0 - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r12,rcx - adox r13,rbp - - mulx 
rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r10 - mulx rax,rdx,r15 - adcx r13,rcx - adox r8,rbp - - adcx r8,r9 - adox r9,r9 - adc r9,0 - - - mulx rbp,rcx,QWORD[((0+128))+r14] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((8+128))+r14] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((16+128))+r14] - adcx r12,rcx - adox r13,rbp - - mulx rbp,rcx,QWORD[((24+128))+r14] - mov rdx,QWORD[24+rbx] - adcx r13,rcx - adox r8,rbp - adcx r8,r10 - adox r9,r10 - adc r9,0 - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r12,rcx - adox r13,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r13,rcx - adox r8,rbp - - mulx rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r11 - mulx rax,rdx,r15 - adcx r8,rcx - adox r9,rbp - - adcx r9,r10 - adox r10,r10 - adc r10,0 - - - mulx rbp,rcx,QWORD[((0+128))+r14] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((8+128))+r14] - adcx r12,rcx - adox r13,rbp - - mulx rbp,rcx,QWORD[((16+128))+r14] - adcx r13,rcx - adox r8,rbp - - mulx rbp,rcx,QWORD[((24+128))+r14] - lea r14,[128+r14] - mov rbx,r12 - adcx r8,rcx - adox r9,rbp - mov rdx,r13 - adcx r9,r11 - adox r10,r11 - adc r10,0 - - - - mov rcx,r8 - sub r12,QWORD[r14] - sbb r13,QWORD[8+r14] - sbb r8,QWORD[16+r14] - mov rbp,r9 - sbb r9,QWORD[24+r14] - sbb r10,0 - - cmovc r12,rbx - cmovc r13,rdx - cmovc r8,rcx - cmovc r9,rbp - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r13,QWORD[16+rsp] - - mov r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$ord_mulx_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_ord_mul_montx: - - -ALIGN 32 -ecp_nistz256_ord_sqr_montx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_ord_sqr_montx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 
- - - -$L$ecp_nistz256_ord_sqr_montx: - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$ord_sqrx_body: - - mov rbx,rdx - mov rdx,QWORD[rsi] - mov r14,QWORD[8+rsi] - mov r15,QWORD[16+rsi] - mov r8,QWORD[24+rsi] - lea rsi,[$L$ord] - jmp NEAR $L$oop_ord_sqrx - -ALIGN 32 -$L$oop_ord_sqrx: - mulx r10,r9,r14 - mulx r11,rcx,r15 - mov rax,rdx -DB 102,73,15,110,206 - mulx r12,rbp,r8 - mov rdx,r14 - add r10,rcx -DB 102,73,15,110,215 - adc r11,rbp - adc r12,0 - xor r13,r13 - - mulx rbp,rcx,r15 - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,r8 - mov rdx,r15 - adcx r12,rcx - adox r13,rbp - adc r13,0 - - mulx r14,rcx,r8 - mov rdx,rax -DB 102,73,15,110,216 - xor r15,r15 - adcx r9,r9 - adox r13,rcx - adcx r10,r10 - adox r14,r15 - - - mulx rbp,r8,rdx -DB 102,72,15,126,202 - adcx r11,r11 - adox r9,rbp - adcx r12,r12 - mulx rax,rcx,rdx -DB 102,72,15,126,210 - adcx r13,r13 - adox r10,rcx - adcx r14,r14 - mulx rbp,rcx,rdx -DB 0x67 -DB 102,72,15,126,218 - adox r11,rax - adcx r15,r15 - adox r12,rcx - adox r13,rbp - mulx rax,rcx,rdx - adox r14,rcx - adox r15,rax - - - mov rdx,r8 - mulx rcx,rdx,QWORD[32+rsi] - - xor rax,rax - mulx rbp,rcx,QWORD[rsi] - adcx r8,rcx - adox r9,rbp - mulx rbp,rcx,QWORD[8+rsi] - adcx r9,rcx - adox r10,rbp - mulx rbp,rcx,QWORD[16+rsi] - adcx r10,rcx - adox r11,rbp - mulx rbp,rcx,QWORD[24+rsi] - adcx r11,rcx - adox r8,rbp - adcx r8,rax - - - mov rdx,r9 - mulx rcx,rdx,QWORD[32+rsi] - - mulx rbp,rcx,QWORD[rsi] - adox r9,rcx - adcx r10,rbp - mulx rbp,rcx,QWORD[8+rsi] - adox r10,rcx - adcx r11,rbp - mulx rbp,rcx,QWORD[16+rsi] - adox r11,rcx - adcx r8,rbp - mulx rbp,rcx,QWORD[24+rsi] - adox r8,rcx - adcx r9,rbp - adox r9,rax - - - mov rdx,r10 - mulx rcx,rdx,QWORD[32+rsi] - - mulx rbp,rcx,QWORD[rsi] - adcx r10,rcx - adox r11,rbp - mulx rbp,rcx,QWORD[8+rsi] - adcx r11,rcx - adox r8,rbp - mulx rbp,rcx,QWORD[16+rsi] - adcx r8,rcx - adox r9,rbp - mulx rbp,rcx,QWORD[24+rsi] - adcx r9,rcx - adox r10,rbp - adcx r10,rax - - - mov rdx,r11 - mulx 
rcx,rdx,QWORD[32+rsi] - - mulx rbp,rcx,QWORD[rsi] - adox r11,rcx - adcx r8,rbp - mulx rbp,rcx,QWORD[8+rsi] - adox r8,rcx - adcx r9,rbp - mulx rbp,rcx,QWORD[16+rsi] - adox r9,rcx - adcx r10,rbp - mulx rbp,rcx,QWORD[24+rsi] - adox r10,rcx - adcx r11,rbp - adox r11,rax - - - add r12,r8 - adc r9,r13 - mov rdx,r12 - adc r10,r14 - adc r11,r15 - mov r14,r9 - adc rax,0 - - - sub r12,QWORD[rsi] - mov r15,r10 - sbb r9,QWORD[8+rsi] - sbb r10,QWORD[16+rsi] - mov r8,r11 - sbb r11,QWORD[24+rsi] - sbb rax,0 - - cmovnc rdx,r12 - cmovnc r14,r9 - cmovnc r15,r10 - cmovnc r8,r11 - - dec rbx - jnz NEAR $L$oop_ord_sqrx - - mov QWORD[rdi],rdx - mov QWORD[8+rdi],r14 - pxor xmm1,xmm1 - mov QWORD[16+rdi],r15 - pxor xmm2,xmm2 - mov QWORD[24+rdi],r8 - pxor xmm3,xmm3 - - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r13,QWORD[16+rsp] - - mov r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$ord_sqrx_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_ord_sqr_montx: - - - - - - -global ecp_nistz256_mul_mont - -ALIGN 32 -ecp_nistz256_mul_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_mul_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 -$L$mul_mont: - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$mul_body: - cmp ecx,0x80100 - je NEAR $L$mul_montx - mov rbx,rdx - mov rax,QWORD[rdx] - mov r9,QWORD[rsi] - mov r10,QWORD[8+rsi] - mov r11,QWORD[16+rsi] - mov r12,QWORD[24+rsi] - - call __ecp_nistz256_mul_montq - jmp NEAR $L$mul_mont_done - -ALIGN 32 -$L$mul_montx: - mov rbx,rdx - mov rdx,QWORD[rdx] - mov r9,QWORD[rsi] - mov r10,QWORD[8+rsi] - mov r11,QWORD[16+rsi] - mov r12,QWORD[24+rsi] - lea rsi,[((-128))+rsi] - - call __ecp_nistz256_mul_montx -$L$mul_mont_done: - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - 
- mov r13,QWORD[16+rsp] - - mov r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$mul_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_mul_mont: - - -ALIGN 32 -__ecp_nistz256_mul_montq: - - - - mov rbp,rax - mul r9 - mov r14,QWORD[(($L$poly+8))] - mov r8,rax - mov rax,rbp - mov r9,rdx - - mul r10 - mov r15,QWORD[(($L$poly+24))] - add r9,rax - mov rax,rbp - adc rdx,0 - mov r10,rdx - - mul r11 - add r10,rax - mov rax,rbp - adc rdx,0 - mov r11,rdx - - mul r12 - add r11,rax - mov rax,r8 - adc rdx,0 - xor r13,r13 - mov r12,rdx - - - - - - - - - - - mov rbp,r8 - shl r8,32 - mul r15 - shr rbp,32 - add r9,r8 - adc r10,rbp - adc r11,rax - mov rax,QWORD[8+rbx] - adc r12,rdx - adc r13,0 - xor r8,r8 - - - - mov rbp,rax - mul QWORD[rsi] - add r9,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[8+rsi] - add r10,rcx - adc rdx,0 - add r10,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[16+rsi] - add r11,rcx - adc rdx,0 - add r11,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[24+rsi] - add r12,rcx - adc rdx,0 - add r12,rax - mov rax,r9 - adc r13,rdx - adc r8,0 - - - - mov rbp,r9 - shl r9,32 - mul r15 - shr rbp,32 - add r10,r9 - adc r11,rbp - adc r12,rax - mov rax,QWORD[16+rbx] - adc r13,rdx - adc r8,0 - xor r9,r9 - - - - mov rbp,rax - mul QWORD[rsi] - add r10,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[8+rsi] - add r11,rcx - adc rdx,0 - add r11,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[16+rsi] - add r12,rcx - adc rdx,0 - add r12,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[24+rsi] - add r13,rcx - adc rdx,0 - add r13,rax - mov rax,r10 - adc r8,rdx - adc r9,0 - - - - mov rbp,r10 - shl r10,32 - mul r15 - shr rbp,32 - add r11,r10 - adc r12,rbp - adc r13,rax - mov rax,QWORD[24+rbx] - adc r8,rdx - adc r9,0 - xor r10,r10 - - - - mov rbp,rax - mul QWORD[rsi] - add r11,rax - mov rax,rbp - adc rdx,0 - 
mov rcx,rdx - - mul QWORD[8+rsi] - add r12,rcx - adc rdx,0 - add r12,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[16+rsi] - add r13,rcx - adc rdx,0 - add r13,rax - mov rax,rbp - adc rdx,0 - mov rcx,rdx - - mul QWORD[24+rsi] - add r8,rcx - adc rdx,0 - add r8,rax - mov rax,r11 - adc r9,rdx - adc r10,0 - - - - mov rbp,r11 - shl r11,32 - mul r15 - shr rbp,32 - add r12,r11 - adc r13,rbp - mov rcx,r12 - adc r8,rax - adc r9,rdx - mov rbp,r13 - adc r10,0 - - - - sub r12,-1 - mov rbx,r8 - sbb r13,r14 - sbb r8,0 - mov rdx,r9 - sbb r9,r15 - sbb r10,0 - - cmovc r12,rcx - cmovc r13,rbp - mov QWORD[rdi],r12 - cmovc r8,rbx - mov QWORD[8+rdi],r13 - cmovc r9,rdx - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - - - - - - - -global ecp_nistz256_sqr_mont - -ALIGN 32 -ecp_nistz256_sqr_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_sqr_mont: - mov rdi,rcx - mov rsi,rdx - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - -$L$sqr_body: - cmp ecx,0x80100 - je NEAR $L$sqr_montx - mov rax,QWORD[rsi] - mov r14,QWORD[8+rsi] - mov r15,QWORD[16+rsi] - mov r8,QWORD[24+rsi] - - call __ecp_nistz256_sqr_montq - jmp NEAR $L$sqr_mont_done - -ALIGN 32 -$L$sqr_montx: - mov rdx,QWORD[rsi] - mov r14,QWORD[8+rsi] - mov r15,QWORD[16+rsi] - mov r8,QWORD[24+rsi] - lea rsi,[((-128))+rsi] - - call __ecp_nistz256_sqr_montx -$L$sqr_mont_done: - mov r15,QWORD[rsp] - - mov r14,QWORD[8+rsp] - - mov r13,QWORD[16+rsp] - - mov r12,QWORD[24+rsp] - - mov rbx,QWORD[32+rsp] - - mov rbp,QWORD[40+rsp] - - lea rsp,[48+rsp] - -$L$sqr_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_sqr_mont: - - -ALIGN 32 -__ecp_nistz256_sqr_montq: - - mov r13,rax - mul r14 - mov r9,rax - mov rax,r15 - mov r10,rdx - - mul r13 - add r10,rax - mov rax,r8 - adc rdx,0 - 
mov r11,rdx - - mul r13 - add r11,rax - mov rax,r15 - adc rdx,0 - mov r12,rdx - - - mul r14 - add r11,rax - mov rax,r8 - adc rdx,0 - mov rbp,rdx - - mul r14 - add r12,rax - mov rax,r8 - adc rdx,0 - add r12,rbp - mov r13,rdx - adc r13,0 - - - mul r15 - xor r15,r15 - add r13,rax - mov rax,QWORD[rsi] - mov r14,rdx - adc r14,0 - - add r9,r9 - adc r10,r10 - adc r11,r11 - adc r12,r12 - adc r13,r13 - adc r14,r14 - adc r15,0 - - mul rax - mov r8,rax - mov rax,QWORD[8+rsi] - mov rcx,rdx - - mul rax - add r9,rcx - adc r10,rax - mov rax,QWORD[16+rsi] - adc rdx,0 - mov rcx,rdx - - mul rax - add r11,rcx - adc r12,rax - mov rax,QWORD[24+rsi] - adc rdx,0 - mov rcx,rdx - - mul rax - add r13,rcx - adc r14,rax - mov rax,r8 - adc r15,rdx - - mov rsi,QWORD[(($L$poly+8))] - mov rbp,QWORD[(($L$poly+24))] - - - - - mov rcx,r8 - shl r8,32 - mul rbp - shr rcx,32 - add r9,r8 - adc r10,rcx - adc r11,rax - mov rax,r9 - adc rdx,0 - - - - mov rcx,r9 - shl r9,32 - mov r8,rdx - mul rbp - shr rcx,32 - add r10,r9 - adc r11,rcx - adc r8,rax - mov rax,r10 - adc rdx,0 - - - - mov rcx,r10 - shl r10,32 - mov r9,rdx - mul rbp - shr rcx,32 - add r11,r10 - adc r8,rcx - adc r9,rax - mov rax,r11 - adc rdx,0 - - - - mov rcx,r11 - shl r11,32 - mov r10,rdx - mul rbp - shr rcx,32 - add r8,r11 - adc r9,rcx - adc r10,rax - adc rdx,0 - xor r11,r11 - - - - add r12,r8 - adc r13,r9 - mov r8,r12 - adc r14,r10 - adc r15,rdx - mov r9,r13 - adc r11,0 - - sub r12,-1 - mov r10,r14 - sbb r13,rsi - sbb r14,0 - mov rcx,r15 - sbb r15,rbp - sbb r11,0 - - cmovc r12,r8 - cmovc r13,r9 - mov QWORD[rdi],r12 - cmovc r14,r10 - mov QWORD[8+rdi],r13 - cmovc r15,rcx - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - - DB 0F3h,0C3h ;repret - - - -ALIGN 32 -__ecp_nistz256_mul_montx: - - - - mulx r9,r8,r9 - mulx r10,rcx,r10 - mov r14,32 - xor r13,r13 - mulx r11,rbp,r11 - mov r15,QWORD[(($L$poly+24))] - adc r9,rcx - mulx r12,rcx,r12 - mov rdx,r8 - adc r10,rbp - shlx rbp,r8,r14 - adc r11,rcx - shrx rcx,r8,r14 - adc r12,0 - - - - add r9,rbp - 
adc r10,rcx - - mulx rbp,rcx,r15 - mov rdx,QWORD[8+rbx] - adc r11,rcx - adc r12,rbp - adc r13,0 - xor r8,r8 - - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r9,rcx - adox r10,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r9 - adcx r12,rcx - shlx rcx,r9,r14 - adox r13,rbp - shrx rbp,r9,r14 - - adcx r13,r8 - adox r8,r8 - adc r8,0 - - - - add r10,rcx - adc r11,rbp - - mulx rbp,rcx,r15 - mov rdx,QWORD[16+rbx] - adc r12,rcx - adc r13,rbp - adc r8,0 - xor r9,r9 - - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r10,rcx - adox r11,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r12,rcx - adox r13,rbp - - mulx rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r10 - adcx r13,rcx - shlx rcx,r10,r14 - adox r8,rbp - shrx rbp,r10,r14 - - adcx r8,r9 - adox r9,r9 - adc r9,0 - - - - add r11,rcx - adc r12,rbp - - mulx rbp,rcx,r15 - mov rdx,QWORD[24+rbx] - adc r13,rcx - adc r8,rbp - adc r9,0 - xor r10,r10 - - - - mulx rbp,rcx,QWORD[((0+128))+rsi] - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,QWORD[((8+128))+rsi] - adcx r12,rcx - adox r13,rbp - - mulx rbp,rcx,QWORD[((16+128))+rsi] - adcx r13,rcx - adox r8,rbp - - mulx rbp,rcx,QWORD[((24+128))+rsi] - mov rdx,r11 - adcx r8,rcx - shlx rcx,r11,r14 - adox r9,rbp - shrx rbp,r11,r14 - - adcx r9,r10 - adox r10,r10 - adc r10,0 - - - - add r12,rcx - adc r13,rbp - - mulx rbp,rcx,r15 - mov rbx,r12 - mov r14,QWORD[(($L$poly+8))] - adc r8,rcx - mov rdx,r13 - adc r9,rbp - adc r10,0 - - - - xor eax,eax - mov rcx,r8 - sbb r12,-1 - sbb r13,r14 - sbb r8,0 - mov rbp,r9 - sbb r9,r15 - sbb r10,0 - - cmovc r12,rbx - cmovc r13,rdx - mov QWORD[rdi],r12 - cmovc r8,rcx - mov QWORD[8+rdi],r13 - cmovc r9,rbp - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_sqr_montx: - - mulx r10,r9,r14 - mulx r11,rcx,r15 - 
xor eax,eax - adc r10,rcx - mulx r12,rbp,r8 - mov rdx,r14 - adc r11,rbp - adc r12,0 - xor r13,r13 - - - mulx rbp,rcx,r15 - adcx r11,rcx - adox r12,rbp - - mulx rbp,rcx,r8 - mov rdx,r15 - adcx r12,rcx - adox r13,rbp - adc r13,0 - - - mulx r14,rcx,r8 - mov rdx,QWORD[((0+128))+rsi] - xor r15,r15 - adcx r9,r9 - adox r13,rcx - adcx r10,r10 - adox r14,r15 - - mulx rbp,r8,rdx - mov rdx,QWORD[((8+128))+rsi] - adcx r11,r11 - adox r9,rbp - adcx r12,r12 - mulx rax,rcx,rdx - mov rdx,QWORD[((16+128))+rsi] - adcx r13,r13 - adox r10,rcx - adcx r14,r14 -DB 0x67 - mulx rbp,rcx,rdx - mov rdx,QWORD[((24+128))+rsi] - adox r11,rax - adcx r15,r15 - adox r12,rcx - mov rsi,32 - adox r13,rbp -DB 0x67,0x67 - mulx rax,rcx,rdx - mov rdx,QWORD[(($L$poly+24))] - adox r14,rcx - shlx rcx,r8,rsi - adox r15,rax - shrx rax,r8,rsi - mov rbp,rdx - - - add r9,rcx - adc r10,rax - - mulx r8,rcx,r8 - adc r11,rcx - shlx rcx,r9,rsi - adc r8,0 - shrx rax,r9,rsi - - - add r10,rcx - adc r11,rax - - mulx r9,rcx,r9 - adc r8,rcx - shlx rcx,r10,rsi - adc r9,0 - shrx rax,r10,rsi - - - add r11,rcx - adc r8,rax - - mulx r10,rcx,r10 - adc r9,rcx - shlx rcx,r11,rsi - adc r10,0 - shrx rax,r11,rsi - - - add r8,rcx - adc r9,rax - - mulx r11,rcx,r11 - adc r10,rcx - adc r11,0 - - xor rdx,rdx - add r12,r8 - mov rsi,QWORD[(($L$poly+8))] - adc r13,r9 - mov r8,r12 - adc r14,r10 - adc r15,r11 - mov r9,r13 - adc rdx,0 - - sub r12,-1 - mov r10,r14 - sbb r13,rsi - sbb r14,0 - mov r11,r15 - sbb r15,rbp - sbb rdx,0 - - cmovc r12,r8 - cmovc r13,r9 - mov QWORD[rdi],r12 - cmovc r14,r10 - mov QWORD[8+rdi],r13 - cmovc r15,r11 - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - - DB 0F3h,0C3h ;repret - - - - -global ecp_nistz256_select_w5 - -ALIGN 32 -ecp_nistz256_select_w5: - - lea rax,[OPENSSL_ia32cap_P] - mov rax,QWORD[8+rax] - test eax,32 - jnz NEAR $L$avx2_select_w5 - lea rax,[((-136))+rsp] -$L$SEH_begin_ecp_nistz256_select_w5: -DB 0x48,0x8d,0x60,0xe0 -DB 0x0f,0x29,0x70,0xe0 -DB 0x0f,0x29,0x78,0xf0 -DB 0x44,0x0f,0x29,0x00 -DB 
0x44,0x0f,0x29,0x48,0x10 -DB 0x44,0x0f,0x29,0x50,0x20 -DB 0x44,0x0f,0x29,0x58,0x30 -DB 0x44,0x0f,0x29,0x60,0x40 -DB 0x44,0x0f,0x29,0x68,0x50 -DB 0x44,0x0f,0x29,0x70,0x60 -DB 0x44,0x0f,0x29,0x78,0x70 - movdqa xmm0,XMMWORD[$L$One] - movd xmm1,r8d - - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - - movdqa xmm8,xmm0 - pshufd xmm1,xmm1,0 - - mov rax,16 -$L$select_loop_sse_w5: - - movdqa xmm15,xmm8 - paddd xmm8,xmm0 - pcmpeqd xmm15,xmm1 - - movdqa xmm9,XMMWORD[rdx] - movdqa xmm10,XMMWORD[16+rdx] - movdqa xmm11,XMMWORD[32+rdx] - movdqa xmm12,XMMWORD[48+rdx] - movdqa xmm13,XMMWORD[64+rdx] - movdqa xmm14,XMMWORD[80+rdx] - lea rdx,[96+rdx] - - pand xmm9,xmm15 - pand xmm10,xmm15 - por xmm2,xmm9 - pand xmm11,xmm15 - por xmm3,xmm10 - pand xmm12,xmm15 - por xmm4,xmm11 - pand xmm13,xmm15 - por xmm5,xmm12 - pand xmm14,xmm15 - por xmm6,xmm13 - por xmm7,xmm14 - - dec rax - jnz NEAR $L$select_loop_sse_w5 - - movdqu XMMWORD[rcx],xmm2 - movdqu XMMWORD[16+rcx],xmm3 - movdqu XMMWORD[32+rcx],xmm4 - movdqu XMMWORD[48+rcx],xmm5 - movdqu XMMWORD[64+rcx],xmm6 - movdqu XMMWORD[80+rcx],xmm7 - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea rsp,[168+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_select_w5: - - - - -global ecp_nistz256_select_w7 - -ALIGN 32 -ecp_nistz256_select_w7: - - lea rax,[OPENSSL_ia32cap_P] - mov rax,QWORD[8+rax] - test eax,32 - jnz NEAR $L$avx2_select_w7 - lea rax,[((-136))+rsp] -$L$SEH_begin_ecp_nistz256_select_w7: -DB 0x48,0x8d,0x60,0xe0 -DB 0x0f,0x29,0x70,0xe0 -DB 0x0f,0x29,0x78,0xf0 -DB 0x44,0x0f,0x29,0x00 -DB 0x44,0x0f,0x29,0x48,0x10 -DB 0x44,0x0f,0x29,0x50,0x20 -DB 0x44,0x0f,0x29,0x58,0x30 -DB 0x44,0x0f,0x29,0x60,0x40 -DB 0x44,0x0f,0x29,0x68,0x50 
-DB 0x44,0x0f,0x29,0x70,0x60 -DB 0x44,0x0f,0x29,0x78,0x70 - movdqa xmm8,XMMWORD[$L$One] - movd xmm1,r8d - - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - - movdqa xmm0,xmm8 - pshufd xmm1,xmm1,0 - mov rax,64 - -$L$select_loop_sse_w7: - movdqa xmm15,xmm8 - paddd xmm8,xmm0 - movdqa xmm9,XMMWORD[rdx] - movdqa xmm10,XMMWORD[16+rdx] - pcmpeqd xmm15,xmm1 - movdqa xmm11,XMMWORD[32+rdx] - movdqa xmm12,XMMWORD[48+rdx] - lea rdx,[64+rdx] - - pand xmm9,xmm15 - pand xmm10,xmm15 - por xmm2,xmm9 - pand xmm11,xmm15 - por xmm3,xmm10 - pand xmm12,xmm15 - por xmm4,xmm11 - prefetcht0 [255+rdx] - por xmm5,xmm12 - - dec rax - jnz NEAR $L$select_loop_sse_w7 - - movdqu XMMWORD[rcx],xmm2 - movdqu XMMWORD[16+rcx],xmm3 - movdqu XMMWORD[32+rcx],xmm4 - movdqu XMMWORD[48+rcx],xmm5 - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea rsp,[168+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_select_w7: - - - - -ALIGN 32 -ecp_nistz256_avx2_select_w5: - -$L$avx2_select_w5: - vzeroupper - lea rax,[((-136))+rsp] - mov r11,rsp -$L$SEH_begin_ecp_nistz256_avx2_select_w5: -DB 0x48,0x8d,0x60,0xe0 -DB 0xc5,0xf8,0x29,0x70,0xe0 -DB 0xc5,0xf8,0x29,0x78,0xf0 -DB 0xc5,0x78,0x29,0x40,0x00 -DB 0xc5,0x78,0x29,0x48,0x10 -DB 0xc5,0x78,0x29,0x50,0x20 -DB 0xc5,0x78,0x29,0x58,0x30 -DB 0xc5,0x78,0x29,0x60,0x40 -DB 0xc5,0x78,0x29,0x68,0x50 -DB 0xc5,0x78,0x29,0x70,0x60 -DB 0xc5,0x78,0x29,0x78,0x70 - vmovdqa ymm0,YMMWORD[$L$Two] - - vpxor ymm2,ymm2,ymm2 - vpxor ymm3,ymm3,ymm3 - vpxor ymm4,ymm4,ymm4 - - vmovdqa ymm5,YMMWORD[$L$One] - vmovdqa ymm10,YMMWORD[$L$Two] - - vmovd xmm1,r8d - vpermd ymm1,ymm2,ymm1 - - mov rax,8 -$L$select_loop_avx2_w5: - - vmovdqa ymm6,YMMWORD[rdx] - vmovdqa ymm7,YMMWORD[32+rdx] - vmovdqa ymm8,YMMWORD[64+rdx] - - 
vmovdqa ymm11,YMMWORD[96+rdx] - vmovdqa ymm12,YMMWORD[128+rdx] - vmovdqa ymm13,YMMWORD[160+rdx] - - vpcmpeqd ymm9,ymm5,ymm1 - vpcmpeqd ymm14,ymm10,ymm1 - - vpaddd ymm5,ymm5,ymm0 - vpaddd ymm10,ymm10,ymm0 - lea rdx,[192+rdx] - - vpand ymm6,ymm6,ymm9 - vpand ymm7,ymm7,ymm9 - vpand ymm8,ymm8,ymm9 - vpand ymm11,ymm11,ymm14 - vpand ymm12,ymm12,ymm14 - vpand ymm13,ymm13,ymm14 - - vpxor ymm2,ymm2,ymm6 - vpxor ymm3,ymm3,ymm7 - vpxor ymm4,ymm4,ymm8 - vpxor ymm2,ymm2,ymm11 - vpxor ymm3,ymm3,ymm12 - vpxor ymm4,ymm4,ymm13 - - dec rax - jnz NEAR $L$select_loop_avx2_w5 - - vmovdqu YMMWORD[rcx],ymm2 - vmovdqu YMMWORD[32+rcx],ymm3 - vmovdqu YMMWORD[64+rcx],ymm4 - vzeroupper - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea rsp,[r11] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_avx2_select_w5: - - - - -global ecp_nistz256_avx2_select_w7 - -ALIGN 32 -ecp_nistz256_avx2_select_w7: - -$L$avx2_select_w7: - vzeroupper - mov r11,rsp - lea rax,[((-136))+rsp] -$L$SEH_begin_ecp_nistz256_avx2_select_w7: -DB 0x48,0x8d,0x60,0xe0 -DB 0xc5,0xf8,0x29,0x70,0xe0 -DB 0xc5,0xf8,0x29,0x78,0xf0 -DB 0xc5,0x78,0x29,0x40,0x00 -DB 0xc5,0x78,0x29,0x48,0x10 -DB 0xc5,0x78,0x29,0x50,0x20 -DB 0xc5,0x78,0x29,0x58,0x30 -DB 0xc5,0x78,0x29,0x60,0x40 -DB 0xc5,0x78,0x29,0x68,0x50 -DB 0xc5,0x78,0x29,0x70,0x60 -DB 0xc5,0x78,0x29,0x78,0x70 - vmovdqa ymm0,YMMWORD[$L$Three] - - vpxor ymm2,ymm2,ymm2 - vpxor ymm3,ymm3,ymm3 - - vmovdqa ymm4,YMMWORD[$L$One] - vmovdqa ymm8,YMMWORD[$L$Two] - vmovdqa ymm12,YMMWORD[$L$Three] - - vmovd xmm1,r8d - vpermd ymm1,ymm2,ymm1 - - - mov rax,21 -$L$select_loop_avx2_w7: - - vmovdqa ymm5,YMMWORD[rdx] - vmovdqa ymm6,YMMWORD[32+rdx] - - vmovdqa ymm9,YMMWORD[64+rdx] - vmovdqa ymm10,YMMWORD[96+rdx] - - vmovdqa ymm13,YMMWORD[128+rdx] - 
vmovdqa ymm14,YMMWORD[160+rdx] - - vpcmpeqd ymm7,ymm4,ymm1 - vpcmpeqd ymm11,ymm8,ymm1 - vpcmpeqd ymm15,ymm12,ymm1 - - vpaddd ymm4,ymm4,ymm0 - vpaddd ymm8,ymm8,ymm0 - vpaddd ymm12,ymm12,ymm0 - lea rdx,[192+rdx] - - vpand ymm5,ymm5,ymm7 - vpand ymm6,ymm6,ymm7 - vpand ymm9,ymm9,ymm11 - vpand ymm10,ymm10,ymm11 - vpand ymm13,ymm13,ymm15 - vpand ymm14,ymm14,ymm15 - - vpxor ymm2,ymm2,ymm5 - vpxor ymm3,ymm3,ymm6 - vpxor ymm2,ymm2,ymm9 - vpxor ymm3,ymm3,ymm10 - vpxor ymm2,ymm2,ymm13 - vpxor ymm3,ymm3,ymm14 - - dec rax - jnz NEAR $L$select_loop_avx2_w7 - - - vmovdqa ymm5,YMMWORD[rdx] - vmovdqa ymm6,YMMWORD[32+rdx] - - vpcmpeqd ymm7,ymm4,ymm1 - - vpand ymm5,ymm5,ymm7 - vpand ymm6,ymm6,ymm7 - - vpxor ymm2,ymm2,ymm5 - vpxor ymm3,ymm3,ymm6 - - vmovdqu YMMWORD[rcx],ymm2 - vmovdqu YMMWORD[32+rcx],ymm3 - vzeroupper - movaps xmm6,XMMWORD[rsp] - movaps xmm7,XMMWORD[16+rsp] - movaps xmm8,XMMWORD[32+rsp] - movaps xmm9,XMMWORD[48+rsp] - movaps xmm10,XMMWORD[64+rsp] - movaps xmm11,XMMWORD[80+rsp] - movaps xmm12,XMMWORD[96+rsp] - movaps xmm13,XMMWORD[112+rsp] - movaps xmm14,XMMWORD[128+rsp] - movaps xmm15,XMMWORD[144+rsp] - lea rsp,[r11] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_avx2_select_w7: - - -ALIGN 32 -__ecp_nistz256_add_toq: - - xor r11,r11 - add r12,QWORD[rbx] - adc r13,QWORD[8+rbx] - mov rax,r12 - adc r8,QWORD[16+rbx] - adc r9,QWORD[24+rbx] - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - cmovc r13,rbp - mov QWORD[rdi],r12 - cmovc r8,rcx - mov QWORD[8+rdi],r13 - cmovc r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_sub_fromq: - - sub r12,QWORD[rbx] - sbb r13,QWORD[8+rbx] - mov rax,r12 - sbb r8,QWORD[16+rbx] - sbb r9,QWORD[24+rbx] - mov rbp,r13 - sbb r11,r11 - - add r12,-1 - mov rcx,r8 - adc r13,r14 - adc r8,0 - mov r10,r9 - adc r9,r15 - test r11,r11 - - cmovz r12,rax - cmovz r13,rbp - mov QWORD[rdi],r12 - cmovz r8,rcx - 
mov QWORD[8+rdi],r13 - cmovz r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_subq: - - sub rax,r12 - sbb rbp,r13 - mov r12,rax - sbb rcx,r8 - sbb r10,r9 - mov r13,rbp - sbb r11,r11 - - add rax,-1 - mov r8,rcx - adc rbp,r14 - adc rcx,0 - mov r9,r10 - adc r10,r15 - test r11,r11 - - cmovnz r12,rax - cmovnz r13,rbp - cmovnz r8,rcx - cmovnz r9,r10 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_mul_by_2q: - - xor r11,r11 - add r12,r12 - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - cmovc r13,rbp - mov QWORD[rdi],r12 - cmovc r8,rcx - mov QWORD[8+rdi],r13 - cmovc r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - -global ecp_nistz256_point_double - -ALIGN 32 -ecp_nistz256_point_double: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_double: - mov rdi,rcx - mov rsi,rdx - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$point_doublex - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*5+8 - -$L$point_doubleq_body: - -$L$point_double_shortcutq: - movdqu xmm0,XMMWORD[rsi] - mov rbx,rsi - movdqu xmm1,XMMWORD[16+rsi] - mov r12,QWORD[((32+0))+rsi] - mov r13,QWORD[((32+8))+rsi] - mov r8,QWORD[((32+16))+rsi] - mov r9,QWORD[((32+24))+rsi] - mov r14,QWORD[(($L$poly+8))] - mov r15,QWORD[(($L$poly+24))] - movdqa XMMWORD[96+rsp],xmm0 - movdqa XMMWORD[(96+16)+rsp],xmm1 - lea r10,[32+rdi] - lea r11,[64+rdi] -DB 102,72,15,110,199 -DB 102,73,15,110,202 -DB 102,73,15,110,211 - - lea rdi,[rsp] - call __ecp_nistz256_mul_by_2q - - mov rax,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - lea rsi,[((64-0))+rsi] - lea rdi,[64+rsp] - 
call __ecp_nistz256_sqr_montq - - mov rax,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((0+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[32+rbx] - mov r9,QWORD[((64+0))+rbx] - mov r10,QWORD[((64+8))+rbx] - mov r11,QWORD[((64+16))+rbx] - mov r12,QWORD[((64+24))+rbx] - lea rsi,[((64-0))+rbx] - lea rbx,[32+rbx] -DB 102,72,15,126,215 - call __ecp_nistz256_mul_montq - call __ecp_nistz256_mul_by_2q - - mov r12,QWORD[((96+0))+rsp] - mov r13,QWORD[((96+8))+rsp] - lea rbx,[64+rsp] - mov r8,QWORD[((96+16))+rsp] - mov r9,QWORD[((96+24))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_add_toq - - mov r12,QWORD[((96+0))+rsp] - mov r13,QWORD[((96+8))+rsp] - lea rbx,[64+rsp] - mov r8,QWORD[((96+16))+rsp] - mov r9,QWORD[((96+24))+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((0+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] -DB 102,72,15,126,207 - call __ecp_nistz256_sqr_montq - xor r9,r9 - mov rax,r12 - add r12,-1 - mov r10,r13 - adc r13,rsi - mov rcx,r14 - adc r14,0 - mov r8,r15 - adc r15,rbp - adc r9,0 - xor rsi,rsi - test rax,1 - - cmovz r12,rax - cmovz r13,r10 - cmovz r14,rcx - cmovz r15,r8 - cmovz r9,rsi - - mov rax,r13 - shr r12,1 - shl rax,63 - mov r10,r14 - shr r13,1 - or r12,rax - shl r10,63 - mov rcx,r15 - shr r14,1 - or r13,r10 - shl rcx,63 - mov QWORD[rdi],r12 - shr r15,1 - mov QWORD[8+rdi],r13 - shl r9,63 - or r14,rcx - or r15,r9 - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - mov rax,QWORD[64+rsp] - lea rbx,[64+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montq - - lea rdi,[128+rsp] - call __ecp_nistz256_mul_by_2q - - lea rbx,[32+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_add_toq - - mov rax,QWORD[96+rsp] - lea 
rbx,[96+rsp] - mov r9,QWORD[((0+0))+rsp] - mov r10,QWORD[((8+0))+rsp] - lea rsi,[((0+0))+rsp] - mov r11,QWORD[((16+0))+rsp] - mov r12,QWORD[((24+0))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_mul_montq - - lea rdi,[128+rsp] - call __ecp_nistz256_mul_by_2q - - mov rax,QWORD[((0+32))+rsp] - mov r14,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r15,QWORD[((16+32))+rsp] - mov r8,QWORD[((24+32))+rsp] -DB 102,72,15,126,199 - call __ecp_nistz256_sqr_montq - - lea rbx,[128+rsp] - mov r8,r14 - mov r9,r15 - mov r14,rsi - mov r15,rbp - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[((0+0))+rsp] - mov rbp,QWORD[((0+8))+rsp] - mov rcx,QWORD[((0+16))+rsp] - mov r10,QWORD[((0+24))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_subq - - mov rax,QWORD[32+rsp] - lea rbx,[32+rsp] - mov r14,r12 - xor ecx,ecx - mov QWORD[((0+0))+rsp],r12 - mov r10,r13 - mov QWORD[((0+8))+rsp],r13 - cmovz r11,r8 - mov QWORD[((0+16))+rsp],r8 - lea rsi,[((0-0))+rsp] - cmovz r12,r9 - mov QWORD[((0+24))+rsp],r9 - mov r9,r14 - lea rdi,[rsp] - call __ecp_nistz256_mul_montq - -DB 102,72,15,126,203 -DB 102,72,15,126,207 - call __ecp_nistz256_sub_fromq - - lea rsi,[((160+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$point_doubleq_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_double: -global ecp_nistz256_point_add - -ALIGN 32 -ecp_nistz256_point_add: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_add: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$point_addx - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*18+8 - -$L$point_addq_body: - - movdqu 
xmm0,XMMWORD[rsi] - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm3,XMMWORD[48+rsi] - movdqu xmm4,XMMWORD[64+rsi] - movdqu xmm5,XMMWORD[80+rsi] - mov rbx,rsi - mov rsi,rdx - movdqa XMMWORD[384+rsp],xmm0 - movdqa XMMWORD[(384+16)+rsp],xmm1 - movdqa XMMWORD[416+rsp],xmm2 - movdqa XMMWORD[(416+16)+rsp],xmm3 - movdqa XMMWORD[448+rsp],xmm4 - movdqa XMMWORD[(448+16)+rsp],xmm5 - por xmm5,xmm4 - - movdqu xmm0,XMMWORD[rsi] - pshufd xmm3,xmm5,0xb1 - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - por xmm5,xmm3 - movdqu xmm3,XMMWORD[48+rsi] - mov rax,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - movdqa XMMWORD[480+rsp],xmm0 - pshufd xmm4,xmm5,0x1e - movdqa XMMWORD[(480+16)+rsp],xmm1 - movdqu xmm0,XMMWORD[64+rsi] - movdqu xmm1,XMMWORD[80+rsi] - movdqa XMMWORD[512+rsp],xmm2 - movdqa XMMWORD[(512+16)+rsp],xmm3 - por xmm5,xmm4 - pxor xmm4,xmm4 - por xmm1,xmm0 -DB 102,72,15,110,199 - - lea rsi,[((64-0))+rsi] - mov QWORD[((544+0))+rsp],rax - mov QWORD[((544+8))+rsp],r14 - mov QWORD[((544+16))+rsp],r15 - mov QWORD[((544+24))+rsp],r8 - lea rdi,[96+rsp] - call __ecp_nistz256_sqr_montq - - pcmpeqd xmm5,xmm4 - pshufd xmm4,xmm1,0xb1 - por xmm4,xmm1 - pshufd xmm5,xmm5,0 - pshufd xmm3,xmm4,0x1e - por xmm4,xmm3 - pxor xmm3,xmm3 - pcmpeqd xmm4,xmm3 - pshufd xmm4,xmm4,0 - mov rax,QWORD[((64+0))+rbx] - mov r14,QWORD[((64+8))+rbx] - mov r15,QWORD[((64+16))+rbx] - mov r8,QWORD[((64+24))+rbx] -DB 102,72,15,110,203 - - lea rsi,[((64-0))+rbx] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[544+rsp] - lea rbx,[544+rsp] - mov r9,QWORD[((0+96))+rsp] - mov r10,QWORD[((8+96))+rsp] - lea rsi,[((0+96))+rsp] - mov r11,QWORD[((16+96))+rsp] - mov r12,QWORD[((24+96))+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[448+rsp] - lea rbx,[448+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov 
r12,QWORD[((24+32))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[416+rsp] - lea rbx,[416+rsp] - mov r9,QWORD[((0+224))+rsp] - mov r10,QWORD[((8+224))+rsp] - lea rsi,[((0+224))+rsp] - mov r11,QWORD[((16+224))+rsp] - mov r12,QWORD[((24+224))+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[512+rsp] - lea rbx,[512+rsp] - mov r9,QWORD[((0+256))+rsp] - mov r10,QWORD[((8+256))+rsp] - lea rsi,[((0+256))+rsp] - mov r11,QWORD[((16+256))+rsp] - mov r12,QWORD[((24+256))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[224+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromq - - or r12,r13 - movdqa xmm2,xmm4 - or r12,r8 - or r12,r9 - por xmm2,xmm5 -DB 102,73,15,110,220 - - mov rax,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+96))+rsp] - mov r10,QWORD[((8+96))+rsp] - lea rsi,[((0+96))+rsp] - mov r11,QWORD[((16+96))+rsp] - mov r12,QWORD[((24+96))+rsp] - lea rdi,[160+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[480+rsp] - lea rbx,[480+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[160+rsp] - lea rdi,[rsp] - call __ecp_nistz256_sub_fromq - - or r12,r13 - or r12,r8 - or r12,r9 - -DB 102,73,15,126,208 -DB 102,73,15,126,217 - or r12,r8 -DB 0x3e - jnz NEAR $L$add_proceedq - - - - test r9,r9 - jz NEAR $L$add_doubleq - - - - - - -DB 102,72,15,126,199 - pxor xmm0,xmm0 - movdqu XMMWORD[rdi],xmm0 - movdqu XMMWORD[16+rdi],xmm0 - movdqu XMMWORD[32+rdi],xmm0 - movdqu XMMWORD[48+rdi],xmm0 - movdqu XMMWORD[64+rdi],xmm0 - movdqu XMMWORD[80+rdi],xmm0 - jmp NEAR $L$add_doneq - -ALIGN 32 -$L$add_doubleq: -DB 102,72,15,126,206 -DB 102,72,15,126,199 - add rsp,416 - - jmp NEAR $L$point_double_shortcutq - - -ALIGN 32 -$L$add_proceedq: - mov rax,QWORD[((0+64))+rsp] - mov r14,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov 
r15,QWORD[((16+64))+rsp] - mov r8,QWORD[((24+64))+rsp] - lea rdi,[96+rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[448+rsp] - lea rbx,[448+rsp] - mov r9,QWORD[((0+0))+rsp] - mov r10,QWORD[((8+0))+rsp] - lea rsi,[((0+0))+rsp] - mov r11,QWORD[((16+0))+rsp] - mov r12,QWORD[((24+0))+rsp] - lea rdi,[352+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((0+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[544+rsp] - lea rbx,[544+rsp] - mov r9,QWORD[((0+352))+rsp] - mov r10,QWORD[((8+352))+rsp] - lea rsi,[((0+352))+rsp] - mov r11,QWORD[((16+352))+rsp] - mov r12,QWORD[((24+352))+rsp] - lea rdi,[352+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[rsp] - lea rbx,[rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[128+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[160+rsp] - lea rbx,[160+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_mul_montq - - - - - xor r11,r11 - add r12,r12 - lea rsi,[96+rsp] - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - mov rax,QWORD[rsi] - cmovc r13,rbp - mov rbp,QWORD[8+rsi] - cmovc r8,rcx - mov rcx,QWORD[16+rsi] - cmovc r9,r10 - mov r10,QWORD[24+rsi] - - call __ecp_nistz256_subq - - lea rbx,[128+rsp] - lea rdi,[288+rsp] - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[((192+0))+rsp] - mov rbp,QWORD[((192+8))+rsp] - mov rcx,QWORD[((192+16))+rsp] - mov r10,QWORD[((192+24))+rsp] - lea rdi,[320+rsp] - - call __ecp_nistz256_subq - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov 
QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - mov rax,QWORD[128+rsp] - lea rbx,[128+rsp] - mov r9,QWORD[((0+224))+rsp] - mov r10,QWORD[((8+224))+rsp] - lea rsi,[((0+224))+rsp] - mov r11,QWORD[((16+224))+rsp] - mov r12,QWORD[((24+224))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[320+rsp] - lea rbx,[320+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[320+rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[256+rsp] - lea rdi,[320+rsp] - call __ecp_nistz256_sub_fromq - -DB 102,72,15,126,199 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[352+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((352+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[544+rsp] - pand xmm3,XMMWORD[((544+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[448+rsp] - pand xmm3,XMMWORD[((448+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[64+rdi],xmm2 - movdqu XMMWORD[80+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[288+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((288+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[480+rsp] - pand xmm3,XMMWORD[((480+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[384+rsp] - pand xmm3,XMMWORD[((384+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[rdi],xmm2 - movdqu XMMWORD[16+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[320+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((320+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[512+rsp] - pand xmm3,XMMWORD[((512+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa 
xmm3,xmm4 - pand xmm2,XMMWORD[416+rsp] - pand xmm3,XMMWORD[((416+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm3 - -$L$add_doneq: - lea rsi,[((576+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$point_addq_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_add: -global ecp_nistz256_point_add_affine - -ALIGN 32 -ecp_nistz256_point_add_affine: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_add_affine: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$point_add_affinex - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*15+8 - -$L$add_affineq_body: - - movdqu xmm0,XMMWORD[rsi] - mov rbx,rdx - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm3,XMMWORD[48+rsi] - movdqu xmm4,XMMWORD[64+rsi] - movdqu xmm5,XMMWORD[80+rsi] - mov rax,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - movdqa XMMWORD[320+rsp],xmm0 - movdqa XMMWORD[(320+16)+rsp],xmm1 - movdqa XMMWORD[352+rsp],xmm2 - movdqa XMMWORD[(352+16)+rsp],xmm3 - movdqa XMMWORD[384+rsp],xmm4 - movdqa XMMWORD[(384+16)+rsp],xmm5 - por xmm5,xmm4 - - movdqu xmm0,XMMWORD[rbx] - pshufd xmm3,xmm5,0xb1 - movdqu xmm1,XMMWORD[16+rbx] - movdqu xmm2,XMMWORD[32+rbx] - por xmm5,xmm3 - movdqu xmm3,XMMWORD[48+rbx] - movdqa XMMWORD[416+rsp],xmm0 - pshufd xmm4,xmm5,0x1e - movdqa XMMWORD[(416+16)+rsp],xmm1 - por xmm1,xmm0 -DB 102,72,15,110,199 - movdqa XMMWORD[448+rsp],xmm2 - movdqa XMMWORD[(448+16)+rsp],xmm3 - por xmm3,xmm2 - por xmm5,xmm4 - pxor xmm4,xmm4 - por 
xmm3,xmm1 - - lea rsi,[((64-0))+rsi] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montq - - pcmpeqd xmm5,xmm4 - pshufd xmm4,xmm3,0xb1 - mov rax,QWORD[rbx] - - mov r9,r12 - por xmm4,xmm3 - pshufd xmm5,xmm5,0 - pshufd xmm3,xmm4,0x1e - mov r10,r13 - por xmm4,xmm3 - pxor xmm3,xmm3 - mov r11,r14 - pcmpeqd xmm4,xmm3 - pshufd xmm4,xmm4,0 - - lea rsi,[((32-0))+rsp] - mov r12,r15 - lea rdi,[rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[320+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[288+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[448+rsp] - lea rbx,[448+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((0+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[352+rsp] - lea rdi,[96+rsp] - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[((0+64))+rsp] - mov r14,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov r15,QWORD[((16+64))+rsp] - mov r8,QWORD[((24+64))+rsp] - lea rdi,[128+rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[((0+96))+rsp] - mov r14,QWORD[((8+96))+rsp] - lea rsi,[((0+96))+rsp] - mov r15,QWORD[((16+96))+rsp] - mov r8,QWORD[((24+96))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_sqr_montq - - mov rax,QWORD[128+rsp] - lea rbx,[128+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[160+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[320+rsp] - lea rbx,[320+rsp] - mov 
r9,QWORD[((0+128))+rsp] - mov r10,QWORD[((8+128))+rsp] - lea rsi,[((0+128))+rsp] - mov r11,QWORD[((16+128))+rsp] - mov r12,QWORD[((24+128))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_mul_montq - - - - - xor r11,r11 - add r12,r12 - lea rsi,[192+rsp] - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - mov rax,QWORD[rsi] - cmovc r13,rbp - mov rbp,QWORD[8+rsi] - cmovc r8,rcx - mov rcx,QWORD[16+rsi] - cmovc r9,r10 - mov r10,QWORD[24+rsi] - - call __ecp_nistz256_subq - - lea rbx,[160+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_sub_fromq - - mov rax,QWORD[((0+0))+rsp] - mov rbp,QWORD[((0+8))+rsp] - mov rcx,QWORD[((0+16))+rsp] - mov r10,QWORD[((0+24))+rsp] - lea rdi,[64+rsp] - - call __ecp_nistz256_subq - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - mov rax,QWORD[352+rsp] - lea rbx,[352+rsp] - mov r9,QWORD[((0+160))+rsp] - mov r10,QWORD[((8+160))+rsp] - lea rsi,[((0+160))+rsp] - mov r11,QWORD[((16+160))+rsp] - mov r12,QWORD[((24+160))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montq - - mov rax,QWORD[96+rsp] - lea rbx,[96+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((0+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_mul_montq - - lea rbx,[32+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_sub_fromq - -DB 102,72,15,126,199 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[288+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((288+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[$L$ONE_mont] - pand xmm3,XMMWORD[(($L$ONE_mont+16))] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[384+rsp] - pand xmm3,XMMWORD[((384+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu 
XMMWORD[64+rdi],xmm2 - movdqu XMMWORD[80+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[224+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((224+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[416+rsp] - pand xmm3,XMMWORD[((416+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[320+rsp] - pand xmm3,XMMWORD[((320+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[rdi],xmm2 - movdqu XMMWORD[16+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[256+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((256+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[448+rsp] - pand xmm3,XMMWORD[((448+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[352+rsp] - pand xmm3,XMMWORD[((352+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm3 - - lea rsi,[((480+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$add_affineq_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_add_affine: - -ALIGN 32 -__ecp_nistz256_add_tox: - - xor r11,r11 - adc r12,QWORD[rbx] - adc r13,QWORD[8+rbx] - mov rax,r12 - adc r8,QWORD[16+rbx] - adc r9,QWORD[24+rbx] - mov rbp,r13 - adc r11,0 - - xor r10,r10 - sbb r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - cmovc r13,rbp - mov QWORD[rdi],r12 - cmovc r8,rcx - mov QWORD[8+rdi],r13 - cmovc r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_sub_fromx: - - xor r11,r11 - sbb 
r12,QWORD[rbx] - sbb r13,QWORD[8+rbx] - mov rax,r12 - sbb r8,QWORD[16+rbx] - sbb r9,QWORD[24+rbx] - mov rbp,r13 - sbb r11,0 - - xor r10,r10 - adc r12,-1 - mov rcx,r8 - adc r13,r14 - adc r8,0 - mov r10,r9 - adc r9,r15 - - bt r11,0 - cmovnc r12,rax - cmovnc r13,rbp - mov QWORD[rdi],r12 - cmovnc r8,rcx - mov QWORD[8+rdi],r13 - cmovnc r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_subx: - - xor r11,r11 - sbb rax,r12 - sbb rbp,r13 - mov r12,rax - sbb rcx,r8 - sbb r10,r9 - mov r13,rbp - sbb r11,0 - - xor r9,r9 - adc rax,-1 - mov r8,rcx - adc rbp,r14 - adc rcx,0 - mov r9,r10 - adc r10,r15 - - bt r11,0 - cmovc r12,rax - cmovc r13,rbp - cmovc r8,rcx - cmovc r9,r10 - - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -__ecp_nistz256_mul_by_2x: - - xor r11,r11 - adc r12,r12 - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - xor r10,r10 - sbb r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - cmovc r13,rbp - mov QWORD[rdi],r12 - cmovc r8,rcx - mov QWORD[8+rdi],r13 - cmovc r9,r10 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - - DB 0F3h,0C3h ;repret - - - -ALIGN 32 -ecp_nistz256_point_doublex: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_doublex: - mov rdi,rcx - mov rsi,rdx - - - -$L$point_doublex: - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*5+8 - -$L$point_doublex_body: - -$L$point_double_shortcutx: - movdqu xmm0,XMMWORD[rsi] - mov rbx,rsi - movdqu xmm1,XMMWORD[16+rsi] - mov r12,QWORD[((32+0))+rsi] - mov r13,QWORD[((32+8))+rsi] - mov r8,QWORD[((32+16))+rsi] - mov r9,QWORD[((32+24))+rsi] - mov r14,QWORD[(($L$poly+8))] - mov r15,QWORD[(($L$poly+24))] - movdqa XMMWORD[96+rsp],xmm0 - movdqa XMMWORD[(96+16)+rsp],xmm1 - lea r10,[32+rdi] - lea r11,[64+rdi] -DB 102,72,15,110,199 -DB 102,73,15,110,202 -DB 102,73,15,110,211 - - lea rdi,[rsp] 
- call __ecp_nistz256_mul_by_2x - - mov rdx,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - lea rsi,[((64-128))+rsi] - lea rdi,[64+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((-128+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[32+rbx] - mov r9,QWORD[((64+0))+rbx] - mov r10,QWORD[((64+8))+rbx] - mov r11,QWORD[((64+16))+rbx] - mov r12,QWORD[((64+24))+rbx] - lea rsi,[((64-128))+rbx] - lea rbx,[32+rbx] -DB 102,72,15,126,215 - call __ecp_nistz256_mul_montx - call __ecp_nistz256_mul_by_2x - - mov r12,QWORD[((96+0))+rsp] - mov r13,QWORD[((96+8))+rsp] - lea rbx,[64+rsp] - mov r8,QWORD[((96+16))+rsp] - mov r9,QWORD[((96+24))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_add_tox - - mov r12,QWORD[((96+0))+rsp] - mov r13,QWORD[((96+8))+rsp] - lea rbx,[64+rsp] - mov r8,QWORD[((96+16))+rsp] - mov r9,QWORD[((96+24))+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromx - - mov rdx,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((-128+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] -DB 102,72,15,126,207 - call __ecp_nistz256_sqr_montx - xor r9,r9 - mov rax,r12 - add r12,-1 - mov r10,r13 - adc r13,rsi - mov rcx,r14 - adc r14,0 - mov r8,r15 - adc r15,rbp - adc r9,0 - xor rsi,rsi - test rax,1 - - cmovz r12,rax - cmovz r13,r10 - cmovz r14,rcx - cmovz r15,r8 - cmovz r9,rsi - - mov rax,r13 - shr r12,1 - shl rax,63 - mov r10,r14 - shr r13,1 - or r12,rax - shl r10,63 - mov rcx,r15 - shr r14,1 - or r13,r10 - shl rcx,63 - mov QWORD[rdi],r12 - shr r15,1 - mov QWORD[8+rdi],r13 - shl r9,63 - or r14,rcx - or r15,r9 - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - mov rdx,QWORD[64+rsp] - lea rbx,[64+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov 
r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montx - - lea rdi,[128+rsp] - call __ecp_nistz256_mul_by_2x - - lea rbx,[32+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_add_tox - - mov rdx,QWORD[96+rsp] - lea rbx,[96+rsp] - mov r9,QWORD[((0+0))+rsp] - mov r10,QWORD[((8+0))+rsp] - lea rsi,[((-128+0))+rsp] - mov r11,QWORD[((16+0))+rsp] - mov r12,QWORD[((24+0))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_mul_montx - - lea rdi,[128+rsp] - call __ecp_nistz256_mul_by_2x - - mov rdx,QWORD[((0+32))+rsp] - mov r14,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r15,QWORD[((16+32))+rsp] - mov r8,QWORD[((24+32))+rsp] -DB 102,72,15,126,199 - call __ecp_nistz256_sqr_montx - - lea rbx,[128+rsp] - mov r8,r14 - mov r9,r15 - mov r14,rsi - mov r15,rbp - call __ecp_nistz256_sub_fromx - - mov rax,QWORD[((0+0))+rsp] - mov rbp,QWORD[((0+8))+rsp] - mov rcx,QWORD[((0+16))+rsp] - mov r10,QWORD[((0+24))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_subx - - mov rdx,QWORD[32+rsp] - lea rbx,[32+rsp] - mov r14,r12 - xor ecx,ecx - mov QWORD[((0+0))+rsp],r12 - mov r10,r13 - mov QWORD[((0+8))+rsp],r13 - cmovz r11,r8 - mov QWORD[((0+16))+rsp],r8 - lea rsi,[((0-128))+rsp] - cmovz r12,r9 - mov QWORD[((0+24))+rsp],r9 - mov r9,r14 - lea rdi,[rsp] - call __ecp_nistz256_mul_montx - -DB 102,72,15,126,203 -DB 102,72,15,126,207 - call __ecp_nistz256_sub_fromx - - lea rsi,[((160+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$point_doublex_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_doublex: - -ALIGN 32 -ecp_nistz256_point_addx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_addx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$point_addx: - push rbp - - push rbx - - 
push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*18+8 - -$L$point_addx_body: - - movdqu xmm0,XMMWORD[rsi] - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm3,XMMWORD[48+rsi] - movdqu xmm4,XMMWORD[64+rsi] - movdqu xmm5,XMMWORD[80+rsi] - mov rbx,rsi - mov rsi,rdx - movdqa XMMWORD[384+rsp],xmm0 - movdqa XMMWORD[(384+16)+rsp],xmm1 - movdqa XMMWORD[416+rsp],xmm2 - movdqa XMMWORD[(416+16)+rsp],xmm3 - movdqa XMMWORD[448+rsp],xmm4 - movdqa XMMWORD[(448+16)+rsp],xmm5 - por xmm5,xmm4 - - movdqu xmm0,XMMWORD[rsi] - pshufd xmm3,xmm5,0xb1 - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - por xmm5,xmm3 - movdqu xmm3,XMMWORD[48+rsi] - mov rdx,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - movdqa XMMWORD[480+rsp],xmm0 - pshufd xmm4,xmm5,0x1e - movdqa XMMWORD[(480+16)+rsp],xmm1 - movdqu xmm0,XMMWORD[64+rsi] - movdqu xmm1,XMMWORD[80+rsi] - movdqa XMMWORD[512+rsp],xmm2 - movdqa XMMWORD[(512+16)+rsp],xmm3 - por xmm5,xmm4 - pxor xmm4,xmm4 - por xmm1,xmm0 -DB 102,72,15,110,199 - - lea rsi,[((64-128))+rsi] - mov QWORD[((544+0))+rsp],rdx - mov QWORD[((544+8))+rsp],r14 - mov QWORD[((544+16))+rsp],r15 - mov QWORD[((544+24))+rsp],r8 - lea rdi,[96+rsp] - call __ecp_nistz256_sqr_montx - - pcmpeqd xmm5,xmm4 - pshufd xmm4,xmm1,0xb1 - por xmm4,xmm1 - pshufd xmm5,xmm5,0 - pshufd xmm3,xmm4,0x1e - por xmm4,xmm3 - pxor xmm3,xmm3 - pcmpeqd xmm4,xmm3 - pshufd xmm4,xmm4,0 - mov rdx,QWORD[((64+0))+rbx] - mov r14,QWORD[((64+8))+rbx] - mov r15,QWORD[((64+16))+rbx] - mov r8,QWORD[((64+24))+rbx] -DB 102,72,15,110,203 - - lea rsi,[((64-128))+rbx] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[544+rsp] - lea rbx,[544+rsp] - mov r9,QWORD[((0+96))+rsp] - mov r10,QWORD[((8+96))+rsp] - lea rsi,[((-128+96))+rsp] - mov r11,QWORD[((16+96))+rsp] - mov r12,QWORD[((24+96))+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[448+rsp] - lea rbx,[448+rsp] - mov 
r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[416+rsp] - lea rbx,[416+rsp] - mov r9,QWORD[((0+224))+rsp] - mov r10,QWORD[((8+224))+rsp] - lea rsi,[((-128+224))+rsp] - mov r11,QWORD[((16+224))+rsp] - mov r12,QWORD[((24+224))+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[512+rsp] - lea rbx,[512+rsp] - mov r9,QWORD[((0+256))+rsp] - mov r10,QWORD[((8+256))+rsp] - lea rsi,[((-128+256))+rsp] - mov r11,QWORD[((16+256))+rsp] - mov r12,QWORD[((24+256))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[224+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromx - - or r12,r13 - movdqa xmm2,xmm4 - or r12,r8 - or r12,r9 - por xmm2,xmm5 -DB 102,73,15,110,220 - - mov rdx,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+96))+rsp] - mov r10,QWORD[((8+96))+rsp] - lea rsi,[((-128+96))+rsp] - mov r11,QWORD[((16+96))+rsp] - mov r12,QWORD[((24+96))+rsp] - lea rdi,[160+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[480+rsp] - lea rbx,[480+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[160+rsp] - lea rdi,[rsp] - call __ecp_nistz256_sub_fromx - - or r12,r13 - or r12,r8 - or r12,r9 - -DB 102,73,15,126,208 -DB 102,73,15,126,217 - or r12,r8 -DB 0x3e - jnz NEAR $L$add_proceedx - - - - test r9,r9 - jz NEAR $L$add_doublex - - - - - - -DB 102,72,15,126,199 - pxor xmm0,xmm0 - movdqu XMMWORD[rdi],xmm0 - movdqu XMMWORD[16+rdi],xmm0 - movdqu XMMWORD[32+rdi],xmm0 - movdqu XMMWORD[48+rdi],xmm0 - movdqu XMMWORD[64+rdi],xmm0 - movdqu XMMWORD[80+rdi],xmm0 - jmp NEAR $L$add_donex - -ALIGN 32 -$L$add_doublex: -DB 102,72,15,126,206 -DB 102,72,15,126,199 - add rsp,416 - - jmp NEAR $L$point_double_shortcutx - - 
-ALIGN 32 -$L$add_proceedx: - mov rdx,QWORD[((0+64))+rsp] - mov r14,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r15,QWORD[((16+64))+rsp] - mov r8,QWORD[((24+64))+rsp] - lea rdi,[96+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[448+rsp] - lea rbx,[448+rsp] - mov r9,QWORD[((0+0))+rsp] - mov r10,QWORD[((8+0))+rsp] - lea rsi,[((-128+0))+rsp] - mov r11,QWORD[((16+0))+rsp] - mov r12,QWORD[((24+0))+rsp] - lea rdi,[352+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[((0+0))+rsp] - mov r14,QWORD[((8+0))+rsp] - lea rsi,[((-128+0))+rsp] - mov r15,QWORD[((16+0))+rsp] - mov r8,QWORD[((24+0))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[544+rsp] - lea rbx,[544+rsp] - mov r9,QWORD[((0+352))+rsp] - mov r10,QWORD[((8+352))+rsp] - lea rsi,[((-128+352))+rsp] - mov r11,QWORD[((16+352))+rsp] - mov r12,QWORD[((24+352))+rsp] - lea rdi,[352+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[rsp] - lea rbx,[rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[128+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[160+rsp] - lea rbx,[160+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_mul_montx - - - - - xor r11,r11 - add r12,r12 - lea rsi,[96+rsp] - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - mov rax,QWORD[rsi] - cmovc r13,rbp - mov rbp,QWORD[8+rsi] - cmovc r8,rcx - mov rcx,QWORD[16+rsi] - cmovc r9,r10 - mov r10,QWORD[24+rsi] - - call __ecp_nistz256_subx - - lea rbx,[128+rsp] - lea rdi,[288+rsp] - call __ecp_nistz256_sub_fromx - - mov rax,QWORD[((192+0))+rsp] - mov rbp,QWORD[((192+8))+rsp] - mov rcx,QWORD[((192+16))+rsp] - mov 
r10,QWORD[((192+24))+rsp] - lea rdi,[320+rsp] - - call __ecp_nistz256_subx - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - mov rdx,QWORD[128+rsp] - lea rbx,[128+rsp] - mov r9,QWORD[((0+224))+rsp] - mov r10,QWORD[((8+224))+rsp] - lea rsi,[((-128+224))+rsp] - mov r11,QWORD[((16+224))+rsp] - mov r12,QWORD[((24+224))+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[320+rsp] - lea rbx,[320+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[320+rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[256+rsp] - lea rdi,[320+rsp] - call __ecp_nistz256_sub_fromx - -DB 102,72,15,126,199 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[352+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((352+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[544+rsp] - pand xmm3,XMMWORD[((544+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[448+rsp] - pand xmm3,XMMWORD[((448+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[64+rdi],xmm2 - movdqu XMMWORD[80+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[288+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((288+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[480+rsp] - pand xmm3,XMMWORD[((480+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[384+rsp] - pand xmm3,XMMWORD[((384+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[rdi],xmm2 - movdqu XMMWORD[16+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[320+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((320+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[512+rsp] - pand xmm3,XMMWORD[((512+16))+rsp] - 
por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[416+rsp] - pand xmm3,XMMWORD[((416+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm3 - -$L$add_donex: - lea rsi,[((576+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$point_addx_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_addx: - -ALIGN 32 -ecp_nistz256_point_add_affinex: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_ecp_nistz256_point_add_affinex: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$point_add_affinex: - push rbp - - push rbx - - push r12 - - push r13 - - push r14 - - push r15 - - sub rsp,32*15+8 - -$L$add_affinex_body: - - movdqu xmm0,XMMWORD[rsi] - mov rbx,rdx - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] - movdqu xmm3,XMMWORD[48+rsi] - movdqu xmm4,XMMWORD[64+rsi] - movdqu xmm5,XMMWORD[80+rsi] - mov rdx,QWORD[((64+0))+rsi] - mov r14,QWORD[((64+8))+rsi] - mov r15,QWORD[((64+16))+rsi] - mov r8,QWORD[((64+24))+rsi] - movdqa XMMWORD[320+rsp],xmm0 - movdqa XMMWORD[(320+16)+rsp],xmm1 - movdqa XMMWORD[352+rsp],xmm2 - movdqa XMMWORD[(352+16)+rsp],xmm3 - movdqa XMMWORD[384+rsp],xmm4 - movdqa XMMWORD[(384+16)+rsp],xmm5 - por xmm5,xmm4 - - movdqu xmm0,XMMWORD[rbx] - pshufd xmm3,xmm5,0xb1 - movdqu xmm1,XMMWORD[16+rbx] - movdqu xmm2,XMMWORD[32+rbx] - por xmm5,xmm3 - movdqu xmm3,XMMWORD[48+rbx] - movdqa XMMWORD[416+rsp],xmm0 - pshufd xmm4,xmm5,0x1e - movdqa XMMWORD[(416+16)+rsp],xmm1 - por xmm1,xmm0 -DB 102,72,15,110,199 - movdqa XMMWORD[448+rsp],xmm2 - movdqa XMMWORD[(448+16)+rsp],xmm3 - por xmm3,xmm2 - por xmm5,xmm4 - pxor xmm4,xmm4 - por 
xmm3,xmm1 - - lea rsi,[((64-128))+rsi] - lea rdi,[32+rsp] - call __ecp_nistz256_sqr_montx - - pcmpeqd xmm5,xmm4 - pshufd xmm4,xmm3,0xb1 - mov rdx,QWORD[rbx] - - mov r9,r12 - por xmm4,xmm3 - pshufd xmm5,xmm5,0 - pshufd xmm3,xmm4,0x1e - mov r10,r13 - por xmm4,xmm3 - pxor xmm3,xmm3 - mov r11,r14 - pcmpeqd xmm4,xmm3 - pshufd xmm4,xmm4,0 - - lea rsi,[((32-128))+rsp] - mov r12,r15 - lea rdi,[rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[320+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_sub_fromx - - mov rdx,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[384+rsp] - lea rbx,[384+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[288+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[448+rsp] - lea rbx,[448+rsp] - mov r9,QWORD[((0+32))+rsp] - mov r10,QWORD[((8+32))+rsp] - lea rsi,[((-128+32))+rsp] - mov r11,QWORD[((16+32))+rsp] - mov r12,QWORD[((24+32))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[352+rsp] - lea rdi,[96+rsp] - call __ecp_nistz256_sub_fromx - - mov rdx,QWORD[((0+64))+rsp] - mov r14,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r15,QWORD[((16+64))+rsp] - mov r8,QWORD[((24+64))+rsp] - lea rdi,[128+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[((0+96))+rsp] - mov r14,QWORD[((8+96))+rsp] - lea rsi,[((-128+96))+rsp] - mov r15,QWORD[((16+96))+rsp] - mov r8,QWORD[((24+96))+rsp] - lea rdi,[192+rsp] - call __ecp_nistz256_sqr_montx - - mov rdx,QWORD[128+rsp] - lea rbx,[128+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[160+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[320+rsp] - 
lea rbx,[320+rsp] - mov r9,QWORD[((0+128))+rsp] - mov r10,QWORD[((8+128))+rsp] - lea rsi,[((-128+128))+rsp] - mov r11,QWORD[((16+128))+rsp] - mov r12,QWORD[((24+128))+rsp] - lea rdi,[rsp] - call __ecp_nistz256_mul_montx - - - - - xor r11,r11 - add r12,r12 - lea rsi,[192+rsp] - adc r13,r13 - mov rax,r12 - adc r8,r8 - adc r9,r9 - mov rbp,r13 - adc r11,0 - - sub r12,-1 - mov rcx,r8 - sbb r13,r14 - sbb r8,0 - mov r10,r9 - sbb r9,r15 - sbb r11,0 - - cmovc r12,rax - mov rax,QWORD[rsi] - cmovc r13,rbp - mov rbp,QWORD[8+rsi] - cmovc r8,rcx - mov rcx,QWORD[16+rsi] - cmovc r9,r10 - mov r10,QWORD[24+rsi] - - call __ecp_nistz256_subx - - lea rbx,[160+rsp] - lea rdi,[224+rsp] - call __ecp_nistz256_sub_fromx - - mov rax,QWORD[((0+0))+rsp] - mov rbp,QWORD[((0+8))+rsp] - mov rcx,QWORD[((0+16))+rsp] - mov r10,QWORD[((0+24))+rsp] - lea rdi,[64+rsp] - - call __ecp_nistz256_subx - - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r8 - mov QWORD[24+rdi],r9 - mov rdx,QWORD[352+rsp] - lea rbx,[352+rsp] - mov r9,QWORD[((0+160))+rsp] - mov r10,QWORD[((8+160))+rsp] - lea rsi,[((-128+160))+rsp] - mov r11,QWORD[((16+160))+rsp] - mov r12,QWORD[((24+160))+rsp] - lea rdi,[32+rsp] - call __ecp_nistz256_mul_montx - - mov rdx,QWORD[96+rsp] - lea rbx,[96+rsp] - mov r9,QWORD[((0+64))+rsp] - mov r10,QWORD[((8+64))+rsp] - lea rsi,[((-128+64))+rsp] - mov r11,QWORD[((16+64))+rsp] - mov r12,QWORD[((24+64))+rsp] - lea rdi,[64+rsp] - call __ecp_nistz256_mul_montx - - lea rbx,[32+rsp] - lea rdi,[256+rsp] - call __ecp_nistz256_sub_fromx - -DB 102,72,15,126,199 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[288+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((288+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[$L$ONE_mont] - pand xmm3,XMMWORD[(($L$ONE_mont+16))] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[384+rsp] - pand xmm3,XMMWORD[((384+16))+rsp] - por 
xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[64+rdi],xmm2 - movdqu XMMWORD[80+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[224+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((224+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[416+rsp] - pand xmm3,XMMWORD[((416+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[320+rsp] - pand xmm3,XMMWORD[((320+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[rdi],xmm2 - movdqu XMMWORD[16+rdi],xmm3 - - movdqa xmm0,xmm5 - movdqa xmm1,xmm5 - pandn xmm0,XMMWORD[256+rsp] - movdqa xmm2,xmm5 - pandn xmm1,XMMWORD[((256+16))+rsp] - movdqa xmm3,xmm5 - pand xmm2,XMMWORD[448+rsp] - pand xmm3,XMMWORD[((448+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - - movdqa xmm0,xmm4 - movdqa xmm1,xmm4 - pandn xmm0,xmm2 - movdqa xmm2,xmm4 - pandn xmm1,xmm3 - movdqa xmm3,xmm4 - pand xmm2,XMMWORD[352+rsp] - pand xmm3,XMMWORD[((352+16))+rsp] - por xmm2,xmm0 - por xmm3,xmm1 - movdqu XMMWORD[32+rdi],xmm2 - movdqu XMMWORD[48+rdi],xmm3 - - lea rsi,[((480+56))+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbx,QWORD[((-16))+rsi] - - mov rbp,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$add_affinex_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_ecp_nistz256_point_add_affinex: -EXTERN __imp_RtlVirtualUnwind - - -ALIGN 16 -short_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rax,[16+rax] - 
- mov r12,QWORD[((-8))+rax] - mov r13,QWORD[((-16))+rax] - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - - jmp NEAR $L$common_seh_tail - - - -ALIGN 16 -full_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov r10d,DWORD[8+r11] - lea rax,[r10*1+rax] - - mov rbp,QWORD[((-8))+rax] - mov rbx,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_ecp_nistz256_neg wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_neg wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_neg wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_ord_mul_mont wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_ord_mul_mont wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_ord_mul_mont wrt ..imagebase - - DD 
$L$SEH_begin_ecp_nistz256_ord_sqr_mont wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_ord_sqr_mont wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_ord_sqr_mont wrt ..imagebase - DD $L$SEH_begin_ecp_nistz256_ord_mul_montx wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_ord_mul_montx wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_ord_mul_montx wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_ord_sqr_montx wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_ord_sqr_montx wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_ord_sqr_montx wrt ..imagebase - DD $L$SEH_begin_ecp_nistz256_mul_mont wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_mul_mont wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_mul_mont wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_sqr_mont wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_sqr_mont wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_sqr_mont wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_select_w5 wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_select_w5 wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_select_wX wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_select_w7 wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_select_w7 wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_select_wX wrt ..imagebase - DD $L$SEH_begin_ecp_nistz256_avx2_select_w5 wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_avx2_select_w5 wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_avx2_select_wX wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_avx2_select_w7 wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_avx2_select_w7 wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_avx2_select_wX wrt ..imagebase - DD $L$SEH_begin_ecp_nistz256_point_double wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_point_double wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_double wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_point_add wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_point_add wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_add wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_point_add_affine wrt ..imagebase - DD 
$L$SEH_end_ecp_nistz256_point_add_affine wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_add_affine wrt ..imagebase - DD $L$SEH_begin_ecp_nistz256_point_doublex wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_point_doublex wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_doublex wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_point_addx wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_point_addx wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_addx wrt ..imagebase - - DD $L$SEH_begin_ecp_nistz256_point_add_affinex wrt ..imagebase - DD $L$SEH_end_ecp_nistz256_point_add_affinex wrt ..imagebase - DD $L$SEH_info_ecp_nistz256_point_add_affinex wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_ecp_nistz256_neg: -DB 9,0,0,0 - DD short_handler wrt ..imagebase - DD $L$neg_body wrt ..imagebase,$L$neg_epilogue wrt ..imagebase -$L$SEH_info_ecp_nistz256_ord_mul_mont: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$ord_mul_body wrt ..imagebase,$L$ord_mul_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_ord_sqr_mont: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$ord_sqr_body wrt ..imagebase,$L$ord_sqr_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_ord_mul_montx: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$ord_mulx_body wrt ..imagebase,$L$ord_mulx_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_ord_sqr_montx: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$ord_sqrx_body wrt ..imagebase,$L$ord_sqrx_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_mul_mont: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$mul_body wrt ..imagebase,$L$mul_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_sqr_mont: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$sqr_body wrt ..imagebase,$L$sqr_epilogue wrt ..imagebase - DD 48,0 -$L$SEH_info_ecp_nistz256_select_wX: -DB 0x01,0x33,0x16,0x00 -DB 0x33,0xf8,0x09,0x00 -DB 0x2e,0xe8,0x08,0x00 -DB 0x29,0xd8,0x07,0x00 -DB 
0x24,0xc8,0x06,0x00 -DB 0x1f,0xb8,0x05,0x00 -DB 0x1a,0xa8,0x04,0x00 -DB 0x15,0x98,0x03,0x00 -DB 0x10,0x88,0x02,0x00 -DB 0x0c,0x78,0x01,0x00 -DB 0x08,0x68,0x00,0x00 -DB 0x04,0x01,0x15,0x00 -ALIGN 8 -$L$SEH_info_ecp_nistz256_avx2_select_wX: -DB 0x01,0x36,0x17,0x0b -DB 0x36,0xf8,0x09,0x00 -DB 0x31,0xe8,0x08,0x00 -DB 0x2c,0xd8,0x07,0x00 -DB 0x27,0xc8,0x06,0x00 -DB 0x22,0xb8,0x05,0x00 -DB 0x1d,0xa8,0x04,0x00 -DB 0x18,0x98,0x03,0x00 -DB 0x13,0x88,0x02,0x00 -DB 0x0e,0x78,0x01,0x00 -DB 0x09,0x68,0x00,0x00 -DB 0x04,0x01,0x15,0x00 -DB 0x00,0xb3,0x00,0x00 -ALIGN 8 -$L$SEH_info_ecp_nistz256_point_double: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$point_doubleq_body wrt ..imagebase,$L$point_doubleq_epilogue wrt ..imagebase - DD 32*5+56,0 -$L$SEH_info_ecp_nistz256_point_add: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$point_addq_body wrt ..imagebase,$L$point_addq_epilogue wrt ..imagebase - DD 32*18+56,0 -$L$SEH_info_ecp_nistz256_point_add_affine: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$add_affineq_body wrt ..imagebase,$L$add_affineq_epilogue wrt ..imagebase - DD 32*15+56,0 -ALIGN 8 -$L$SEH_info_ecp_nistz256_point_doublex: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$point_doublex_body wrt ..imagebase,$L$point_doublex_epilogue wrt ..imagebase - DD 32*5+56,0 -$L$SEH_info_ecp_nistz256_point_addx: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$point_addx_body wrt ..imagebase,$L$point_addx_epilogue wrt ..imagebase - DD 32*18+56,0 -$L$SEH_info_ecp_nistz256_point_add_affinex: -DB 9,0,0,0 - DD full_handler wrt ..imagebase - DD $L$add_affinex_body wrt ..imagebase,$L$add_affinex_epilogue wrt ..imagebase - DD 32*15+56,0 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm deleted file mode 100644 index 563699d59d..0000000000 --- 
a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm +++ /dev/null @@ -1,339 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - - -global beeu_mod_inverse_vartime -ALIGN 32 -beeu_mod_inverse_vartime: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_beeu_mod_inverse_vartime: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - push rbx - - push rsi - - - sub rsp,80 - - mov QWORD[rsp],rdi - - - mov r8,1 - xor r9,r9 - xor r10,r10 - xor r11,r11 - xor rdi,rdi - - xor r12,r12 - xor r13,r13 - xor r14,r14 - xor r15,r15 - xor rbp,rbp - - - vmovdqu xmm0,XMMWORD[rsi] - vmovdqu xmm1,XMMWORD[16+rsi] - vmovdqu XMMWORD[48+rsp],xmm0 - vmovdqu XMMWORD[64+rsp],xmm1 - - vmovdqu xmm0,XMMWORD[rdx] - vmovdqu xmm1,XMMWORD[16+rdx] - vmovdqu XMMWORD[16+rsp],xmm0 - vmovdqu XMMWORD[32+rsp],xmm1 - -$L$beeu_loop: - xor rbx,rbx - or rbx,QWORD[48+rsp] - or rbx,QWORD[56+rsp] - or rbx,QWORD[64+rsp] - or rbx,QWORD[72+rsp] - jz NEAR $L$beeu_loop_end - - - - - - - - - - - mov rcx,1 - - -$L$beeu_shift_loop_XB: - mov rbx,rcx - and rbx,QWORD[48+rsp] - jnz NEAR $L$beeu_shift_loop_end_XB - - - mov rbx,1 - and rbx,r8 - jz NEAR $L$shift1_0 - add r8,QWORD[rdx] - adc r9,QWORD[8+rdx] - adc r10,QWORD[16+rdx] - adc r11,QWORD[24+rdx] - adc rdi,0 - -$L$shift1_0: - shrd r8,r9,1 - shrd r9,r10,1 - shrd r10,r11,1 - shrd r11,rdi,1 - shr rdi,1 - - shl rcx,1 - - - - - - cmp rcx,0x8000000 - jne NEAR $L$beeu_shift_loop_XB - -$L$beeu_shift_loop_end_XB: - bsf rcx,rcx - test rcx,rcx - jz NEAR $L$beeu_no_shift_XB - - - - mov rax,QWORD[((8+48))+rsp] - mov rbx,QWORD[((16+48))+rsp] - mov 
rsi,QWORD[((24+48))+rsp] - - shrd QWORD[((0+48))+rsp],rax,cl - shrd QWORD[((8+48))+rsp],rbx,cl - shrd QWORD[((16+48))+rsp],rsi,cl - - shr rsi,cl - mov QWORD[((24+48))+rsp],rsi - - -$L$beeu_no_shift_XB: - - mov rcx,1 - - -$L$beeu_shift_loop_YA: - mov rbx,rcx - and rbx,QWORD[16+rsp] - jnz NEAR $L$beeu_shift_loop_end_YA - - - mov rbx,1 - and rbx,r12 - jz NEAR $L$shift1_1 - add r12,QWORD[rdx] - adc r13,QWORD[8+rdx] - adc r14,QWORD[16+rdx] - adc r15,QWORD[24+rdx] - adc rbp,0 - -$L$shift1_1: - shrd r12,r13,1 - shrd r13,r14,1 - shrd r14,r15,1 - shrd r15,rbp,1 - shr rbp,1 - - shl rcx,1 - - - - - - cmp rcx,0x8000000 - jne NEAR $L$beeu_shift_loop_YA - -$L$beeu_shift_loop_end_YA: - bsf rcx,rcx - test rcx,rcx - jz NEAR $L$beeu_no_shift_YA - - - - mov rax,QWORD[((8+16))+rsp] - mov rbx,QWORD[((16+16))+rsp] - mov rsi,QWORD[((24+16))+rsp] - - shrd QWORD[((0+16))+rsp],rax,cl - shrd QWORD[((8+16))+rsp],rbx,cl - shrd QWORD[((16+16))+rsp],rsi,cl - - shr rsi,cl - mov QWORD[((24+16))+rsp],rsi - - -$L$beeu_no_shift_YA: - - mov rax,QWORD[48+rsp] - mov rbx,QWORD[56+rsp] - mov rsi,QWORD[64+rsp] - mov rcx,QWORD[72+rsp] - sub rax,QWORD[16+rsp] - sbb rbx,QWORD[24+rsp] - sbb rsi,QWORD[32+rsp] - sbb rcx,QWORD[40+rsp] - jnc NEAR $L$beeu_B_bigger_than_A - - - mov rax,QWORD[16+rsp] - mov rbx,QWORD[24+rsp] - mov rsi,QWORD[32+rsp] - mov rcx,QWORD[40+rsp] - sub rax,QWORD[48+rsp] - sbb rbx,QWORD[56+rsp] - sbb rsi,QWORD[64+rsp] - sbb rcx,QWORD[72+rsp] - mov QWORD[16+rsp],rax - mov QWORD[24+rsp],rbx - mov QWORD[32+rsp],rsi - mov QWORD[40+rsp],rcx - - - add r12,r8 - adc r13,r9 - adc r14,r10 - adc r15,r11 - adc rbp,rdi - jmp NEAR $L$beeu_loop - -$L$beeu_B_bigger_than_A: - - mov QWORD[48+rsp],rax - mov QWORD[56+rsp],rbx - mov QWORD[64+rsp],rsi - mov QWORD[72+rsp],rcx - - - add r8,r12 - adc r9,r13 - adc r10,r14 - adc r11,r15 - adc rdi,rbp - - jmp NEAR $L$beeu_loop - -$L$beeu_loop_end: - - - - - mov rbx,QWORD[16+rsp] - sub rbx,1 - or rbx,QWORD[24+rsp] - or rbx,QWORD[32+rsp] - or rbx,QWORD[40+rsp] - - jnz NEAR 
$L$beeu_err - - - - - mov r8,QWORD[rdx] - mov r9,QWORD[8+rdx] - mov r10,QWORD[16+rdx] - mov r11,QWORD[24+rdx] - xor rdi,rdi - -$L$beeu_reduction_loop: - mov QWORD[16+rsp],r12 - mov QWORD[24+rsp],r13 - mov QWORD[32+rsp],r14 - mov QWORD[40+rsp],r15 - mov QWORD[48+rsp],rbp - - - sub r12,r8 - sbb r13,r9 - sbb r14,r10 - sbb r15,r11 - sbb rbp,0 - - - cmovc r12,QWORD[16+rsp] - cmovc r13,QWORD[24+rsp] - cmovc r14,QWORD[32+rsp] - cmovc r15,QWORD[40+rsp] - jnc NEAR $L$beeu_reduction_loop - - - sub r8,r12 - sbb r9,r13 - sbb r10,r14 - sbb r11,r15 - -$L$beeu_save: - - mov rdi,QWORD[rsp] - - mov QWORD[rdi],r8 - mov QWORD[8+rdi],r9 - mov QWORD[16+rdi],r10 - mov QWORD[24+rdi],r11 - - - mov rax,1 - jmp NEAR $L$beeu_finish - -$L$beeu_err: - - xor rax,rax - -$L$beeu_finish: - add rsp,80 - - pop rsi - - pop rbx - - pop r15 - - pop r14 - - pop r13 - - pop r12 - - pop rbp - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - - -$L$SEH_end_beeu_mod_inverse_vartime: diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm deleted file mode 100644 index 89b91de10d..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm +++ /dev/null @@ -1,58 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - - - -global CRYPTO_rdrand - -ALIGN 16 -CRYPTO_rdrand: - - xor rax,rax -DB 73,15,199,240 - - adc rax,rax - mov QWORD[rcx],r8 - DB 0F3h,0C3h ;repret - - - - - - - -global CRYPTO_rdrand_multiple8_buf - -ALIGN 16 -CRYPTO_rdrand_multiple8_buf: - - test rdx,rdx - jz NEAR $L$out - mov r8,8 -$L$loop: -DB 73,15,199,241 - jnc NEAR $L$err - mov QWORD[rcx],r9 - add rcx,r8 - sub rdx,r8 - jnz NEAR $L$loop -$L$out: - mov rax,1 - DB 0F3h,0C3h ;repret -$L$err: - xor rax,rax - DB 0F3h,0C3h ;repret - - diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm deleted file mode 100644 index 74e2705cb9..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm +++ /dev/null @@ -1,1972 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -global rsaz_1024_sqr_avx2 - -ALIGN 64 -rsaz_1024_sqr_avx2: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_rsaz_1024_sqr_avx2: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - lea rax,[rsp] - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - vzeroupper - lea rsp,[((-168))+rsp] - vmovaps XMMWORD[(-216)+rax],xmm6 - vmovaps XMMWORD[(-200)+rax],xmm7 - vmovaps XMMWORD[(-184)+rax],xmm8 - vmovaps XMMWORD[(-168)+rax],xmm9 - vmovaps XMMWORD[(-152)+rax],xmm10 - vmovaps XMMWORD[(-136)+rax],xmm11 - vmovaps XMMWORD[(-120)+rax],xmm12 - vmovaps XMMWORD[(-104)+rax],xmm13 - vmovaps XMMWORD[(-88)+rax],xmm14 - vmovaps XMMWORD[(-72)+rax],xmm15 -$L$sqr_1024_body: - mov rbp,rax - - mov r13,rdx - sub rsp,832 - mov r15,r13 - sub rdi,-128 - sub rsi,-128 - sub r13,-128 - - and r15,4095 - add r15,32*10 - shr r15,12 - vpxor ymm9,ymm9,ymm9 - jz NEAR $L$sqr_1024_no_n_copy - - - - - - sub rsp,32*10 - vmovdqu ymm0,YMMWORD[((0-128))+r13] - and rsp,-2048 - vmovdqu ymm1,YMMWORD[((32-128))+r13] - vmovdqu ymm2,YMMWORD[((64-128))+r13] - vmovdqu ymm3,YMMWORD[((96-128))+r13] - vmovdqu ymm4,YMMWORD[((128-128))+r13] - vmovdqu ymm5,YMMWORD[((160-128))+r13] - vmovdqu ymm6,YMMWORD[((192-128))+r13] - vmovdqu ymm7,YMMWORD[((224-128))+r13] - vmovdqu ymm8,YMMWORD[((256-128))+r13] - lea r13,[((832+128))+rsp] - vmovdqu YMMWORD[(0-128)+r13],ymm0 - vmovdqu YMMWORD[(32-128)+r13],ymm1 - vmovdqu YMMWORD[(64-128)+r13],ymm2 - vmovdqu YMMWORD[(96-128)+r13],ymm3 - vmovdqu YMMWORD[(128-128)+r13],ymm4 - vmovdqu YMMWORD[(160-128)+r13],ymm5 - vmovdqu YMMWORD[(192-128)+r13],ymm6 - vmovdqu YMMWORD[(224-128)+r13],ymm7 - vmovdqu YMMWORD[(256-128)+r13],ymm8 - vmovdqu YMMWORD[(288-128)+r13],ymm9 - -$L$sqr_1024_no_n_copy: - and rsp,-1024 - - vmovdqu 
ymm1,YMMWORD[((32-128))+rsi] - vmovdqu ymm2,YMMWORD[((64-128))+rsi] - vmovdqu ymm3,YMMWORD[((96-128))+rsi] - vmovdqu ymm4,YMMWORD[((128-128))+rsi] - vmovdqu ymm5,YMMWORD[((160-128))+rsi] - vmovdqu ymm6,YMMWORD[((192-128))+rsi] - vmovdqu ymm7,YMMWORD[((224-128))+rsi] - vmovdqu ymm8,YMMWORD[((256-128))+rsi] - - lea rbx,[192+rsp] - vmovdqu ymm15,YMMWORD[$L$and_mask] - jmp NEAR $L$OOP_GRANDE_SQR_1024 - -ALIGN 32 -$L$OOP_GRANDE_SQR_1024: - lea r9,[((576+128))+rsp] - lea r12,[448+rsp] - - - - - vpaddq ymm1,ymm1,ymm1 - vpbroadcastq ymm10,QWORD[((0-128))+rsi] - vpaddq ymm2,ymm2,ymm2 - vmovdqa YMMWORD[(0-128)+r9],ymm1 - vpaddq ymm3,ymm3,ymm3 - vmovdqa YMMWORD[(32-128)+r9],ymm2 - vpaddq ymm4,ymm4,ymm4 - vmovdqa YMMWORD[(64-128)+r9],ymm3 - vpaddq ymm5,ymm5,ymm5 - vmovdqa YMMWORD[(96-128)+r9],ymm4 - vpaddq ymm6,ymm6,ymm6 - vmovdqa YMMWORD[(128-128)+r9],ymm5 - vpaddq ymm7,ymm7,ymm7 - vmovdqa YMMWORD[(160-128)+r9],ymm6 - vpaddq ymm8,ymm8,ymm8 - vmovdqa YMMWORD[(192-128)+r9],ymm7 - vpxor ymm9,ymm9,ymm9 - vmovdqa YMMWORD[(224-128)+r9],ymm8 - - vpmuludq ymm0,ymm10,YMMWORD[((0-128))+rsi] - vpbroadcastq ymm11,QWORD[((32-128))+rsi] - vmovdqu YMMWORD[(288-192)+rbx],ymm9 - vpmuludq ymm1,ymm1,ymm10 - vmovdqu YMMWORD[(320-448)+r12],ymm9 - vpmuludq ymm2,ymm2,ymm10 - vmovdqu YMMWORD[(352-448)+r12],ymm9 - vpmuludq ymm3,ymm3,ymm10 - vmovdqu YMMWORD[(384-448)+r12],ymm9 - vpmuludq ymm4,ymm4,ymm10 - vmovdqu YMMWORD[(416-448)+r12],ymm9 - vpmuludq ymm5,ymm5,ymm10 - vmovdqu YMMWORD[(448-448)+r12],ymm9 - vpmuludq ymm6,ymm6,ymm10 - vmovdqu YMMWORD[(480-448)+r12],ymm9 - vpmuludq ymm7,ymm7,ymm10 - vmovdqu YMMWORD[(512-448)+r12],ymm9 - vpmuludq ymm8,ymm8,ymm10 - vpbroadcastq ymm10,QWORD[((64-128))+rsi] - vmovdqu YMMWORD[(544-448)+r12],ymm9 - - mov r15,rsi - mov r14d,4 - jmp NEAR $L$sqr_entry_1024 -ALIGN 32 -$L$OOP_SQR_1024: - vpbroadcastq ymm11,QWORD[((32-128))+r15] - vpmuludq ymm0,ymm10,YMMWORD[((0-128))+rsi] - vpaddq ymm0,ymm0,YMMWORD[((0-192))+rbx] - vpmuludq ymm1,ymm10,YMMWORD[((0-128))+r9] - vpaddq 
ymm1,ymm1,YMMWORD[((32-192))+rbx] - vpmuludq ymm2,ymm10,YMMWORD[((32-128))+r9] - vpaddq ymm2,ymm2,YMMWORD[((64-192))+rbx] - vpmuludq ymm3,ymm10,YMMWORD[((64-128))+r9] - vpaddq ymm3,ymm3,YMMWORD[((96-192))+rbx] - vpmuludq ymm4,ymm10,YMMWORD[((96-128))+r9] - vpaddq ymm4,ymm4,YMMWORD[((128-192))+rbx] - vpmuludq ymm5,ymm10,YMMWORD[((128-128))+r9] - vpaddq ymm5,ymm5,YMMWORD[((160-192))+rbx] - vpmuludq ymm6,ymm10,YMMWORD[((160-128))+r9] - vpaddq ymm6,ymm6,YMMWORD[((192-192))+rbx] - vpmuludq ymm7,ymm10,YMMWORD[((192-128))+r9] - vpaddq ymm7,ymm7,YMMWORD[((224-192))+rbx] - vpmuludq ymm8,ymm10,YMMWORD[((224-128))+r9] - vpbroadcastq ymm10,QWORD[((64-128))+r15] - vpaddq ymm8,ymm8,YMMWORD[((256-192))+rbx] -$L$sqr_entry_1024: - vmovdqu YMMWORD[(0-192)+rbx],ymm0 - vmovdqu YMMWORD[(32-192)+rbx],ymm1 - - vpmuludq ymm12,ymm11,YMMWORD[((32-128))+rsi] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm14,ymm11,YMMWORD[((32-128))+r9] - vpaddq ymm3,ymm3,ymm14 - vpmuludq ymm13,ymm11,YMMWORD[((64-128))+r9] - vpaddq ymm4,ymm4,ymm13 - vpmuludq ymm12,ymm11,YMMWORD[((96-128))+r9] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm14,ymm11,YMMWORD[((128-128))+r9] - vpaddq ymm6,ymm6,ymm14 - vpmuludq ymm13,ymm11,YMMWORD[((160-128))+r9] - vpaddq ymm7,ymm7,ymm13 - vpmuludq ymm12,ymm11,YMMWORD[((192-128))+r9] - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm0,ymm11,YMMWORD[((224-128))+r9] - vpbroadcastq ymm11,QWORD[((96-128))+r15] - vpaddq ymm0,ymm0,YMMWORD[((288-192))+rbx] - - vmovdqu YMMWORD[(64-192)+rbx],ymm2 - vmovdqu YMMWORD[(96-192)+rbx],ymm3 - - vpmuludq ymm13,ymm10,YMMWORD[((64-128))+rsi] - vpaddq ymm4,ymm4,ymm13 - vpmuludq ymm12,ymm10,YMMWORD[((64-128))+r9] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm14,ymm10,YMMWORD[((96-128))+r9] - vpaddq ymm6,ymm6,ymm14 - vpmuludq ymm13,ymm10,YMMWORD[((128-128))+r9] - vpaddq ymm7,ymm7,ymm13 - vpmuludq ymm12,ymm10,YMMWORD[((160-128))+r9] - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm14,ymm10,YMMWORD[((192-128))+r9] - vpaddq ymm0,ymm0,ymm14 - vpmuludq ymm1,ymm10,YMMWORD[((224-128))+r9] - vpbroadcastq 
ymm10,QWORD[((128-128))+r15] - vpaddq ymm1,ymm1,YMMWORD[((320-448))+r12] - - vmovdqu YMMWORD[(128-192)+rbx],ymm4 - vmovdqu YMMWORD[(160-192)+rbx],ymm5 - - vpmuludq ymm12,ymm11,YMMWORD[((96-128))+rsi] - vpaddq ymm6,ymm6,ymm12 - vpmuludq ymm14,ymm11,YMMWORD[((96-128))+r9] - vpaddq ymm7,ymm7,ymm14 - vpmuludq ymm13,ymm11,YMMWORD[((128-128))+r9] - vpaddq ymm8,ymm8,ymm13 - vpmuludq ymm12,ymm11,YMMWORD[((160-128))+r9] - vpaddq ymm0,ymm0,ymm12 - vpmuludq ymm14,ymm11,YMMWORD[((192-128))+r9] - vpaddq ymm1,ymm1,ymm14 - vpmuludq ymm2,ymm11,YMMWORD[((224-128))+r9] - vpbroadcastq ymm11,QWORD[((160-128))+r15] - vpaddq ymm2,ymm2,YMMWORD[((352-448))+r12] - - vmovdqu YMMWORD[(192-192)+rbx],ymm6 - vmovdqu YMMWORD[(224-192)+rbx],ymm7 - - vpmuludq ymm12,ymm10,YMMWORD[((128-128))+rsi] - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm14,ymm10,YMMWORD[((128-128))+r9] - vpaddq ymm0,ymm0,ymm14 - vpmuludq ymm13,ymm10,YMMWORD[((160-128))+r9] - vpaddq ymm1,ymm1,ymm13 - vpmuludq ymm12,ymm10,YMMWORD[((192-128))+r9] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm3,ymm10,YMMWORD[((224-128))+r9] - vpbroadcastq ymm10,QWORD[((192-128))+r15] - vpaddq ymm3,ymm3,YMMWORD[((384-448))+r12] - - vmovdqu YMMWORD[(256-192)+rbx],ymm8 - vmovdqu YMMWORD[(288-192)+rbx],ymm0 - lea rbx,[8+rbx] - - vpmuludq ymm13,ymm11,YMMWORD[((160-128))+rsi] - vpaddq ymm1,ymm1,ymm13 - vpmuludq ymm12,ymm11,YMMWORD[((160-128))+r9] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm14,ymm11,YMMWORD[((192-128))+r9] - vpaddq ymm3,ymm3,ymm14 - vpmuludq ymm4,ymm11,YMMWORD[((224-128))+r9] - vpbroadcastq ymm11,QWORD[((224-128))+r15] - vpaddq ymm4,ymm4,YMMWORD[((416-448))+r12] - - vmovdqu YMMWORD[(320-448)+r12],ymm1 - vmovdqu YMMWORD[(352-448)+r12],ymm2 - - vpmuludq ymm12,ymm10,YMMWORD[((192-128))+rsi] - vpaddq ymm3,ymm3,ymm12 - vpmuludq ymm14,ymm10,YMMWORD[((192-128))+r9] - vpbroadcastq ymm0,QWORD[((256-128))+r15] - vpaddq ymm4,ymm4,ymm14 - vpmuludq ymm5,ymm10,YMMWORD[((224-128))+r9] - vpbroadcastq ymm10,QWORD[((0+8-128))+r15] - vpaddq ymm5,ymm5,YMMWORD[((448-448))+r12] 
- - vmovdqu YMMWORD[(384-448)+r12],ymm3 - vmovdqu YMMWORD[(416-448)+r12],ymm4 - lea r15,[8+r15] - - vpmuludq ymm12,ymm11,YMMWORD[((224-128))+rsi] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm6,ymm11,YMMWORD[((224-128))+r9] - vpaddq ymm6,ymm6,YMMWORD[((480-448))+r12] - - vpmuludq ymm7,ymm0,YMMWORD[((256-128))+rsi] - vmovdqu YMMWORD[(448-448)+r12],ymm5 - vpaddq ymm7,ymm7,YMMWORD[((512-448))+r12] - vmovdqu YMMWORD[(480-448)+r12],ymm6 - vmovdqu YMMWORD[(512-448)+r12],ymm7 - lea r12,[8+r12] - - dec r14d - jnz NEAR $L$OOP_SQR_1024 - - vmovdqu ymm8,YMMWORD[256+rsp] - vmovdqu ymm1,YMMWORD[288+rsp] - vmovdqu ymm2,YMMWORD[320+rsp] - lea rbx,[192+rsp] - - vpsrlq ymm14,ymm8,29 - vpand ymm8,ymm8,ymm15 - vpsrlq ymm11,ymm1,29 - vpand ymm1,ymm1,ymm15 - - vpermq ymm14,ymm14,0x93 - vpxor ymm9,ymm9,ymm9 - vpermq ymm11,ymm11,0x93 - - vpblendd ymm10,ymm14,ymm9,3 - vpblendd ymm14,ymm11,ymm14,3 - vpaddq ymm8,ymm8,ymm10 - vpblendd ymm11,ymm9,ymm11,3 - vpaddq ymm1,ymm1,ymm14 - vpaddq ymm2,ymm2,ymm11 - vmovdqu YMMWORD[(288-192)+rbx],ymm1 - vmovdqu YMMWORD[(320-192)+rbx],ymm2 - - mov rax,QWORD[rsp] - mov r10,QWORD[8+rsp] - mov r11,QWORD[16+rsp] - mov r12,QWORD[24+rsp] - vmovdqu ymm1,YMMWORD[32+rsp] - vmovdqu ymm2,YMMWORD[((64-192))+rbx] - vmovdqu ymm3,YMMWORD[((96-192))+rbx] - vmovdqu ymm4,YMMWORD[((128-192))+rbx] - vmovdqu ymm5,YMMWORD[((160-192))+rbx] - vmovdqu ymm6,YMMWORD[((192-192))+rbx] - vmovdqu ymm7,YMMWORD[((224-192))+rbx] - - mov r9,rax - imul eax,ecx - and eax,0x1fffffff - vmovd xmm12,eax - - mov rdx,rax - imul rax,QWORD[((-128))+r13] - vpbroadcastq ymm12,xmm12 - add r9,rax - mov rax,rdx - imul rax,QWORD[((8-128))+r13] - shr r9,29 - add r10,rax - mov rax,rdx - imul rax,QWORD[((16-128))+r13] - add r10,r9 - add r11,rax - imul rdx,QWORD[((24-128))+r13] - add r12,rdx - - mov rax,r10 - imul eax,ecx - and eax,0x1fffffff - - mov r14d,9 - jmp NEAR $L$OOP_REDUCE_1024 - -ALIGN 32 -$L$OOP_REDUCE_1024: - vmovd xmm13,eax - vpbroadcastq ymm13,xmm13 - - vpmuludq ymm10,ymm12,YMMWORD[((32-128))+r13] - 
mov rdx,rax - imul rax,QWORD[((-128))+r13] - vpaddq ymm1,ymm1,ymm10 - add r10,rax - vpmuludq ymm14,ymm12,YMMWORD[((64-128))+r13] - mov rax,rdx - imul rax,QWORD[((8-128))+r13] - vpaddq ymm2,ymm2,ymm14 - vpmuludq ymm11,ymm12,YMMWORD[((96-128))+r13] -DB 0x67 - add r11,rax -DB 0x67 - mov rax,rdx - imul rax,QWORD[((16-128))+r13] - shr r10,29 - vpaddq ymm3,ymm3,ymm11 - vpmuludq ymm10,ymm12,YMMWORD[((128-128))+r13] - add r12,rax - add r11,r10 - vpaddq ymm4,ymm4,ymm10 - vpmuludq ymm14,ymm12,YMMWORD[((160-128))+r13] - mov rax,r11 - imul eax,ecx - vpaddq ymm5,ymm5,ymm14 - vpmuludq ymm11,ymm12,YMMWORD[((192-128))+r13] - and eax,0x1fffffff - vpaddq ymm6,ymm6,ymm11 - vpmuludq ymm10,ymm12,YMMWORD[((224-128))+r13] - vpaddq ymm7,ymm7,ymm10 - vpmuludq ymm14,ymm12,YMMWORD[((256-128))+r13] - vmovd xmm12,eax - - vpaddq ymm8,ymm8,ymm14 - - vpbroadcastq ymm12,xmm12 - - vpmuludq ymm11,ymm13,YMMWORD[((32-8-128))+r13] - vmovdqu ymm14,YMMWORD[((96-8-128))+r13] - mov rdx,rax - imul rax,QWORD[((-128))+r13] - vpaddq ymm1,ymm1,ymm11 - vpmuludq ymm10,ymm13,YMMWORD[((64-8-128))+r13] - vmovdqu ymm11,YMMWORD[((128-8-128))+r13] - add r11,rax - mov rax,rdx - imul rax,QWORD[((8-128))+r13] - vpaddq ymm2,ymm2,ymm10 - add rax,r12 - shr r11,29 - vpmuludq ymm14,ymm14,ymm13 - vmovdqu ymm10,YMMWORD[((160-8-128))+r13] - add rax,r11 - vpaddq ymm3,ymm3,ymm14 - vpmuludq ymm11,ymm11,ymm13 - vmovdqu ymm14,YMMWORD[((192-8-128))+r13] -DB 0x67 - mov r12,rax - imul eax,ecx - vpaddq ymm4,ymm4,ymm11 - vpmuludq ymm10,ymm10,ymm13 -DB 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 - and eax,0x1fffffff - vpaddq ymm5,ymm5,ymm10 - vpmuludq ymm14,ymm14,ymm13 - vmovdqu ymm10,YMMWORD[((256-8-128))+r13] - vpaddq ymm6,ymm6,ymm14 - vpmuludq ymm11,ymm11,ymm13 - vmovdqu ymm9,YMMWORD[((288-8-128))+r13] - vmovd xmm0,eax - imul rax,QWORD[((-128))+r13] - vpaddq ymm7,ymm7,ymm11 - vpmuludq ymm10,ymm10,ymm13 - vmovdqu ymm14,YMMWORD[((32-16-128))+r13] - vpbroadcastq ymm0,xmm0 - vpaddq ymm8,ymm8,ymm10 - vpmuludq ymm9,ymm9,ymm13 - vmovdqu 
ymm11,YMMWORD[((64-16-128))+r13] - add r12,rax - - vmovdqu ymm13,YMMWORD[((32-24-128))+r13] - vpmuludq ymm14,ymm14,ymm12 - vmovdqu ymm10,YMMWORD[((96-16-128))+r13] - vpaddq ymm1,ymm1,ymm14 - vpmuludq ymm13,ymm13,ymm0 - vpmuludq ymm11,ymm11,ymm12 -DB 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff - vpaddq ymm13,ymm13,ymm1 - vpaddq ymm2,ymm2,ymm11 - vpmuludq ymm10,ymm10,ymm12 - vmovdqu ymm11,YMMWORD[((160-16-128))+r13] -DB 0x67 - vmovq rax,xmm13 - vmovdqu YMMWORD[rsp],ymm13 - vpaddq ymm3,ymm3,ymm10 - vpmuludq ymm14,ymm14,ymm12 - vmovdqu ymm10,YMMWORD[((192-16-128))+r13] - vpaddq ymm4,ymm4,ymm14 - vpmuludq ymm11,ymm11,ymm12 - vmovdqu ymm14,YMMWORD[((224-16-128))+r13] - vpaddq ymm5,ymm5,ymm11 - vpmuludq ymm10,ymm10,ymm12 - vmovdqu ymm11,YMMWORD[((256-16-128))+r13] - vpaddq ymm6,ymm6,ymm10 - vpmuludq ymm14,ymm14,ymm12 - shr r12,29 - vmovdqu ymm10,YMMWORD[((288-16-128))+r13] - add rax,r12 - vpaddq ymm7,ymm7,ymm14 - vpmuludq ymm11,ymm11,ymm12 - - mov r9,rax - imul eax,ecx - vpaddq ymm8,ymm8,ymm11 - vpmuludq ymm10,ymm10,ymm12 - and eax,0x1fffffff - vmovd xmm12,eax - vmovdqu ymm11,YMMWORD[((96-24-128))+r13] -DB 0x67 - vpaddq ymm9,ymm9,ymm10 - vpbroadcastq ymm12,xmm12 - - vpmuludq ymm14,ymm0,YMMWORD[((64-24-128))+r13] - vmovdqu ymm10,YMMWORD[((128-24-128))+r13] - mov rdx,rax - imul rax,QWORD[((-128))+r13] - mov r10,QWORD[8+rsp] - vpaddq ymm1,ymm2,ymm14 - vpmuludq ymm11,ymm11,ymm0 - vmovdqu ymm14,YMMWORD[((160-24-128))+r13] - add r9,rax - mov rax,rdx - imul rax,QWORD[((8-128))+r13] -DB 0x67 - shr r9,29 - mov r11,QWORD[16+rsp] - vpaddq ymm2,ymm3,ymm11 - vpmuludq ymm10,ymm10,ymm0 - vmovdqu ymm11,YMMWORD[((192-24-128))+r13] - add r10,rax - mov rax,rdx - imul rax,QWORD[((16-128))+r13] - vpaddq ymm3,ymm4,ymm10 - vpmuludq ymm14,ymm14,ymm0 - vmovdqu ymm10,YMMWORD[((224-24-128))+r13] - imul rdx,QWORD[((24-128))+r13] - add r11,rax - lea rax,[r10*1+r9] - vpaddq ymm4,ymm5,ymm14 - vpmuludq ymm11,ymm11,ymm0 - vmovdqu ymm14,YMMWORD[((256-24-128))+r13] - mov r10,rax - imul eax,ecx - vpmuludq 
ymm10,ymm10,ymm0 - vpaddq ymm5,ymm6,ymm11 - vmovdqu ymm11,YMMWORD[((288-24-128))+r13] - and eax,0x1fffffff - vpaddq ymm6,ymm7,ymm10 - vpmuludq ymm14,ymm14,ymm0 - add rdx,QWORD[24+rsp] - vpaddq ymm7,ymm8,ymm14 - vpmuludq ymm11,ymm11,ymm0 - vpaddq ymm8,ymm9,ymm11 - vmovq xmm9,r12 - mov r12,rdx - - dec r14d - jnz NEAR $L$OOP_REDUCE_1024 - lea r12,[448+rsp] - vpaddq ymm0,ymm13,ymm9 - vpxor ymm9,ymm9,ymm9 - - vpaddq ymm0,ymm0,YMMWORD[((288-192))+rbx] - vpaddq ymm1,ymm1,YMMWORD[((320-448))+r12] - vpaddq ymm2,ymm2,YMMWORD[((352-448))+r12] - vpaddq ymm3,ymm3,YMMWORD[((384-448))+r12] - vpaddq ymm4,ymm4,YMMWORD[((416-448))+r12] - vpaddq ymm5,ymm5,YMMWORD[((448-448))+r12] - vpaddq ymm6,ymm6,YMMWORD[((480-448))+r12] - vpaddq ymm7,ymm7,YMMWORD[((512-448))+r12] - vpaddq ymm8,ymm8,YMMWORD[((544-448))+r12] - - vpsrlq ymm14,ymm0,29 - vpand ymm0,ymm0,ymm15 - vpsrlq ymm11,ymm1,29 - vpand ymm1,ymm1,ymm15 - vpsrlq ymm12,ymm2,29 - vpermq ymm14,ymm14,0x93 - vpand ymm2,ymm2,ymm15 - vpsrlq ymm13,ymm3,29 - vpermq ymm11,ymm11,0x93 - vpand ymm3,ymm3,ymm15 - vpermq ymm12,ymm12,0x93 - - vpblendd ymm10,ymm14,ymm9,3 - vpermq ymm13,ymm13,0x93 - vpblendd ymm14,ymm11,ymm14,3 - vpaddq ymm0,ymm0,ymm10 - vpblendd ymm11,ymm12,ymm11,3 - vpaddq ymm1,ymm1,ymm14 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm2,ymm2,ymm11 - vpblendd ymm13,ymm9,ymm13,3 - vpaddq ymm3,ymm3,ymm12 - vpaddq ymm4,ymm4,ymm13 - - vpsrlq ymm14,ymm0,29 - vpand ymm0,ymm0,ymm15 - vpsrlq ymm11,ymm1,29 - vpand ymm1,ymm1,ymm15 - vpsrlq ymm12,ymm2,29 - vpermq ymm14,ymm14,0x93 - vpand ymm2,ymm2,ymm15 - vpsrlq ymm13,ymm3,29 - vpermq ymm11,ymm11,0x93 - vpand ymm3,ymm3,ymm15 - vpermq ymm12,ymm12,0x93 - - vpblendd ymm10,ymm14,ymm9,3 - vpermq ymm13,ymm13,0x93 - vpblendd ymm14,ymm11,ymm14,3 - vpaddq ymm0,ymm0,ymm10 - vpblendd ymm11,ymm12,ymm11,3 - vpaddq ymm1,ymm1,ymm14 - vmovdqu YMMWORD[(0-128)+rdi],ymm0 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm2,ymm2,ymm11 - vmovdqu YMMWORD[(32-128)+rdi],ymm1 - vpblendd ymm13,ymm9,ymm13,3 - vpaddq ymm3,ymm3,ymm12 - 
vmovdqu YMMWORD[(64-128)+rdi],ymm2 - vpaddq ymm4,ymm4,ymm13 - vmovdqu YMMWORD[(96-128)+rdi],ymm3 - vpsrlq ymm14,ymm4,29 - vpand ymm4,ymm4,ymm15 - vpsrlq ymm11,ymm5,29 - vpand ymm5,ymm5,ymm15 - vpsrlq ymm12,ymm6,29 - vpermq ymm14,ymm14,0x93 - vpand ymm6,ymm6,ymm15 - vpsrlq ymm13,ymm7,29 - vpermq ymm11,ymm11,0x93 - vpand ymm7,ymm7,ymm15 - vpsrlq ymm0,ymm8,29 - vpermq ymm12,ymm12,0x93 - vpand ymm8,ymm8,ymm15 - vpermq ymm13,ymm13,0x93 - - vpblendd ymm10,ymm14,ymm9,3 - vpermq ymm0,ymm0,0x93 - vpblendd ymm14,ymm11,ymm14,3 - vpaddq ymm4,ymm4,ymm10 - vpblendd ymm11,ymm12,ymm11,3 - vpaddq ymm5,ymm5,ymm14 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm6,ymm6,ymm11 - vpblendd ymm13,ymm0,ymm13,3 - vpaddq ymm7,ymm7,ymm12 - vpaddq ymm8,ymm8,ymm13 - - vpsrlq ymm14,ymm4,29 - vpand ymm4,ymm4,ymm15 - vpsrlq ymm11,ymm5,29 - vpand ymm5,ymm5,ymm15 - vpsrlq ymm12,ymm6,29 - vpermq ymm14,ymm14,0x93 - vpand ymm6,ymm6,ymm15 - vpsrlq ymm13,ymm7,29 - vpermq ymm11,ymm11,0x93 - vpand ymm7,ymm7,ymm15 - vpsrlq ymm0,ymm8,29 - vpermq ymm12,ymm12,0x93 - vpand ymm8,ymm8,ymm15 - vpermq ymm13,ymm13,0x93 - - vpblendd ymm10,ymm14,ymm9,3 - vpermq ymm0,ymm0,0x93 - vpblendd ymm14,ymm11,ymm14,3 - vpaddq ymm4,ymm4,ymm10 - vpblendd ymm11,ymm12,ymm11,3 - vpaddq ymm5,ymm5,ymm14 - vmovdqu YMMWORD[(128-128)+rdi],ymm4 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm6,ymm6,ymm11 - vmovdqu YMMWORD[(160-128)+rdi],ymm5 - vpblendd ymm13,ymm0,ymm13,3 - vpaddq ymm7,ymm7,ymm12 - vmovdqu YMMWORD[(192-128)+rdi],ymm6 - vpaddq ymm8,ymm8,ymm13 - vmovdqu YMMWORD[(224-128)+rdi],ymm7 - vmovdqu YMMWORD[(256-128)+rdi],ymm8 - - mov rsi,rdi - dec r8d - jne NEAR $L$OOP_GRANDE_SQR_1024 - - vzeroall - mov rax,rbp - -$L$sqr_1024_in_tail: - movaps xmm6,XMMWORD[((-216))+rax] - movaps xmm7,XMMWORD[((-200))+rax] - movaps xmm8,XMMWORD[((-184))+rax] - movaps xmm9,XMMWORD[((-168))+rax] - movaps xmm10,XMMWORD[((-152))+rax] - movaps xmm11,XMMWORD[((-136))+rax] - movaps xmm12,XMMWORD[((-120))+rax] - movaps xmm13,XMMWORD[((-104))+rax] - movaps 
xmm14,XMMWORD[((-88))+rax] - movaps xmm15,XMMWORD[((-72))+rax] - mov r15,QWORD[((-48))+rax] - - mov r14,QWORD[((-40))+rax] - - mov r13,QWORD[((-32))+rax] - - mov r12,QWORD[((-24))+rax] - - mov rbp,QWORD[((-16))+rax] - - mov rbx,QWORD[((-8))+rax] - - lea rsp,[rax] - -$L$sqr_1024_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_rsaz_1024_sqr_avx2: -global rsaz_1024_mul_avx2 - -ALIGN 64 -rsaz_1024_mul_avx2: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_rsaz_1024_mul_avx2: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - lea rax,[rsp] - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - vzeroupper - lea rsp,[((-168))+rsp] - vmovaps XMMWORD[(-216)+rax],xmm6 - vmovaps XMMWORD[(-200)+rax],xmm7 - vmovaps XMMWORD[(-184)+rax],xmm8 - vmovaps XMMWORD[(-168)+rax],xmm9 - vmovaps XMMWORD[(-152)+rax],xmm10 - vmovaps XMMWORD[(-136)+rax],xmm11 - vmovaps XMMWORD[(-120)+rax],xmm12 - vmovaps XMMWORD[(-104)+rax],xmm13 - vmovaps XMMWORD[(-88)+rax],xmm14 - vmovaps XMMWORD[(-72)+rax],xmm15 -$L$mul_1024_body: - mov rbp,rax - - vzeroall - mov r13,rdx - sub rsp,64 - - - - - - -DB 0x67,0x67 - mov r15,rsi - and r15,4095 - add r15,32*10 - shr r15,12 - mov r15,rsi - cmovnz rsi,r13 - cmovnz r13,r15 - - mov r15,rcx - sub rsi,-128 - sub rcx,-128 - sub rdi,-128 - - and r15,4095 - add r15,32*10 -DB 0x67,0x67 - shr r15,12 - jz NEAR $L$mul_1024_no_n_copy - - - - - - sub rsp,32*10 - vmovdqu ymm0,YMMWORD[((0-128))+rcx] - and rsp,-512 - vmovdqu ymm1,YMMWORD[((32-128))+rcx] - vmovdqu ymm2,YMMWORD[((64-128))+rcx] - vmovdqu ymm3,YMMWORD[((96-128))+rcx] - vmovdqu ymm4,YMMWORD[((128-128))+rcx] - vmovdqu ymm5,YMMWORD[((160-128))+rcx] - vmovdqu ymm6,YMMWORD[((192-128))+rcx] - vmovdqu ymm7,YMMWORD[((224-128))+rcx] - vmovdqu ymm8,YMMWORD[((256-128))+rcx] - lea rcx,[((64+128))+rsp] - vmovdqu YMMWORD[(0-128)+rcx],ymm0 - vpxor ymm0,ymm0,ymm0 - 
vmovdqu YMMWORD[(32-128)+rcx],ymm1 - vpxor ymm1,ymm1,ymm1 - vmovdqu YMMWORD[(64-128)+rcx],ymm2 - vpxor ymm2,ymm2,ymm2 - vmovdqu YMMWORD[(96-128)+rcx],ymm3 - vpxor ymm3,ymm3,ymm3 - vmovdqu YMMWORD[(128-128)+rcx],ymm4 - vpxor ymm4,ymm4,ymm4 - vmovdqu YMMWORD[(160-128)+rcx],ymm5 - vpxor ymm5,ymm5,ymm5 - vmovdqu YMMWORD[(192-128)+rcx],ymm6 - vpxor ymm6,ymm6,ymm6 - vmovdqu YMMWORD[(224-128)+rcx],ymm7 - vpxor ymm7,ymm7,ymm7 - vmovdqu YMMWORD[(256-128)+rcx],ymm8 - vmovdqa ymm8,ymm0 - vmovdqu YMMWORD[(288-128)+rcx],ymm9 -$L$mul_1024_no_n_copy: - and rsp,-64 - - mov rbx,QWORD[r13] - vpbroadcastq ymm10,QWORD[r13] - vmovdqu YMMWORD[rsp],ymm0 - xor r9,r9 -DB 0x67 - xor r10,r10 - xor r11,r11 - xor r12,r12 - - vmovdqu ymm15,YMMWORD[$L$and_mask] - mov r14d,9 - vmovdqu YMMWORD[(288-128)+rdi],ymm9 - jmp NEAR $L$oop_mul_1024 - -ALIGN 32 -$L$oop_mul_1024: - vpsrlq ymm9,ymm3,29 - mov rax,rbx - imul rax,QWORD[((-128))+rsi] - add rax,r9 - mov r10,rbx - imul r10,QWORD[((8-128))+rsi] - add r10,QWORD[8+rsp] - - mov r9,rax - imul eax,r8d - and eax,0x1fffffff - - mov r11,rbx - imul r11,QWORD[((16-128))+rsi] - add r11,QWORD[16+rsp] - - mov r12,rbx - imul r12,QWORD[((24-128))+rsi] - add r12,QWORD[24+rsp] - vpmuludq ymm0,ymm10,YMMWORD[((32-128))+rsi] - vmovd xmm11,eax - vpaddq ymm1,ymm1,ymm0 - vpmuludq ymm12,ymm10,YMMWORD[((64-128))+rsi] - vpbroadcastq ymm11,xmm11 - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm13,ymm10,YMMWORD[((96-128))+rsi] - vpand ymm3,ymm3,ymm15 - vpaddq ymm3,ymm3,ymm13 - vpmuludq ymm0,ymm10,YMMWORD[((128-128))+rsi] - vpaddq ymm4,ymm4,ymm0 - vpmuludq ymm12,ymm10,YMMWORD[((160-128))+rsi] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm13,ymm10,YMMWORD[((192-128))+rsi] - vpaddq ymm6,ymm6,ymm13 - vpmuludq ymm0,ymm10,YMMWORD[((224-128))+rsi] - vpermq ymm9,ymm9,0x93 - vpaddq ymm7,ymm7,ymm0 - vpmuludq ymm12,ymm10,YMMWORD[((256-128))+rsi] - vpbroadcastq ymm10,QWORD[8+r13] - vpaddq ymm8,ymm8,ymm12 - - mov rdx,rax - imul rax,QWORD[((-128))+rcx] - add r9,rax - mov rax,rdx - imul 
rax,QWORD[((8-128))+rcx] - add r10,rax - mov rax,rdx - imul rax,QWORD[((16-128))+rcx] - add r11,rax - shr r9,29 - imul rdx,QWORD[((24-128))+rcx] - add r12,rdx - add r10,r9 - - vpmuludq ymm13,ymm11,YMMWORD[((32-128))+rcx] - vmovq rbx,xmm10 - vpaddq ymm1,ymm1,ymm13 - vpmuludq ymm0,ymm11,YMMWORD[((64-128))+rcx] - vpaddq ymm2,ymm2,ymm0 - vpmuludq ymm12,ymm11,YMMWORD[((96-128))+rcx] - vpaddq ymm3,ymm3,ymm12 - vpmuludq ymm13,ymm11,YMMWORD[((128-128))+rcx] - vpaddq ymm4,ymm4,ymm13 - vpmuludq ymm0,ymm11,YMMWORD[((160-128))+rcx] - vpaddq ymm5,ymm5,ymm0 - vpmuludq ymm12,ymm11,YMMWORD[((192-128))+rcx] - vpaddq ymm6,ymm6,ymm12 - vpmuludq ymm13,ymm11,YMMWORD[((224-128))+rcx] - vpblendd ymm12,ymm9,ymm14,3 - vpaddq ymm7,ymm7,ymm13 - vpmuludq ymm0,ymm11,YMMWORD[((256-128))+rcx] - vpaddq ymm3,ymm3,ymm12 - vpaddq ymm8,ymm8,ymm0 - - mov rax,rbx - imul rax,QWORD[((-128))+rsi] - add r10,rax - vmovdqu ymm12,YMMWORD[((-8+32-128))+rsi] - mov rax,rbx - imul rax,QWORD[((8-128))+rsi] - add r11,rax - vmovdqu ymm13,YMMWORD[((-8+64-128))+rsi] - - mov rax,r10 - vpblendd ymm9,ymm9,ymm14,0xfc - imul eax,r8d - vpaddq ymm4,ymm4,ymm9 - and eax,0x1fffffff - - imul rbx,QWORD[((16-128))+rsi] - add r12,rbx - vpmuludq ymm12,ymm12,ymm10 - vmovd xmm11,eax - vmovdqu ymm0,YMMWORD[((-8+96-128))+rsi] - vpaddq ymm1,ymm1,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vpbroadcastq ymm11,xmm11 - vmovdqu ymm12,YMMWORD[((-8+128-128))+rsi] - vpaddq ymm2,ymm2,ymm13 - vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-8+160-128))+rsi] - vpaddq ymm3,ymm3,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vmovdqu ymm0,YMMWORD[((-8+192-128))+rsi] - vpaddq ymm4,ymm4,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vmovdqu ymm12,YMMWORD[((-8+224-128))+rsi] - vpaddq ymm5,ymm5,ymm13 - vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-8+256-128))+rsi] - vpaddq ymm6,ymm6,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vmovdqu ymm9,YMMWORD[((-8+288-128))+rsi] - vpaddq ymm7,ymm7,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vpaddq ymm8,ymm8,ymm13 - vpmuludq ymm9,ymm9,ymm10 - 
vpbroadcastq ymm10,QWORD[16+r13] - - mov rdx,rax - imul rax,QWORD[((-128))+rcx] - add r10,rax - vmovdqu ymm0,YMMWORD[((-8+32-128))+rcx] - mov rax,rdx - imul rax,QWORD[((8-128))+rcx] - add r11,rax - vmovdqu ymm12,YMMWORD[((-8+64-128))+rcx] - shr r10,29 - imul rdx,QWORD[((16-128))+rcx] - add r12,rdx - add r11,r10 - - vpmuludq ymm0,ymm0,ymm11 - vmovq rbx,xmm10 - vmovdqu ymm13,YMMWORD[((-8+96-128))+rcx] - vpaddq ymm1,ymm1,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-8+128-128))+rcx] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-8+160-128))+rcx] - vpaddq ymm3,ymm3,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-8+192-128))+rcx] - vpaddq ymm4,ymm4,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-8+224-128))+rcx] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-8+256-128))+rcx] - vpaddq ymm6,ymm6,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-8+288-128))+rcx] - vpaddq ymm7,ymm7,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vpaddq ymm9,ymm9,ymm13 - - vmovdqu ymm0,YMMWORD[((-16+32-128))+rsi] - mov rax,rbx - imul rax,QWORD[((-128))+rsi] - add rax,r11 - - vmovdqu ymm12,YMMWORD[((-16+64-128))+rsi] - mov r11,rax - imul eax,r8d - and eax,0x1fffffff - - imul rbx,QWORD[((8-128))+rsi] - add r12,rbx - vpmuludq ymm0,ymm0,ymm10 - vmovd xmm11,eax - vmovdqu ymm13,YMMWORD[((-16+96-128))+rsi] - vpaddq ymm1,ymm1,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vpbroadcastq ymm11,xmm11 - vmovdqu ymm0,YMMWORD[((-16+128-128))+rsi] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vmovdqu ymm12,YMMWORD[((-16+160-128))+rsi] - vpaddq ymm3,ymm3,ymm13 - vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-16+192-128))+rsi] - vpaddq ymm4,ymm4,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vmovdqu ymm0,YMMWORD[((-16+224-128))+rsi] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vmovdqu ymm12,YMMWORD[((-16+256-128))+rsi] - vpaddq ymm6,ymm6,ymm13 - 
vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-16+288-128))+rsi] - vpaddq ymm7,ymm7,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vpbroadcastq ymm10,QWORD[24+r13] - vpaddq ymm9,ymm9,ymm13 - - vmovdqu ymm0,YMMWORD[((-16+32-128))+rcx] - mov rdx,rax - imul rax,QWORD[((-128))+rcx] - add r11,rax - vmovdqu ymm12,YMMWORD[((-16+64-128))+rcx] - imul rdx,QWORD[((8-128))+rcx] - add r12,rdx - shr r11,29 - - vpmuludq ymm0,ymm0,ymm11 - vmovq rbx,xmm10 - vmovdqu ymm13,YMMWORD[((-16+96-128))+rcx] - vpaddq ymm1,ymm1,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-16+128-128))+rcx] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-16+160-128))+rcx] - vpaddq ymm3,ymm3,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-16+192-128))+rcx] - vpaddq ymm4,ymm4,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-16+224-128))+rcx] - vpaddq ymm5,ymm5,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-16+256-128))+rcx] - vpaddq ymm6,ymm6,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-16+288-128))+rcx] - vpaddq ymm7,ymm7,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-24+32-128))+rsi] - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-24+64-128))+rsi] - vpaddq ymm9,ymm9,ymm13 - - add r12,r11 - imul rbx,QWORD[((-128))+rsi] - add r12,rbx - - mov rax,r12 - imul eax,r8d - and eax,0x1fffffff - - vpmuludq ymm0,ymm0,ymm10 - vmovd xmm11,eax - vmovdqu ymm13,YMMWORD[((-24+96-128))+rsi] - vpaddq ymm1,ymm1,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vpbroadcastq ymm11,xmm11 - vmovdqu ymm0,YMMWORD[((-24+128-128))+rsi] - vpaddq ymm2,ymm2,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vmovdqu ymm12,YMMWORD[((-24+160-128))+rsi] - vpaddq ymm3,ymm3,ymm13 - vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-24+192-128))+rsi] - vpaddq ymm4,ymm4,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vmovdqu ymm0,YMMWORD[((-24+224-128))+rsi] - vpaddq ymm5,ymm5,ymm12 - vpmuludq 
ymm13,ymm13,ymm10 - vmovdqu ymm12,YMMWORD[((-24+256-128))+rsi] - vpaddq ymm6,ymm6,ymm13 - vpmuludq ymm0,ymm0,ymm10 - vmovdqu ymm13,YMMWORD[((-24+288-128))+rsi] - vpaddq ymm7,ymm7,ymm0 - vpmuludq ymm12,ymm12,ymm10 - vpaddq ymm8,ymm8,ymm12 - vpmuludq ymm13,ymm13,ymm10 - vpbroadcastq ymm10,QWORD[32+r13] - vpaddq ymm9,ymm9,ymm13 - add r13,32 - - vmovdqu ymm0,YMMWORD[((-24+32-128))+rcx] - imul rax,QWORD[((-128))+rcx] - add r12,rax - shr r12,29 - - vmovdqu ymm12,YMMWORD[((-24+64-128))+rcx] - vpmuludq ymm0,ymm0,ymm11 - vmovq rbx,xmm10 - vmovdqu ymm13,YMMWORD[((-24+96-128))+rcx] - vpaddq ymm0,ymm1,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu YMMWORD[rsp],ymm0 - vpaddq ymm1,ymm2,ymm12 - vmovdqu ymm0,YMMWORD[((-24+128-128))+rcx] - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-24+160-128))+rcx] - vpaddq ymm2,ymm3,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-24+192-128))+rcx] - vpaddq ymm3,ymm4,ymm0 - vpmuludq ymm12,ymm12,ymm11 - vmovdqu ymm0,YMMWORD[((-24+224-128))+rcx] - vpaddq ymm4,ymm5,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovdqu ymm12,YMMWORD[((-24+256-128))+rcx] - vpaddq ymm5,ymm6,ymm13 - vpmuludq ymm0,ymm0,ymm11 - vmovdqu ymm13,YMMWORD[((-24+288-128))+rcx] - mov r9,r12 - vpaddq ymm6,ymm7,ymm0 - vpmuludq ymm12,ymm12,ymm11 - add r9,QWORD[rsp] - vpaddq ymm7,ymm8,ymm12 - vpmuludq ymm13,ymm13,ymm11 - vmovq xmm12,r12 - vpaddq ymm8,ymm9,ymm13 - - dec r14d - jnz NEAR $L$oop_mul_1024 - vpaddq ymm0,ymm12,YMMWORD[rsp] - - vpsrlq ymm12,ymm0,29 - vpand ymm0,ymm0,ymm15 - vpsrlq ymm13,ymm1,29 - vpand ymm1,ymm1,ymm15 - vpsrlq ymm10,ymm2,29 - vpermq ymm12,ymm12,0x93 - vpand ymm2,ymm2,ymm15 - vpsrlq ymm11,ymm3,29 - vpermq ymm13,ymm13,0x93 - vpand ymm3,ymm3,ymm15 - - vpblendd ymm9,ymm12,ymm14,3 - vpermq ymm10,ymm10,0x93 - vpblendd ymm12,ymm13,ymm12,3 - vpermq ymm11,ymm11,0x93 - vpaddq ymm0,ymm0,ymm9 - vpblendd ymm13,ymm10,ymm13,3 - vpaddq ymm1,ymm1,ymm12 - vpblendd ymm10,ymm11,ymm10,3 - vpaddq ymm2,ymm2,ymm13 - vpblendd ymm11,ymm14,ymm11,3 - vpaddq ymm3,ymm3,ymm10 - 
vpaddq ymm4,ymm4,ymm11 - - vpsrlq ymm12,ymm0,29 - vpand ymm0,ymm0,ymm15 - vpsrlq ymm13,ymm1,29 - vpand ymm1,ymm1,ymm15 - vpsrlq ymm10,ymm2,29 - vpermq ymm12,ymm12,0x93 - vpand ymm2,ymm2,ymm15 - vpsrlq ymm11,ymm3,29 - vpermq ymm13,ymm13,0x93 - vpand ymm3,ymm3,ymm15 - vpermq ymm10,ymm10,0x93 - - vpblendd ymm9,ymm12,ymm14,3 - vpermq ymm11,ymm11,0x93 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm0,ymm0,ymm9 - vpblendd ymm13,ymm10,ymm13,3 - vpaddq ymm1,ymm1,ymm12 - vpblendd ymm10,ymm11,ymm10,3 - vpaddq ymm2,ymm2,ymm13 - vpblendd ymm11,ymm14,ymm11,3 - vpaddq ymm3,ymm3,ymm10 - vpaddq ymm4,ymm4,ymm11 - - vmovdqu YMMWORD[(0-128)+rdi],ymm0 - vmovdqu YMMWORD[(32-128)+rdi],ymm1 - vmovdqu YMMWORD[(64-128)+rdi],ymm2 - vmovdqu YMMWORD[(96-128)+rdi],ymm3 - vpsrlq ymm12,ymm4,29 - vpand ymm4,ymm4,ymm15 - vpsrlq ymm13,ymm5,29 - vpand ymm5,ymm5,ymm15 - vpsrlq ymm10,ymm6,29 - vpermq ymm12,ymm12,0x93 - vpand ymm6,ymm6,ymm15 - vpsrlq ymm11,ymm7,29 - vpermq ymm13,ymm13,0x93 - vpand ymm7,ymm7,ymm15 - vpsrlq ymm0,ymm8,29 - vpermq ymm10,ymm10,0x93 - vpand ymm8,ymm8,ymm15 - vpermq ymm11,ymm11,0x93 - - vpblendd ymm9,ymm12,ymm14,3 - vpermq ymm0,ymm0,0x93 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm4,ymm4,ymm9 - vpblendd ymm13,ymm10,ymm13,3 - vpaddq ymm5,ymm5,ymm12 - vpblendd ymm10,ymm11,ymm10,3 - vpaddq ymm6,ymm6,ymm13 - vpblendd ymm11,ymm0,ymm11,3 - vpaddq ymm7,ymm7,ymm10 - vpaddq ymm8,ymm8,ymm11 - - vpsrlq ymm12,ymm4,29 - vpand ymm4,ymm4,ymm15 - vpsrlq ymm13,ymm5,29 - vpand ymm5,ymm5,ymm15 - vpsrlq ymm10,ymm6,29 - vpermq ymm12,ymm12,0x93 - vpand ymm6,ymm6,ymm15 - vpsrlq ymm11,ymm7,29 - vpermq ymm13,ymm13,0x93 - vpand ymm7,ymm7,ymm15 - vpsrlq ymm0,ymm8,29 - vpermq ymm10,ymm10,0x93 - vpand ymm8,ymm8,ymm15 - vpermq ymm11,ymm11,0x93 - - vpblendd ymm9,ymm12,ymm14,3 - vpermq ymm0,ymm0,0x93 - vpblendd ymm12,ymm13,ymm12,3 - vpaddq ymm4,ymm4,ymm9 - vpblendd ymm13,ymm10,ymm13,3 - vpaddq ymm5,ymm5,ymm12 - vpblendd ymm10,ymm11,ymm10,3 - vpaddq ymm6,ymm6,ymm13 - vpblendd ymm11,ymm0,ymm11,3 - vpaddq 
ymm7,ymm7,ymm10 - vpaddq ymm8,ymm8,ymm11 - - vmovdqu YMMWORD[(128-128)+rdi],ymm4 - vmovdqu YMMWORD[(160-128)+rdi],ymm5 - vmovdqu YMMWORD[(192-128)+rdi],ymm6 - vmovdqu YMMWORD[(224-128)+rdi],ymm7 - vmovdqu YMMWORD[(256-128)+rdi],ymm8 - vzeroupper - - mov rax,rbp - -$L$mul_1024_in_tail: - movaps xmm6,XMMWORD[((-216))+rax] - movaps xmm7,XMMWORD[((-200))+rax] - movaps xmm8,XMMWORD[((-184))+rax] - movaps xmm9,XMMWORD[((-168))+rax] - movaps xmm10,XMMWORD[((-152))+rax] - movaps xmm11,XMMWORD[((-136))+rax] - movaps xmm12,XMMWORD[((-120))+rax] - movaps xmm13,XMMWORD[((-104))+rax] - movaps xmm14,XMMWORD[((-88))+rax] - movaps xmm15,XMMWORD[((-72))+rax] - mov r15,QWORD[((-48))+rax] - - mov r14,QWORD[((-40))+rax] - - mov r13,QWORD[((-32))+rax] - - mov r12,QWORD[((-24))+rax] - - mov rbp,QWORD[((-16))+rax] - - mov rbx,QWORD[((-8))+rax] - - lea rsp,[rax] - -$L$mul_1024_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_rsaz_1024_mul_avx2: -global rsaz_1024_red2norm_avx2 - -ALIGN 32 -rsaz_1024_red2norm_avx2: - - sub rdx,-128 - xor rax,rax - mov r8,QWORD[((-128))+rdx] - mov r9,QWORD[((-120))+rdx] - mov r10,QWORD[((-112))+rdx] - shl r8,0 - shl r9,29 - mov r11,r10 - shl r10,58 - shr r11,6 - add rax,r8 - add rax,r9 - add rax,r10 - adc r11,0 - mov QWORD[rcx],rax - mov rax,r11 - mov r8,QWORD[((-104))+rdx] - mov r9,QWORD[((-96))+rdx] - shl r8,23 - mov r10,r9 - shl r9,52 - shr r10,12 - add rax,r8 - add rax,r9 - adc r10,0 - mov QWORD[8+rcx],rax - mov rax,r10 - mov r11,QWORD[((-88))+rdx] - mov r8,QWORD[((-80))+rdx] - shl r11,17 - mov r9,r8 - shl r8,46 - shr r9,18 - add rax,r11 - add rax,r8 - adc r9,0 - mov QWORD[16+rcx],rax - mov rax,r9 - mov r10,QWORD[((-72))+rdx] - mov r11,QWORD[((-64))+rdx] - shl r10,11 - mov r8,r11 - shl r11,40 - shr r8,24 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[24+rcx],rax - mov rax,r8 - mov r9,QWORD[((-56))+rdx] - mov r10,QWORD[((-48))+rdx] - mov r11,QWORD[((-40))+rdx] - shl r9,5 - shl r10,34 - mov 
r8,r11 - shl r11,63 - shr r8,1 - add rax,r9 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[32+rcx],rax - mov rax,r8 - mov r9,QWORD[((-32))+rdx] - mov r10,QWORD[((-24))+rdx] - shl r9,28 - mov r11,r10 - shl r10,57 - shr r11,7 - add rax,r9 - add rax,r10 - adc r11,0 - mov QWORD[40+rcx],rax - mov rax,r11 - mov r8,QWORD[((-16))+rdx] - mov r9,QWORD[((-8))+rdx] - shl r8,22 - mov r10,r9 - shl r9,51 - shr r10,13 - add rax,r8 - add rax,r9 - adc r10,0 - mov QWORD[48+rcx],rax - mov rax,r10 - mov r11,QWORD[rdx] - mov r8,QWORD[8+rdx] - shl r11,16 - mov r9,r8 - shl r8,45 - shr r9,19 - add rax,r11 - add rax,r8 - adc r9,0 - mov QWORD[56+rcx],rax - mov rax,r9 - mov r10,QWORD[16+rdx] - mov r11,QWORD[24+rdx] - shl r10,10 - mov r8,r11 - shl r11,39 - shr r8,25 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[64+rcx],rax - mov rax,r8 - mov r9,QWORD[32+rdx] - mov r10,QWORD[40+rdx] - mov r11,QWORD[48+rdx] - shl r9,4 - shl r10,33 - mov r8,r11 - shl r11,62 - shr r8,2 - add rax,r9 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[72+rcx],rax - mov rax,r8 - mov r9,QWORD[56+rdx] - mov r10,QWORD[64+rdx] - shl r9,27 - mov r11,r10 - shl r10,56 - shr r11,8 - add rax,r9 - add rax,r10 - adc r11,0 - mov QWORD[80+rcx],rax - mov rax,r11 - mov r8,QWORD[72+rdx] - mov r9,QWORD[80+rdx] - shl r8,21 - mov r10,r9 - shl r9,50 - shr r10,14 - add rax,r8 - add rax,r9 - adc r10,0 - mov QWORD[88+rcx],rax - mov rax,r10 - mov r11,QWORD[88+rdx] - mov r8,QWORD[96+rdx] - shl r11,15 - mov r9,r8 - shl r8,44 - shr r9,20 - add rax,r11 - add rax,r8 - adc r9,0 - mov QWORD[96+rcx],rax - mov rax,r9 - mov r10,QWORD[104+rdx] - mov r11,QWORD[112+rdx] - shl r10,9 - mov r8,r11 - shl r11,38 - shr r8,26 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[104+rcx],rax - mov rax,r8 - mov r9,QWORD[120+rdx] - mov r10,QWORD[128+rdx] - mov r11,QWORD[136+rdx] - shl r9,3 - shl r10,32 - mov r8,r11 - shl r11,61 - shr r8,3 - add rax,r9 - add rax,r10 - add rax,r11 - adc r8,0 - mov QWORD[112+rcx],rax - mov rax,r8 - mov r9,QWORD[144+rdx] - mov 
r10,QWORD[152+rdx] - shl r9,26 - mov r11,r10 - shl r10,55 - shr r11,9 - add rax,r9 - add rax,r10 - adc r11,0 - mov QWORD[120+rcx],rax - mov rax,r11 - DB 0F3h,0C3h ;repret - - - -global rsaz_1024_norm2red_avx2 - -ALIGN 32 -rsaz_1024_norm2red_avx2: - - sub rcx,-128 - mov r8,QWORD[rdx] - mov eax,0x1fffffff - mov r9,QWORD[8+rdx] - mov r11,r8 - shr r11,0 - and r11,rax - mov QWORD[((-128))+rcx],r11 - mov r10,r8 - shr r10,29 - and r10,rax - mov QWORD[((-120))+rcx],r10 - shrd r8,r9,58 - and r8,rax - mov QWORD[((-112))+rcx],r8 - mov r10,QWORD[16+rdx] - mov r8,r9 - shr r8,23 - and r8,rax - mov QWORD[((-104))+rcx],r8 - shrd r9,r10,52 - and r9,rax - mov QWORD[((-96))+rcx],r9 - mov r11,QWORD[24+rdx] - mov r9,r10 - shr r9,17 - and r9,rax - mov QWORD[((-88))+rcx],r9 - shrd r10,r11,46 - and r10,rax - mov QWORD[((-80))+rcx],r10 - mov r8,QWORD[32+rdx] - mov r10,r11 - shr r10,11 - and r10,rax - mov QWORD[((-72))+rcx],r10 - shrd r11,r8,40 - and r11,rax - mov QWORD[((-64))+rcx],r11 - mov r9,QWORD[40+rdx] - mov r11,r8 - shr r11,5 - and r11,rax - mov QWORD[((-56))+rcx],r11 - mov r10,r8 - shr r10,34 - and r10,rax - mov QWORD[((-48))+rcx],r10 - shrd r8,r9,63 - and r8,rax - mov QWORD[((-40))+rcx],r8 - mov r10,QWORD[48+rdx] - mov r8,r9 - shr r8,28 - and r8,rax - mov QWORD[((-32))+rcx],r8 - shrd r9,r10,57 - and r9,rax - mov QWORD[((-24))+rcx],r9 - mov r11,QWORD[56+rdx] - mov r9,r10 - shr r9,22 - and r9,rax - mov QWORD[((-16))+rcx],r9 - shrd r10,r11,51 - and r10,rax - mov QWORD[((-8))+rcx],r10 - mov r8,QWORD[64+rdx] - mov r10,r11 - shr r10,16 - and r10,rax - mov QWORD[rcx],r10 - shrd r11,r8,45 - and r11,rax - mov QWORD[8+rcx],r11 - mov r9,QWORD[72+rdx] - mov r11,r8 - shr r11,10 - and r11,rax - mov QWORD[16+rcx],r11 - shrd r8,r9,39 - and r8,rax - mov QWORD[24+rcx],r8 - mov r10,QWORD[80+rdx] - mov r8,r9 - shr r8,4 - and r8,rax - mov QWORD[32+rcx],r8 - mov r11,r9 - shr r11,33 - and r11,rax - mov QWORD[40+rcx],r11 - shrd r9,r10,62 - and r9,rax - mov QWORD[48+rcx],r9 - mov r11,QWORD[88+rdx] - mov 
r9,r10 - shr r9,27 - and r9,rax - mov QWORD[56+rcx],r9 - shrd r10,r11,56 - and r10,rax - mov QWORD[64+rcx],r10 - mov r8,QWORD[96+rdx] - mov r10,r11 - shr r10,21 - and r10,rax - mov QWORD[72+rcx],r10 - shrd r11,r8,50 - and r11,rax - mov QWORD[80+rcx],r11 - mov r9,QWORD[104+rdx] - mov r11,r8 - shr r11,15 - and r11,rax - mov QWORD[88+rcx],r11 - shrd r8,r9,44 - and r8,rax - mov QWORD[96+rcx],r8 - mov r10,QWORD[112+rdx] - mov r8,r9 - shr r8,9 - and r8,rax - mov QWORD[104+rcx],r8 - shrd r9,r10,38 - and r9,rax - mov QWORD[112+rcx],r9 - mov r11,QWORD[120+rdx] - mov r9,r10 - shr r9,3 - and r9,rax - mov QWORD[120+rcx],r9 - mov r8,r10 - shr r8,32 - and r8,rax - mov QWORD[128+rcx],r8 - shrd r10,r11,61 - and r10,rax - mov QWORD[136+rcx],r10 - xor r8,r8 - mov r10,r11 - shr r10,26 - and r10,rax - mov QWORD[144+rcx],r10 - shrd r11,r8,55 - and r11,rax - mov QWORD[152+rcx],r11 - mov QWORD[160+rcx],r8 - mov QWORD[168+rcx],r8 - mov QWORD[176+rcx],r8 - mov QWORD[184+rcx],r8 - DB 0F3h,0C3h ;repret - - -global rsaz_1024_scatter5_avx2 - -ALIGN 32 -rsaz_1024_scatter5_avx2: - - vzeroupper - vmovdqu ymm5,YMMWORD[$L$scatter_permd] - shl r8d,4 - lea rcx,[r8*1+rcx] - mov eax,9 - jmp NEAR $L$oop_scatter_1024 - -ALIGN 32 -$L$oop_scatter_1024: - vmovdqu ymm0,YMMWORD[rdx] - lea rdx,[32+rdx] - vpermd ymm0,ymm5,ymm0 - vmovdqu XMMWORD[rcx],xmm0 - lea rcx,[512+rcx] - dec eax - jnz NEAR $L$oop_scatter_1024 - - vzeroupper - DB 0F3h,0C3h ;repret - - - -global rsaz_1024_gather5_avx2 - -ALIGN 32 -rsaz_1024_gather5_avx2: - - vzeroupper - mov r11,rsp - - lea rax,[((-136))+rsp] -$L$SEH_begin_rsaz_1024_gather5: - -DB 0x48,0x8d,0x60,0xe0 -DB 0xc5,0xf8,0x29,0x70,0xe0 -DB 0xc5,0xf8,0x29,0x78,0xf0 -DB 0xc5,0x78,0x29,0x40,0x00 -DB 0xc5,0x78,0x29,0x48,0x10 -DB 0xc5,0x78,0x29,0x50,0x20 -DB 0xc5,0x78,0x29,0x58,0x30 -DB 0xc5,0x78,0x29,0x60,0x40 -DB 0xc5,0x78,0x29,0x68,0x50 -DB 0xc5,0x78,0x29,0x70,0x60 -DB 0xc5,0x78,0x29,0x78,0x70 - lea rsp,[((-256))+rsp] - and rsp,-32 - lea r10,[$L$inc] - lea rax,[((-128))+rsp] - - 
vmovd xmm4,r8d - vmovdqa ymm0,YMMWORD[r10] - vmovdqa ymm1,YMMWORD[32+r10] - vmovdqa ymm5,YMMWORD[64+r10] - vpbroadcastd ymm4,xmm4 - - vpaddd ymm2,ymm0,ymm5 - vpcmpeqd ymm0,ymm0,ymm4 - vpaddd ymm3,ymm1,ymm5 - vpcmpeqd ymm1,ymm1,ymm4 - vmovdqa YMMWORD[(0+128)+rax],ymm0 - vpaddd ymm0,ymm2,ymm5 - vpcmpeqd ymm2,ymm2,ymm4 - vmovdqa YMMWORD[(32+128)+rax],ymm1 - vpaddd ymm1,ymm3,ymm5 - vpcmpeqd ymm3,ymm3,ymm4 - vmovdqa YMMWORD[(64+128)+rax],ymm2 - vpaddd ymm2,ymm0,ymm5 - vpcmpeqd ymm0,ymm0,ymm4 - vmovdqa YMMWORD[(96+128)+rax],ymm3 - vpaddd ymm3,ymm1,ymm5 - vpcmpeqd ymm1,ymm1,ymm4 - vmovdqa YMMWORD[(128+128)+rax],ymm0 - vpaddd ymm8,ymm2,ymm5 - vpcmpeqd ymm2,ymm2,ymm4 - vmovdqa YMMWORD[(160+128)+rax],ymm1 - vpaddd ymm9,ymm3,ymm5 - vpcmpeqd ymm3,ymm3,ymm4 - vmovdqa YMMWORD[(192+128)+rax],ymm2 - vpaddd ymm10,ymm8,ymm5 - vpcmpeqd ymm8,ymm8,ymm4 - vmovdqa YMMWORD[(224+128)+rax],ymm3 - vpaddd ymm11,ymm9,ymm5 - vpcmpeqd ymm9,ymm9,ymm4 - vpaddd ymm12,ymm10,ymm5 - vpcmpeqd ymm10,ymm10,ymm4 - vpaddd ymm13,ymm11,ymm5 - vpcmpeqd ymm11,ymm11,ymm4 - vpaddd ymm14,ymm12,ymm5 - vpcmpeqd ymm12,ymm12,ymm4 - vpaddd ymm15,ymm13,ymm5 - vpcmpeqd ymm13,ymm13,ymm4 - vpcmpeqd ymm14,ymm14,ymm4 - vpcmpeqd ymm15,ymm15,ymm4 - - vmovdqa ymm7,YMMWORD[((-32))+r10] - lea rdx,[128+rdx] - mov r8d,9 - -$L$oop_gather_1024: - vmovdqa ymm0,YMMWORD[((0-128))+rdx] - vmovdqa ymm1,YMMWORD[((32-128))+rdx] - vmovdqa ymm2,YMMWORD[((64-128))+rdx] - vmovdqa ymm3,YMMWORD[((96-128))+rdx] - vpand ymm0,ymm0,YMMWORD[((0+128))+rax] - vpand ymm1,ymm1,YMMWORD[((32+128))+rax] - vpand ymm2,ymm2,YMMWORD[((64+128))+rax] - vpor ymm4,ymm1,ymm0 - vpand ymm3,ymm3,YMMWORD[((96+128))+rax] - vmovdqa ymm0,YMMWORD[((128-128))+rdx] - vmovdqa ymm1,YMMWORD[((160-128))+rdx] - vpor ymm5,ymm3,ymm2 - vmovdqa ymm2,YMMWORD[((192-128))+rdx] - vmovdqa ymm3,YMMWORD[((224-128))+rdx] - vpand ymm0,ymm0,YMMWORD[((128+128))+rax] - vpand ymm1,ymm1,YMMWORD[((160+128))+rax] - vpand ymm2,ymm2,YMMWORD[((192+128))+rax] - vpor ymm4,ymm4,ymm0 - vpand 
ymm3,ymm3,YMMWORD[((224+128))+rax] - vpand ymm0,ymm8,YMMWORD[((256-128))+rdx] - vpor ymm5,ymm5,ymm1 - vpand ymm1,ymm9,YMMWORD[((288-128))+rdx] - vpor ymm4,ymm4,ymm2 - vpand ymm2,ymm10,YMMWORD[((320-128))+rdx] - vpor ymm5,ymm5,ymm3 - vpand ymm3,ymm11,YMMWORD[((352-128))+rdx] - vpor ymm4,ymm4,ymm0 - vpand ymm0,ymm12,YMMWORD[((384-128))+rdx] - vpor ymm5,ymm5,ymm1 - vpand ymm1,ymm13,YMMWORD[((416-128))+rdx] - vpor ymm4,ymm4,ymm2 - vpand ymm2,ymm14,YMMWORD[((448-128))+rdx] - vpor ymm5,ymm5,ymm3 - vpand ymm3,ymm15,YMMWORD[((480-128))+rdx] - lea rdx,[512+rdx] - vpor ymm4,ymm4,ymm0 - vpor ymm5,ymm5,ymm1 - vpor ymm4,ymm4,ymm2 - vpor ymm5,ymm5,ymm3 - - vpor ymm4,ymm4,ymm5 - vextracti128 xmm5,ymm4,1 - vpor xmm5,xmm5,xmm4 - vpermd ymm5,ymm7,ymm5 - vmovdqu YMMWORD[rcx],ymm5 - lea rcx,[32+rcx] - dec r8d - jnz NEAR $L$oop_gather_1024 - - vpxor ymm0,ymm0,ymm0 - vmovdqu YMMWORD[rcx],ymm0 - vzeroupper - movaps xmm6,XMMWORD[((-168))+r11] - movaps xmm7,XMMWORD[((-152))+r11] - movaps xmm8,XMMWORD[((-136))+r11] - movaps xmm9,XMMWORD[((-120))+r11] - movaps xmm10,XMMWORD[((-104))+r11] - movaps xmm11,XMMWORD[((-88))+r11] - movaps xmm12,XMMWORD[((-72))+r11] - movaps xmm13,XMMWORD[((-56))+r11] - movaps xmm14,XMMWORD[((-40))+r11] - movaps xmm15,XMMWORD[((-24))+r11] - lea rsp,[r11] - - DB 0F3h,0C3h ;repret - -$L$SEH_end_rsaz_1024_gather5: - -ALIGN 64 -$L$and_mask: - DQ 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff -$L$scatter_permd: - DD 0,2,4,6,7,7,7,7 -$L$gather_permd: - DD 0,7,1,7,2,7,3,7 -$L$inc: - DD 0,0,0,0,1,1,1,1 - DD 2,2,2,2,3,3,3,3 - DD 4,4,4,4,4,4,4,4 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -rsaz_se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR 
$L$common_seh_tail - - mov rbp,QWORD[160+r8] - - mov r10d,DWORD[8+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - cmovc rax,rbp - - mov r15,QWORD[((-48))+rax] - mov r14,QWORD[((-40))+rax] - mov r13,QWORD[((-32))+rax] - mov r12,QWORD[((-24))+rax] - mov rbp,QWORD[((-16))+rax] - mov rbx,QWORD[((-8))+rax] - mov QWORD[240+r8],r15 - mov QWORD[232+r8],r14 - mov QWORD[224+r8],r13 - mov QWORD[216+r8],r12 - mov QWORD[160+r8],rbp - mov QWORD[144+r8],rbx - - lea rsi,[((-216))+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_rsaz_1024_sqr_avx2 wrt ..imagebase - DD $L$SEH_end_rsaz_1024_sqr_avx2 wrt ..imagebase - DD $L$SEH_info_rsaz_1024_sqr_avx2 wrt ..imagebase - - DD $L$SEH_begin_rsaz_1024_mul_avx2 wrt ..imagebase - DD $L$SEH_end_rsaz_1024_mul_avx2 wrt ..imagebase - DD $L$SEH_info_rsaz_1024_mul_avx2 wrt ..imagebase - - DD $L$SEH_begin_rsaz_1024_gather5 wrt ..imagebase - DD $L$SEH_end_rsaz_1024_gather5 wrt ..imagebase - DD $L$SEH_info_rsaz_1024_gather5 wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_rsaz_1024_sqr_avx2: -DB 9,0,0,0 - DD rsaz_se_handler wrt ..imagebase - DD $L$sqr_1024_body wrt ..imagebase,$L$sqr_1024_epilogue wrt ..imagebase,$L$sqr_1024_in_tail wrt ..imagebase - DD 0 -$L$SEH_info_rsaz_1024_mul_avx2: -DB 9,0,0,0 - DD rsaz_se_handler wrt ..imagebase - DD 
$L$mul_1024_body wrt ..imagebase,$L$mul_1024_epilogue wrt ..imagebase,$L$mul_1024_in_tail wrt ..imagebase - DD 0 -$L$SEH_info_rsaz_1024_gather5: -DB 0x01,0x36,0x17,0x0b -DB 0x36,0xf8,0x09,0x00 -DB 0x31,0xe8,0x08,0x00 -DB 0x2c,0xd8,0x07,0x00 -DB 0x27,0xc8,0x06,0x00 -DB 0x22,0xb8,0x05,0x00 -DB 0x1d,0xa8,0x04,0x00 -DB 0x18,0x98,0x03,0x00 -DB 0x13,0x88,0x02,0x00 -DB 0x0e,0x78,0x01,0x00 -DB 0x09,0x68,0x00,0x00 -DB 0x04,0x01,0x15,0x00 -DB 0x00,0xb3,0x00,0x00 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm deleted file mode 100644 index 62dcc62c25..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm +++ /dev/null @@ -1,3814 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - -EXTERN OPENSSL_ia32cap_P - -global sha1_block_data_order - -ALIGN 16 -sha1_block_data_order: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha1_block_data_order: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea r10,[OPENSSL_ia32cap_P] - mov r9d,DWORD[r10] - mov r8d,DWORD[4+r10] - mov r10d,DWORD[8+r10] - test r8d,512 - jz NEAR $L$ialu - and r8d,268435456 - and r9d,1073741824 - or r8d,r9d - cmp r8d,1342177280 - je NEAR _avx_shortcut - jmp NEAR _ssse3_shortcut - -ALIGN 16 -$L$ialu: - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - mov r8,rdi - sub rsp,72 - mov r9,rsi - and rsp,-64 - mov r10,rdx - mov QWORD[64+rsp],rax - -$L$prologue: - - mov esi,DWORD[r8] - mov edi,DWORD[4+r8] - mov r11d,DWORD[8+r8] - mov r12d,DWORD[12+r8] - mov r13d,DWORD[16+r8] - jmp NEAR $L$loop - -ALIGN 16 -$L$loop: - mov edx,DWORD[r9] - bswap edx - 
mov ebp,DWORD[4+r9] - mov eax,r12d - mov DWORD[rsp],edx - mov ecx,esi - bswap ebp - xor eax,r11d - rol ecx,5 - and eax,edi - lea r13d,[1518500249+r13*1+rdx] - add r13d,ecx - xor eax,r12d - rol edi,30 - add r13d,eax - mov r14d,DWORD[8+r9] - mov eax,r11d - mov DWORD[4+rsp],ebp - mov ecx,r13d - bswap r14d - xor eax,edi - rol ecx,5 - and eax,esi - lea r12d,[1518500249+r12*1+rbp] - add r12d,ecx - xor eax,r11d - rol esi,30 - add r12d,eax - mov edx,DWORD[12+r9] - mov eax,edi - mov DWORD[8+rsp],r14d - mov ecx,r12d - bswap edx - xor eax,esi - rol ecx,5 - and eax,r13d - lea r11d,[1518500249+r11*1+r14] - add r11d,ecx - xor eax,edi - rol r13d,30 - add r11d,eax - mov ebp,DWORD[16+r9] - mov eax,esi - mov DWORD[12+rsp],edx - mov ecx,r11d - bswap ebp - xor eax,r13d - rol ecx,5 - and eax,r12d - lea edi,[1518500249+rdi*1+rdx] - add edi,ecx - xor eax,esi - rol r12d,30 - add edi,eax - mov r14d,DWORD[20+r9] - mov eax,r13d - mov DWORD[16+rsp],ebp - mov ecx,edi - bswap r14d - xor eax,r12d - rol ecx,5 - and eax,r11d - lea esi,[1518500249+rsi*1+rbp] - add esi,ecx - xor eax,r13d - rol r11d,30 - add esi,eax - mov edx,DWORD[24+r9] - mov eax,r12d - mov DWORD[20+rsp],r14d - mov ecx,esi - bswap edx - xor eax,r11d - rol ecx,5 - and eax,edi - lea r13d,[1518500249+r13*1+r14] - add r13d,ecx - xor eax,r12d - rol edi,30 - add r13d,eax - mov ebp,DWORD[28+r9] - mov eax,r11d - mov DWORD[24+rsp],edx - mov ecx,r13d - bswap ebp - xor eax,edi - rol ecx,5 - and eax,esi - lea r12d,[1518500249+r12*1+rdx] - add r12d,ecx - xor eax,r11d - rol esi,30 - add r12d,eax - mov r14d,DWORD[32+r9] - mov eax,edi - mov DWORD[28+rsp],ebp - mov ecx,r12d - bswap r14d - xor eax,esi - rol ecx,5 - and eax,r13d - lea r11d,[1518500249+r11*1+rbp] - add r11d,ecx - xor eax,edi - rol r13d,30 - add r11d,eax - mov edx,DWORD[36+r9] - mov eax,esi - mov DWORD[32+rsp],r14d - mov ecx,r11d - bswap edx - xor eax,r13d - rol ecx,5 - and eax,r12d - lea edi,[1518500249+rdi*1+r14] - add edi,ecx - xor eax,esi - rol r12d,30 - add edi,eax - mov 
ebp,DWORD[40+r9] - mov eax,r13d - mov DWORD[36+rsp],edx - mov ecx,edi - bswap ebp - xor eax,r12d - rol ecx,5 - and eax,r11d - lea esi,[1518500249+rsi*1+rdx] - add esi,ecx - xor eax,r13d - rol r11d,30 - add esi,eax - mov r14d,DWORD[44+r9] - mov eax,r12d - mov DWORD[40+rsp],ebp - mov ecx,esi - bswap r14d - xor eax,r11d - rol ecx,5 - and eax,edi - lea r13d,[1518500249+r13*1+rbp] - add r13d,ecx - xor eax,r12d - rol edi,30 - add r13d,eax - mov edx,DWORD[48+r9] - mov eax,r11d - mov DWORD[44+rsp],r14d - mov ecx,r13d - bswap edx - xor eax,edi - rol ecx,5 - and eax,esi - lea r12d,[1518500249+r12*1+r14] - add r12d,ecx - xor eax,r11d - rol esi,30 - add r12d,eax - mov ebp,DWORD[52+r9] - mov eax,edi - mov DWORD[48+rsp],edx - mov ecx,r12d - bswap ebp - xor eax,esi - rol ecx,5 - and eax,r13d - lea r11d,[1518500249+r11*1+rdx] - add r11d,ecx - xor eax,edi - rol r13d,30 - add r11d,eax - mov r14d,DWORD[56+r9] - mov eax,esi - mov DWORD[52+rsp],ebp - mov ecx,r11d - bswap r14d - xor eax,r13d - rol ecx,5 - and eax,r12d - lea edi,[1518500249+rdi*1+rbp] - add edi,ecx - xor eax,esi - rol r12d,30 - add edi,eax - mov edx,DWORD[60+r9] - mov eax,r13d - mov DWORD[56+rsp],r14d - mov ecx,edi - bswap edx - xor eax,r12d - rol ecx,5 - and eax,r11d - lea esi,[1518500249+rsi*1+r14] - add esi,ecx - xor eax,r13d - rol r11d,30 - add esi,eax - xor ebp,DWORD[rsp] - mov eax,r12d - mov DWORD[60+rsp],edx - mov ecx,esi - xor ebp,DWORD[8+rsp] - xor eax,r11d - rol ecx,5 - xor ebp,DWORD[32+rsp] - and eax,edi - lea r13d,[1518500249+r13*1+rdx] - rol edi,30 - xor eax,r12d - add r13d,ecx - rol ebp,1 - add r13d,eax - xor r14d,DWORD[4+rsp] - mov eax,r11d - mov DWORD[rsp],ebp - mov ecx,r13d - xor r14d,DWORD[12+rsp] - xor eax,edi - rol ecx,5 - xor r14d,DWORD[36+rsp] - and eax,esi - lea r12d,[1518500249+r12*1+rbp] - rol esi,30 - xor eax,r11d - add r12d,ecx - rol r14d,1 - add r12d,eax - xor edx,DWORD[8+rsp] - mov eax,edi - mov DWORD[4+rsp],r14d - mov ecx,r12d - xor edx,DWORD[16+rsp] - xor eax,esi - rol ecx,5 - xor 
edx,DWORD[40+rsp] - and eax,r13d - lea r11d,[1518500249+r11*1+r14] - rol r13d,30 - xor eax,edi - add r11d,ecx - rol edx,1 - add r11d,eax - xor ebp,DWORD[12+rsp] - mov eax,esi - mov DWORD[8+rsp],edx - mov ecx,r11d - xor ebp,DWORD[20+rsp] - xor eax,r13d - rol ecx,5 - xor ebp,DWORD[44+rsp] - and eax,r12d - lea edi,[1518500249+rdi*1+rdx] - rol r12d,30 - xor eax,esi - add edi,ecx - rol ebp,1 - add edi,eax - xor r14d,DWORD[16+rsp] - mov eax,r13d - mov DWORD[12+rsp],ebp - mov ecx,edi - xor r14d,DWORD[24+rsp] - xor eax,r12d - rol ecx,5 - xor r14d,DWORD[48+rsp] - and eax,r11d - lea esi,[1518500249+rsi*1+rbp] - rol r11d,30 - xor eax,r13d - add esi,ecx - rol r14d,1 - add esi,eax - xor edx,DWORD[20+rsp] - mov eax,edi - mov DWORD[16+rsp],r14d - mov ecx,esi - xor edx,DWORD[28+rsp] - xor eax,r12d - rol ecx,5 - xor edx,DWORD[52+rsp] - lea r13d,[1859775393+r13*1+r14] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol edx,1 - xor ebp,DWORD[24+rsp] - mov eax,esi - mov DWORD[20+rsp],edx - mov ecx,r13d - xor ebp,DWORD[32+rsp] - xor eax,r11d - rol ecx,5 - xor ebp,DWORD[56+rsp] - lea r12d,[1859775393+r12*1+rdx] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol ebp,1 - xor r14d,DWORD[28+rsp] - mov eax,r13d - mov DWORD[24+rsp],ebp - mov ecx,r12d - xor r14d,DWORD[36+rsp] - xor eax,edi - rol ecx,5 - xor r14d,DWORD[60+rsp] - lea r11d,[1859775393+r11*1+rbp] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol r14d,1 - xor edx,DWORD[32+rsp] - mov eax,r12d - mov DWORD[28+rsp],r14d - mov ecx,r11d - xor edx,DWORD[40+rsp] - xor eax,esi - rol ecx,5 - xor edx,DWORD[rsp] - lea edi,[1859775393+rdi*1+r14] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol edx,1 - xor ebp,DWORD[36+rsp] - mov eax,r11d - mov DWORD[32+rsp],edx - mov ecx,edi - xor ebp,DWORD[44+rsp] - xor eax,r13d - rol ecx,5 - xor ebp,DWORD[4+rsp] - lea esi,[1859775393+rsi*1+rdx] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol ebp,1 - xor r14d,DWORD[40+rsp] - mov eax,edi - mov 
DWORD[36+rsp],ebp - mov ecx,esi - xor r14d,DWORD[48+rsp] - xor eax,r12d - rol ecx,5 - xor r14d,DWORD[8+rsp] - lea r13d,[1859775393+r13*1+rbp] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol r14d,1 - xor edx,DWORD[44+rsp] - mov eax,esi - mov DWORD[40+rsp],r14d - mov ecx,r13d - xor edx,DWORD[52+rsp] - xor eax,r11d - rol ecx,5 - xor edx,DWORD[12+rsp] - lea r12d,[1859775393+r12*1+r14] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol edx,1 - xor ebp,DWORD[48+rsp] - mov eax,r13d - mov DWORD[44+rsp],edx - mov ecx,r12d - xor ebp,DWORD[56+rsp] - xor eax,edi - rol ecx,5 - xor ebp,DWORD[16+rsp] - lea r11d,[1859775393+r11*1+rdx] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol ebp,1 - xor r14d,DWORD[52+rsp] - mov eax,r12d - mov DWORD[48+rsp],ebp - mov ecx,r11d - xor r14d,DWORD[60+rsp] - xor eax,esi - rol ecx,5 - xor r14d,DWORD[20+rsp] - lea edi,[1859775393+rdi*1+rbp] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol r14d,1 - xor edx,DWORD[56+rsp] - mov eax,r11d - mov DWORD[52+rsp],r14d - mov ecx,edi - xor edx,DWORD[rsp] - xor eax,r13d - rol ecx,5 - xor edx,DWORD[24+rsp] - lea esi,[1859775393+rsi*1+r14] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol edx,1 - xor ebp,DWORD[60+rsp] - mov eax,edi - mov DWORD[56+rsp],edx - mov ecx,esi - xor ebp,DWORD[4+rsp] - xor eax,r12d - rol ecx,5 - xor ebp,DWORD[28+rsp] - lea r13d,[1859775393+r13*1+rdx] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol ebp,1 - xor r14d,DWORD[rsp] - mov eax,esi - mov DWORD[60+rsp],ebp - mov ecx,r13d - xor r14d,DWORD[8+rsp] - xor eax,r11d - rol ecx,5 - xor r14d,DWORD[32+rsp] - lea r12d,[1859775393+r12*1+rbp] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol r14d,1 - xor edx,DWORD[4+rsp] - mov eax,r13d - mov DWORD[rsp],r14d - mov ecx,r12d - xor edx,DWORD[12+rsp] - xor eax,edi - rol ecx,5 - xor edx,DWORD[36+rsp] - lea r11d,[1859775393+r11*1+r14] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol edx,1 - 
xor ebp,DWORD[8+rsp] - mov eax,r12d - mov DWORD[4+rsp],edx - mov ecx,r11d - xor ebp,DWORD[16+rsp] - xor eax,esi - rol ecx,5 - xor ebp,DWORD[40+rsp] - lea edi,[1859775393+rdi*1+rdx] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol ebp,1 - xor r14d,DWORD[12+rsp] - mov eax,r11d - mov DWORD[8+rsp],ebp - mov ecx,edi - xor r14d,DWORD[20+rsp] - xor eax,r13d - rol ecx,5 - xor r14d,DWORD[44+rsp] - lea esi,[1859775393+rsi*1+rbp] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol r14d,1 - xor edx,DWORD[16+rsp] - mov eax,edi - mov DWORD[12+rsp],r14d - mov ecx,esi - xor edx,DWORD[24+rsp] - xor eax,r12d - rol ecx,5 - xor edx,DWORD[48+rsp] - lea r13d,[1859775393+r13*1+r14] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol edx,1 - xor ebp,DWORD[20+rsp] - mov eax,esi - mov DWORD[16+rsp],edx - mov ecx,r13d - xor ebp,DWORD[28+rsp] - xor eax,r11d - rol ecx,5 - xor ebp,DWORD[52+rsp] - lea r12d,[1859775393+r12*1+rdx] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol ebp,1 - xor r14d,DWORD[24+rsp] - mov eax,r13d - mov DWORD[20+rsp],ebp - mov ecx,r12d - xor r14d,DWORD[32+rsp] - xor eax,edi - rol ecx,5 - xor r14d,DWORD[56+rsp] - lea r11d,[1859775393+r11*1+rbp] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol r14d,1 - xor edx,DWORD[28+rsp] - mov eax,r12d - mov DWORD[24+rsp],r14d - mov ecx,r11d - xor edx,DWORD[36+rsp] - xor eax,esi - rol ecx,5 - xor edx,DWORD[60+rsp] - lea edi,[1859775393+rdi*1+r14] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol edx,1 - xor ebp,DWORD[32+rsp] - mov eax,r11d - mov DWORD[28+rsp],edx - mov ecx,edi - xor ebp,DWORD[40+rsp] - xor eax,r13d - rol ecx,5 - xor ebp,DWORD[rsp] - lea esi,[1859775393+rsi*1+rdx] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol ebp,1 - xor r14d,DWORD[36+rsp] - mov eax,r12d - mov DWORD[32+rsp],ebp - mov ebx,r12d - xor r14d,DWORD[44+rsp] - and eax,r11d - mov ecx,esi - xor r14d,DWORD[4+rsp] - lea r13d,[((-1894007588))+r13*1+rbp] - xor ebx,r11d - 
rol ecx,5 - add r13d,eax - rol r14d,1 - and ebx,edi - add r13d,ecx - rol edi,30 - add r13d,ebx - xor edx,DWORD[40+rsp] - mov eax,r11d - mov DWORD[36+rsp],r14d - mov ebx,r11d - xor edx,DWORD[48+rsp] - and eax,edi - mov ecx,r13d - xor edx,DWORD[8+rsp] - lea r12d,[((-1894007588))+r12*1+r14] - xor ebx,edi - rol ecx,5 - add r12d,eax - rol edx,1 - and ebx,esi - add r12d,ecx - rol esi,30 - add r12d,ebx - xor ebp,DWORD[44+rsp] - mov eax,edi - mov DWORD[40+rsp],edx - mov ebx,edi - xor ebp,DWORD[52+rsp] - and eax,esi - mov ecx,r12d - xor ebp,DWORD[12+rsp] - lea r11d,[((-1894007588))+r11*1+rdx] - xor ebx,esi - rol ecx,5 - add r11d,eax - rol ebp,1 - and ebx,r13d - add r11d,ecx - rol r13d,30 - add r11d,ebx - xor r14d,DWORD[48+rsp] - mov eax,esi - mov DWORD[44+rsp],ebp - mov ebx,esi - xor r14d,DWORD[56+rsp] - and eax,r13d - mov ecx,r11d - xor r14d,DWORD[16+rsp] - lea edi,[((-1894007588))+rdi*1+rbp] - xor ebx,r13d - rol ecx,5 - add edi,eax - rol r14d,1 - and ebx,r12d - add edi,ecx - rol r12d,30 - add edi,ebx - xor edx,DWORD[52+rsp] - mov eax,r13d - mov DWORD[48+rsp],r14d - mov ebx,r13d - xor edx,DWORD[60+rsp] - and eax,r12d - mov ecx,edi - xor edx,DWORD[20+rsp] - lea esi,[((-1894007588))+rsi*1+r14] - xor ebx,r12d - rol ecx,5 - add esi,eax - rol edx,1 - and ebx,r11d - add esi,ecx - rol r11d,30 - add esi,ebx - xor ebp,DWORD[56+rsp] - mov eax,r12d - mov DWORD[52+rsp],edx - mov ebx,r12d - xor ebp,DWORD[rsp] - and eax,r11d - mov ecx,esi - xor ebp,DWORD[24+rsp] - lea r13d,[((-1894007588))+r13*1+rdx] - xor ebx,r11d - rol ecx,5 - add r13d,eax - rol ebp,1 - and ebx,edi - add r13d,ecx - rol edi,30 - add r13d,ebx - xor r14d,DWORD[60+rsp] - mov eax,r11d - mov DWORD[56+rsp],ebp - mov ebx,r11d - xor r14d,DWORD[4+rsp] - and eax,edi - mov ecx,r13d - xor r14d,DWORD[28+rsp] - lea r12d,[((-1894007588))+r12*1+rbp] - xor ebx,edi - rol ecx,5 - add r12d,eax - rol r14d,1 - and ebx,esi - add r12d,ecx - rol esi,30 - add r12d,ebx - xor edx,DWORD[rsp] - mov eax,edi - mov DWORD[60+rsp],r14d - mov ebx,edi - 
xor edx,DWORD[8+rsp] - and eax,esi - mov ecx,r12d - xor edx,DWORD[32+rsp] - lea r11d,[((-1894007588))+r11*1+r14] - xor ebx,esi - rol ecx,5 - add r11d,eax - rol edx,1 - and ebx,r13d - add r11d,ecx - rol r13d,30 - add r11d,ebx - xor ebp,DWORD[4+rsp] - mov eax,esi - mov DWORD[rsp],edx - mov ebx,esi - xor ebp,DWORD[12+rsp] - and eax,r13d - mov ecx,r11d - xor ebp,DWORD[36+rsp] - lea edi,[((-1894007588))+rdi*1+rdx] - xor ebx,r13d - rol ecx,5 - add edi,eax - rol ebp,1 - and ebx,r12d - add edi,ecx - rol r12d,30 - add edi,ebx - xor r14d,DWORD[8+rsp] - mov eax,r13d - mov DWORD[4+rsp],ebp - mov ebx,r13d - xor r14d,DWORD[16+rsp] - and eax,r12d - mov ecx,edi - xor r14d,DWORD[40+rsp] - lea esi,[((-1894007588))+rsi*1+rbp] - xor ebx,r12d - rol ecx,5 - add esi,eax - rol r14d,1 - and ebx,r11d - add esi,ecx - rol r11d,30 - add esi,ebx - xor edx,DWORD[12+rsp] - mov eax,r12d - mov DWORD[8+rsp],r14d - mov ebx,r12d - xor edx,DWORD[20+rsp] - and eax,r11d - mov ecx,esi - xor edx,DWORD[44+rsp] - lea r13d,[((-1894007588))+r13*1+r14] - xor ebx,r11d - rol ecx,5 - add r13d,eax - rol edx,1 - and ebx,edi - add r13d,ecx - rol edi,30 - add r13d,ebx - xor ebp,DWORD[16+rsp] - mov eax,r11d - mov DWORD[12+rsp],edx - mov ebx,r11d - xor ebp,DWORD[24+rsp] - and eax,edi - mov ecx,r13d - xor ebp,DWORD[48+rsp] - lea r12d,[((-1894007588))+r12*1+rdx] - xor ebx,edi - rol ecx,5 - add r12d,eax - rol ebp,1 - and ebx,esi - add r12d,ecx - rol esi,30 - add r12d,ebx - xor r14d,DWORD[20+rsp] - mov eax,edi - mov DWORD[16+rsp],ebp - mov ebx,edi - xor r14d,DWORD[28+rsp] - and eax,esi - mov ecx,r12d - xor r14d,DWORD[52+rsp] - lea r11d,[((-1894007588))+r11*1+rbp] - xor ebx,esi - rol ecx,5 - add r11d,eax - rol r14d,1 - and ebx,r13d - add r11d,ecx - rol r13d,30 - add r11d,ebx - xor edx,DWORD[24+rsp] - mov eax,esi - mov DWORD[20+rsp],r14d - mov ebx,esi - xor edx,DWORD[32+rsp] - and eax,r13d - mov ecx,r11d - xor edx,DWORD[56+rsp] - lea edi,[((-1894007588))+rdi*1+r14] - xor ebx,r13d - rol ecx,5 - add edi,eax - rol edx,1 - and 
ebx,r12d - add edi,ecx - rol r12d,30 - add edi,ebx - xor ebp,DWORD[28+rsp] - mov eax,r13d - mov DWORD[24+rsp],edx - mov ebx,r13d - xor ebp,DWORD[36+rsp] - and eax,r12d - mov ecx,edi - xor ebp,DWORD[60+rsp] - lea esi,[((-1894007588))+rsi*1+rdx] - xor ebx,r12d - rol ecx,5 - add esi,eax - rol ebp,1 - and ebx,r11d - add esi,ecx - rol r11d,30 - add esi,ebx - xor r14d,DWORD[32+rsp] - mov eax,r12d - mov DWORD[28+rsp],ebp - mov ebx,r12d - xor r14d,DWORD[40+rsp] - and eax,r11d - mov ecx,esi - xor r14d,DWORD[rsp] - lea r13d,[((-1894007588))+r13*1+rbp] - xor ebx,r11d - rol ecx,5 - add r13d,eax - rol r14d,1 - and ebx,edi - add r13d,ecx - rol edi,30 - add r13d,ebx - xor edx,DWORD[36+rsp] - mov eax,r11d - mov DWORD[32+rsp],r14d - mov ebx,r11d - xor edx,DWORD[44+rsp] - and eax,edi - mov ecx,r13d - xor edx,DWORD[4+rsp] - lea r12d,[((-1894007588))+r12*1+r14] - xor ebx,edi - rol ecx,5 - add r12d,eax - rol edx,1 - and ebx,esi - add r12d,ecx - rol esi,30 - add r12d,ebx - xor ebp,DWORD[40+rsp] - mov eax,edi - mov DWORD[36+rsp],edx - mov ebx,edi - xor ebp,DWORD[48+rsp] - and eax,esi - mov ecx,r12d - xor ebp,DWORD[8+rsp] - lea r11d,[((-1894007588))+r11*1+rdx] - xor ebx,esi - rol ecx,5 - add r11d,eax - rol ebp,1 - and ebx,r13d - add r11d,ecx - rol r13d,30 - add r11d,ebx - xor r14d,DWORD[44+rsp] - mov eax,esi - mov DWORD[40+rsp],ebp - mov ebx,esi - xor r14d,DWORD[52+rsp] - and eax,r13d - mov ecx,r11d - xor r14d,DWORD[12+rsp] - lea edi,[((-1894007588))+rdi*1+rbp] - xor ebx,r13d - rol ecx,5 - add edi,eax - rol r14d,1 - and ebx,r12d - add edi,ecx - rol r12d,30 - add edi,ebx - xor edx,DWORD[48+rsp] - mov eax,r13d - mov DWORD[44+rsp],r14d - mov ebx,r13d - xor edx,DWORD[56+rsp] - and eax,r12d - mov ecx,edi - xor edx,DWORD[16+rsp] - lea esi,[((-1894007588))+rsi*1+r14] - xor ebx,r12d - rol ecx,5 - add esi,eax - rol edx,1 - and ebx,r11d - add esi,ecx - rol r11d,30 - add esi,ebx - xor ebp,DWORD[52+rsp] - mov eax,edi - mov DWORD[48+rsp],edx - mov ecx,esi - xor ebp,DWORD[60+rsp] - xor eax,r12d - rol 
ecx,5 - xor ebp,DWORD[20+rsp] - lea r13d,[((-899497514))+r13*1+rdx] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol ebp,1 - xor r14d,DWORD[56+rsp] - mov eax,esi - mov DWORD[52+rsp],ebp - mov ecx,r13d - xor r14d,DWORD[rsp] - xor eax,r11d - rol ecx,5 - xor r14d,DWORD[24+rsp] - lea r12d,[((-899497514))+r12*1+rbp] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol r14d,1 - xor edx,DWORD[60+rsp] - mov eax,r13d - mov DWORD[56+rsp],r14d - mov ecx,r12d - xor edx,DWORD[4+rsp] - xor eax,edi - rol ecx,5 - xor edx,DWORD[28+rsp] - lea r11d,[((-899497514))+r11*1+r14] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol edx,1 - xor ebp,DWORD[rsp] - mov eax,r12d - mov DWORD[60+rsp],edx - mov ecx,r11d - xor ebp,DWORD[8+rsp] - xor eax,esi - rol ecx,5 - xor ebp,DWORD[32+rsp] - lea edi,[((-899497514))+rdi*1+rdx] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol ebp,1 - xor r14d,DWORD[4+rsp] - mov eax,r11d - mov DWORD[rsp],ebp - mov ecx,edi - xor r14d,DWORD[12+rsp] - xor eax,r13d - rol ecx,5 - xor r14d,DWORD[36+rsp] - lea esi,[((-899497514))+rsi*1+rbp] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol r14d,1 - xor edx,DWORD[8+rsp] - mov eax,edi - mov DWORD[4+rsp],r14d - mov ecx,esi - xor edx,DWORD[16+rsp] - xor eax,r12d - rol ecx,5 - xor edx,DWORD[40+rsp] - lea r13d,[((-899497514))+r13*1+r14] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol edx,1 - xor ebp,DWORD[12+rsp] - mov eax,esi - mov DWORD[8+rsp],edx - mov ecx,r13d - xor ebp,DWORD[20+rsp] - xor eax,r11d - rol ecx,5 - xor ebp,DWORD[44+rsp] - lea r12d,[((-899497514))+r12*1+rdx] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol ebp,1 - xor r14d,DWORD[16+rsp] - mov eax,r13d - mov DWORD[12+rsp],ebp - mov ecx,r12d - xor r14d,DWORD[24+rsp] - xor eax,edi - rol ecx,5 - xor r14d,DWORD[48+rsp] - lea r11d,[((-899497514))+r11*1+rbp] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol r14d,1 - xor edx,DWORD[20+rsp] - mov eax,r12d - mov 
DWORD[16+rsp],r14d - mov ecx,r11d - xor edx,DWORD[28+rsp] - xor eax,esi - rol ecx,5 - xor edx,DWORD[52+rsp] - lea edi,[((-899497514))+rdi*1+r14] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol edx,1 - xor ebp,DWORD[24+rsp] - mov eax,r11d - mov DWORD[20+rsp],edx - mov ecx,edi - xor ebp,DWORD[32+rsp] - xor eax,r13d - rol ecx,5 - xor ebp,DWORD[56+rsp] - lea esi,[((-899497514))+rsi*1+rdx] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol ebp,1 - xor r14d,DWORD[28+rsp] - mov eax,edi - mov DWORD[24+rsp],ebp - mov ecx,esi - xor r14d,DWORD[36+rsp] - xor eax,r12d - rol ecx,5 - xor r14d,DWORD[60+rsp] - lea r13d,[((-899497514))+r13*1+rbp] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol r14d,1 - xor edx,DWORD[32+rsp] - mov eax,esi - mov DWORD[28+rsp],r14d - mov ecx,r13d - xor edx,DWORD[40+rsp] - xor eax,r11d - rol ecx,5 - xor edx,DWORD[rsp] - lea r12d,[((-899497514))+r12*1+r14] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol edx,1 - xor ebp,DWORD[36+rsp] - mov eax,r13d - - mov ecx,r12d - xor ebp,DWORD[44+rsp] - xor eax,edi - rol ecx,5 - xor ebp,DWORD[4+rsp] - lea r11d,[((-899497514))+r11*1+rdx] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol ebp,1 - xor r14d,DWORD[40+rsp] - mov eax,r12d - - mov ecx,r11d - xor r14d,DWORD[48+rsp] - xor eax,esi - rol ecx,5 - xor r14d,DWORD[8+rsp] - lea edi,[((-899497514))+rdi*1+rbp] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol r14d,1 - xor edx,DWORD[44+rsp] - mov eax,r11d - - mov ecx,edi - xor edx,DWORD[52+rsp] - xor eax,r13d - rol ecx,5 - xor edx,DWORD[12+rsp] - lea esi,[((-899497514))+rsi*1+r14] - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - rol edx,1 - xor ebp,DWORD[48+rsp] - mov eax,edi - - mov ecx,esi - xor ebp,DWORD[56+rsp] - xor eax,r12d - rol ecx,5 - xor ebp,DWORD[16+rsp] - lea r13d,[((-899497514))+r13*1+rdx] - xor eax,r11d - add r13d,ecx - rol edi,30 - add r13d,eax - rol ebp,1 - xor r14d,DWORD[52+rsp] - mov eax,esi - - mov ecx,r13d - 
xor r14d,DWORD[60+rsp] - xor eax,r11d - rol ecx,5 - xor r14d,DWORD[20+rsp] - lea r12d,[((-899497514))+r12*1+rbp] - xor eax,edi - add r12d,ecx - rol esi,30 - add r12d,eax - rol r14d,1 - xor edx,DWORD[56+rsp] - mov eax,r13d - - mov ecx,r12d - xor edx,DWORD[rsp] - xor eax,edi - rol ecx,5 - xor edx,DWORD[24+rsp] - lea r11d,[((-899497514))+r11*1+r14] - xor eax,esi - add r11d,ecx - rol r13d,30 - add r11d,eax - rol edx,1 - xor ebp,DWORD[60+rsp] - mov eax,r12d - - mov ecx,r11d - xor ebp,DWORD[4+rsp] - xor eax,esi - rol ecx,5 - xor ebp,DWORD[28+rsp] - lea edi,[((-899497514))+rdi*1+rdx] - xor eax,r13d - add edi,ecx - rol r12d,30 - add edi,eax - rol ebp,1 - mov eax,r11d - mov ecx,edi - xor eax,r13d - lea esi,[((-899497514))+rsi*1+rbp] - rol ecx,5 - xor eax,r12d - add esi,ecx - rol r11d,30 - add esi,eax - add esi,DWORD[r8] - add edi,DWORD[4+r8] - add r11d,DWORD[8+r8] - add r12d,DWORD[12+r8] - add r13d,DWORD[16+r8] - mov DWORD[r8],esi - mov DWORD[4+r8],edi - mov DWORD[8+r8],r11d - mov DWORD[12+r8],r12d - mov DWORD[16+r8],r13d - - sub r10,1 - lea r9,[64+r9] - jnz NEAR $L$loop - - mov rsi,QWORD[64+rsp] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha1_block_data_order: - -ALIGN 16 -sha1_block_data_order_ssse3: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha1_block_data_order_ssse3: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - -_ssse3_shortcut: - - mov r11,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - lea rsp,[((-160))+rsp] - movaps XMMWORD[(-40-96)+r11],xmm6 - movaps XMMWORD[(-40-80)+r11],xmm7 - movaps XMMWORD[(-40-64)+r11],xmm8 - movaps XMMWORD[(-40-48)+r11],xmm9 - movaps XMMWORD[(-40-32)+r11],xmm10 - movaps XMMWORD[(-40-16)+r11],xmm11 -$L$prologue_ssse3: - and 
rsp,-64 - mov r8,rdi - mov r9,rsi - mov r10,rdx - - shl r10,6 - add r10,r9 - lea r14,[((K_XX_XX+64))] - - mov eax,DWORD[r8] - mov ebx,DWORD[4+r8] - mov ecx,DWORD[8+r8] - mov edx,DWORD[12+r8] - mov esi,ebx - mov ebp,DWORD[16+r8] - mov edi,ecx - xor edi,edx - and esi,edi - - movdqa xmm6,XMMWORD[64+r14] - movdqa xmm9,XMMWORD[((-64))+r14] - movdqu xmm0,XMMWORD[r9] - movdqu xmm1,XMMWORD[16+r9] - movdqu xmm2,XMMWORD[32+r9] - movdqu xmm3,XMMWORD[48+r9] -DB 102,15,56,0,198 -DB 102,15,56,0,206 -DB 102,15,56,0,214 - add r9,64 - paddd xmm0,xmm9 -DB 102,15,56,0,222 - paddd xmm1,xmm9 - paddd xmm2,xmm9 - movdqa XMMWORD[rsp],xmm0 - psubd xmm0,xmm9 - movdqa XMMWORD[16+rsp],xmm1 - psubd xmm1,xmm9 - movdqa XMMWORD[32+rsp],xmm2 - psubd xmm2,xmm9 - jmp NEAR $L$oop_ssse3 -ALIGN 16 -$L$oop_ssse3: - ror ebx,2 - pshufd xmm4,xmm0,238 - xor esi,edx - movdqa xmm8,xmm3 - paddd xmm9,xmm3 - mov edi,eax - add ebp,DWORD[rsp] - punpcklqdq xmm4,xmm1 - xor ebx,ecx - rol eax,5 - add ebp,esi - psrldq xmm8,4 - and edi,ebx - xor ebx,ecx - pxor xmm4,xmm0 - add ebp,eax - ror eax,7 - pxor xmm8,xmm2 - xor edi,ecx - mov esi,ebp - add edx,DWORD[4+rsp] - pxor xmm4,xmm8 - xor eax,ebx - rol ebp,5 - movdqa XMMWORD[48+rsp],xmm9 - add edx,edi - and esi,eax - movdqa xmm10,xmm4 - xor eax,ebx - add edx,ebp - ror ebp,7 - movdqa xmm8,xmm4 - xor esi,ebx - pslldq xmm10,12 - paddd xmm4,xmm4 - mov edi,edx - add ecx,DWORD[8+rsp] - psrld xmm8,31 - xor ebp,eax - rol edx,5 - add ecx,esi - movdqa xmm9,xmm10 - and edi,ebp - xor ebp,eax - psrld xmm10,30 - add ecx,edx - ror edx,7 - por xmm4,xmm8 - xor edi,eax - mov esi,ecx - add ebx,DWORD[12+rsp] - pslld xmm9,2 - pxor xmm4,xmm10 - xor edx,ebp - movdqa xmm10,XMMWORD[((-64))+r14] - rol ecx,5 - add ebx,edi - and esi,edx - pxor xmm4,xmm9 - xor edx,ebp - add ebx,ecx - ror ecx,7 - pshufd xmm5,xmm1,238 - xor esi,ebp - movdqa xmm9,xmm4 - paddd xmm10,xmm4 - mov edi,ebx - add eax,DWORD[16+rsp] - punpcklqdq xmm5,xmm2 - xor ecx,edx - rol ebx,5 - add eax,esi - psrldq xmm9,4 - and edi,ecx - xor 
ecx,edx - pxor xmm5,xmm1 - add eax,ebx - ror ebx,7 - pxor xmm9,xmm3 - xor edi,edx - mov esi,eax - add ebp,DWORD[20+rsp] - pxor xmm5,xmm9 - xor ebx,ecx - rol eax,5 - movdqa XMMWORD[rsp],xmm10 - add ebp,edi - and esi,ebx - movdqa xmm8,xmm5 - xor ebx,ecx - add ebp,eax - ror eax,7 - movdqa xmm9,xmm5 - xor esi,ecx - pslldq xmm8,12 - paddd xmm5,xmm5 - mov edi,ebp - add edx,DWORD[24+rsp] - psrld xmm9,31 - xor eax,ebx - rol ebp,5 - add edx,esi - movdqa xmm10,xmm8 - and edi,eax - xor eax,ebx - psrld xmm8,30 - add edx,ebp - ror ebp,7 - por xmm5,xmm9 - xor edi,ebx - mov esi,edx - add ecx,DWORD[28+rsp] - pslld xmm10,2 - pxor xmm5,xmm8 - xor ebp,eax - movdqa xmm8,XMMWORD[((-32))+r14] - rol edx,5 - add ecx,edi - and esi,ebp - pxor xmm5,xmm10 - xor ebp,eax - add ecx,edx - ror edx,7 - pshufd xmm6,xmm2,238 - xor esi,eax - movdqa xmm10,xmm5 - paddd xmm8,xmm5 - mov edi,ecx - add ebx,DWORD[32+rsp] - punpcklqdq xmm6,xmm3 - xor edx,ebp - rol ecx,5 - add ebx,esi - psrldq xmm10,4 - and edi,edx - xor edx,ebp - pxor xmm6,xmm2 - add ebx,ecx - ror ecx,7 - pxor xmm10,xmm4 - xor edi,ebp - mov esi,ebx - add eax,DWORD[36+rsp] - pxor xmm6,xmm10 - xor ecx,edx - rol ebx,5 - movdqa XMMWORD[16+rsp],xmm8 - add eax,edi - and esi,ecx - movdqa xmm9,xmm6 - xor ecx,edx - add eax,ebx - ror ebx,7 - movdqa xmm10,xmm6 - xor esi,edx - pslldq xmm9,12 - paddd xmm6,xmm6 - mov edi,eax - add ebp,DWORD[40+rsp] - psrld xmm10,31 - xor ebx,ecx - rol eax,5 - add ebp,esi - movdqa xmm8,xmm9 - and edi,ebx - xor ebx,ecx - psrld xmm9,30 - add ebp,eax - ror eax,7 - por xmm6,xmm10 - xor edi,ecx - mov esi,ebp - add edx,DWORD[44+rsp] - pslld xmm8,2 - pxor xmm6,xmm9 - xor eax,ebx - movdqa xmm9,XMMWORD[((-32))+r14] - rol ebp,5 - add edx,edi - and esi,eax - pxor xmm6,xmm8 - xor eax,ebx - add edx,ebp - ror ebp,7 - pshufd xmm7,xmm3,238 - xor esi,ebx - movdqa xmm8,xmm6 - paddd xmm9,xmm6 - mov edi,edx - add ecx,DWORD[48+rsp] - punpcklqdq xmm7,xmm4 - xor ebp,eax - rol edx,5 - add ecx,esi - psrldq xmm8,4 - and edi,ebp - xor ebp,eax - pxor 
xmm7,xmm3 - add ecx,edx - ror edx,7 - pxor xmm8,xmm5 - xor edi,eax - mov esi,ecx - add ebx,DWORD[52+rsp] - pxor xmm7,xmm8 - xor edx,ebp - rol ecx,5 - movdqa XMMWORD[32+rsp],xmm9 - add ebx,edi - and esi,edx - movdqa xmm10,xmm7 - xor edx,ebp - add ebx,ecx - ror ecx,7 - movdqa xmm8,xmm7 - xor esi,ebp - pslldq xmm10,12 - paddd xmm7,xmm7 - mov edi,ebx - add eax,DWORD[56+rsp] - psrld xmm8,31 - xor ecx,edx - rol ebx,5 - add eax,esi - movdqa xmm9,xmm10 - and edi,ecx - xor ecx,edx - psrld xmm10,30 - add eax,ebx - ror ebx,7 - por xmm7,xmm8 - xor edi,edx - mov esi,eax - add ebp,DWORD[60+rsp] - pslld xmm9,2 - pxor xmm7,xmm10 - xor ebx,ecx - movdqa xmm10,XMMWORD[((-32))+r14] - rol eax,5 - add ebp,edi - and esi,ebx - pxor xmm7,xmm9 - pshufd xmm9,xmm6,238 - xor ebx,ecx - add ebp,eax - ror eax,7 - pxor xmm0,xmm4 - xor esi,ecx - mov edi,ebp - add edx,DWORD[rsp] - punpcklqdq xmm9,xmm7 - xor eax,ebx - rol ebp,5 - pxor xmm0,xmm1 - add edx,esi - and edi,eax - movdqa xmm8,xmm10 - xor eax,ebx - paddd xmm10,xmm7 - add edx,ebp - pxor xmm0,xmm9 - ror ebp,7 - xor edi,ebx - mov esi,edx - add ecx,DWORD[4+rsp] - movdqa xmm9,xmm0 - xor ebp,eax - rol edx,5 - movdqa XMMWORD[48+rsp],xmm10 - add ecx,edi - and esi,ebp - xor ebp,eax - pslld xmm0,2 - add ecx,edx - ror edx,7 - psrld xmm9,30 - xor esi,eax - mov edi,ecx - add ebx,DWORD[8+rsp] - por xmm0,xmm9 - xor edx,ebp - rol ecx,5 - pshufd xmm10,xmm7,238 - add ebx,esi - and edi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[12+rsp] - xor edi,ebp - mov esi,ebx - rol ebx,5 - add eax,edi - xor esi,edx - ror ecx,7 - add eax,ebx - pxor xmm1,xmm5 - add ebp,DWORD[16+rsp] - xor esi,ecx - punpcklqdq xmm10,xmm0 - mov edi,eax - rol eax,5 - pxor xmm1,xmm2 - add ebp,esi - xor edi,ecx - movdqa xmm9,xmm8 - ror ebx,7 - paddd xmm8,xmm0 - add ebp,eax - pxor xmm1,xmm10 - add edx,DWORD[20+rsp] - xor edi,ebx - mov esi,ebp - rol ebp,5 - movdqa xmm10,xmm1 - add edx,edi - xor esi,ebx - movdqa XMMWORD[rsp],xmm8 - ror eax,7 - add edx,ebp - add ecx,DWORD[24+rsp] - pslld xmm1,2 
- xor esi,eax - mov edi,edx - psrld xmm10,30 - rol edx,5 - add ecx,esi - xor edi,eax - ror ebp,7 - por xmm1,xmm10 - add ecx,edx - add ebx,DWORD[28+rsp] - pshufd xmm8,xmm0,238 - xor edi,ebp - mov esi,ecx - rol ecx,5 - add ebx,edi - xor esi,ebp - ror edx,7 - add ebx,ecx - pxor xmm2,xmm6 - add eax,DWORD[32+rsp] - xor esi,edx - punpcklqdq xmm8,xmm1 - mov edi,ebx - rol ebx,5 - pxor xmm2,xmm3 - add eax,esi - xor edi,edx - movdqa xmm10,XMMWORD[r14] - ror ecx,7 - paddd xmm9,xmm1 - add eax,ebx - pxor xmm2,xmm8 - add ebp,DWORD[36+rsp] - xor edi,ecx - mov esi,eax - rol eax,5 - movdqa xmm8,xmm2 - add ebp,edi - xor esi,ecx - movdqa XMMWORD[16+rsp],xmm9 - ror ebx,7 - add ebp,eax - add edx,DWORD[40+rsp] - pslld xmm2,2 - xor esi,ebx - mov edi,ebp - psrld xmm8,30 - rol ebp,5 - add edx,esi - xor edi,ebx - ror eax,7 - por xmm2,xmm8 - add edx,ebp - add ecx,DWORD[44+rsp] - pshufd xmm9,xmm1,238 - xor edi,eax - mov esi,edx - rol edx,5 - add ecx,edi - xor esi,eax - ror ebp,7 - add ecx,edx - pxor xmm3,xmm7 - add ebx,DWORD[48+rsp] - xor esi,ebp - punpcklqdq xmm9,xmm2 - mov edi,ecx - rol ecx,5 - pxor xmm3,xmm4 - add ebx,esi - xor edi,ebp - movdqa xmm8,xmm10 - ror edx,7 - paddd xmm10,xmm2 - add ebx,ecx - pxor xmm3,xmm9 - add eax,DWORD[52+rsp] - xor edi,edx - mov esi,ebx - rol ebx,5 - movdqa xmm9,xmm3 - add eax,edi - xor esi,edx - movdqa XMMWORD[32+rsp],xmm10 - ror ecx,7 - add eax,ebx - add ebp,DWORD[56+rsp] - pslld xmm3,2 - xor esi,ecx - mov edi,eax - psrld xmm9,30 - rol eax,5 - add ebp,esi - xor edi,ecx - ror ebx,7 - por xmm3,xmm9 - add ebp,eax - add edx,DWORD[60+rsp] - pshufd xmm10,xmm2,238 - xor edi,ebx - mov esi,ebp - rol ebp,5 - add edx,edi - xor esi,ebx - ror eax,7 - add edx,ebp - pxor xmm4,xmm0 - add ecx,DWORD[rsp] - xor esi,eax - punpcklqdq xmm10,xmm3 - mov edi,edx - rol edx,5 - pxor xmm4,xmm5 - add ecx,esi - xor edi,eax - movdqa xmm9,xmm8 - ror ebp,7 - paddd xmm8,xmm3 - add ecx,edx - pxor xmm4,xmm10 - add ebx,DWORD[4+rsp] - xor edi,ebp - mov esi,ecx - rol ecx,5 - movdqa xmm10,xmm4 - 
add ebx,edi - xor esi,ebp - movdqa XMMWORD[48+rsp],xmm8 - ror edx,7 - add ebx,ecx - add eax,DWORD[8+rsp] - pslld xmm4,2 - xor esi,edx - mov edi,ebx - psrld xmm10,30 - rol ebx,5 - add eax,esi - xor edi,edx - ror ecx,7 - por xmm4,xmm10 - add eax,ebx - add ebp,DWORD[12+rsp] - pshufd xmm8,xmm3,238 - xor edi,ecx - mov esi,eax - rol eax,5 - add ebp,edi - xor esi,ecx - ror ebx,7 - add ebp,eax - pxor xmm5,xmm1 - add edx,DWORD[16+rsp] - xor esi,ebx - punpcklqdq xmm8,xmm4 - mov edi,ebp - rol ebp,5 - pxor xmm5,xmm6 - add edx,esi - xor edi,ebx - movdqa xmm10,xmm9 - ror eax,7 - paddd xmm9,xmm4 - add edx,ebp - pxor xmm5,xmm8 - add ecx,DWORD[20+rsp] - xor edi,eax - mov esi,edx - rol edx,5 - movdqa xmm8,xmm5 - add ecx,edi - xor esi,eax - movdqa XMMWORD[rsp],xmm9 - ror ebp,7 - add ecx,edx - add ebx,DWORD[24+rsp] - pslld xmm5,2 - xor esi,ebp - mov edi,ecx - psrld xmm8,30 - rol ecx,5 - add ebx,esi - xor edi,ebp - ror edx,7 - por xmm5,xmm8 - add ebx,ecx - add eax,DWORD[28+rsp] - pshufd xmm9,xmm4,238 - ror ecx,7 - mov esi,ebx - xor edi,edx - rol ebx,5 - add eax,edi - xor esi,ecx - xor ecx,edx - add eax,ebx - pxor xmm6,xmm2 - add ebp,DWORD[32+rsp] - and esi,ecx - xor ecx,edx - ror ebx,7 - punpcklqdq xmm9,xmm5 - mov edi,eax - xor esi,ecx - pxor xmm6,xmm7 - rol eax,5 - add ebp,esi - movdqa xmm8,xmm10 - xor edi,ebx - paddd xmm10,xmm5 - xor ebx,ecx - pxor xmm6,xmm9 - add ebp,eax - add edx,DWORD[36+rsp] - and edi,ebx - xor ebx,ecx - ror eax,7 - movdqa xmm9,xmm6 - mov esi,ebp - xor edi,ebx - movdqa XMMWORD[16+rsp],xmm10 - rol ebp,5 - add edx,edi - xor esi,eax - pslld xmm6,2 - xor eax,ebx - add edx,ebp - psrld xmm9,30 - add ecx,DWORD[40+rsp] - and esi,eax - xor eax,ebx - por xmm6,xmm9 - ror ebp,7 - mov edi,edx - xor esi,eax - rol edx,5 - pshufd xmm10,xmm5,238 - add ecx,esi - xor edi,ebp - xor ebp,eax - add ecx,edx - add ebx,DWORD[44+rsp] - and edi,ebp - xor ebp,eax - ror edx,7 - mov esi,ecx - xor edi,ebp - rol ecx,5 - add ebx,edi - xor esi,edx - xor edx,ebp - add ebx,ecx - pxor xmm7,xmm3 - add 
eax,DWORD[48+rsp] - and esi,edx - xor edx,ebp - ror ecx,7 - punpcklqdq xmm10,xmm6 - mov edi,ebx - xor esi,edx - pxor xmm7,xmm0 - rol ebx,5 - add eax,esi - movdqa xmm9,XMMWORD[32+r14] - xor edi,ecx - paddd xmm8,xmm6 - xor ecx,edx - pxor xmm7,xmm10 - add eax,ebx - add ebp,DWORD[52+rsp] - and edi,ecx - xor ecx,edx - ror ebx,7 - movdqa xmm10,xmm7 - mov esi,eax - xor edi,ecx - movdqa XMMWORD[32+rsp],xmm8 - rol eax,5 - add ebp,edi - xor esi,ebx - pslld xmm7,2 - xor ebx,ecx - add ebp,eax - psrld xmm10,30 - add edx,DWORD[56+rsp] - and esi,ebx - xor ebx,ecx - por xmm7,xmm10 - ror eax,7 - mov edi,ebp - xor esi,ebx - rol ebp,5 - pshufd xmm8,xmm6,238 - add edx,esi - xor edi,eax - xor eax,ebx - add edx,ebp - add ecx,DWORD[60+rsp] - and edi,eax - xor eax,ebx - ror ebp,7 - mov esi,edx - xor edi,eax - rol edx,5 - add ecx,edi - xor esi,ebp - xor ebp,eax - add ecx,edx - pxor xmm0,xmm4 - add ebx,DWORD[rsp] - and esi,ebp - xor ebp,eax - ror edx,7 - punpcklqdq xmm8,xmm7 - mov edi,ecx - xor esi,ebp - pxor xmm0,xmm1 - rol ecx,5 - add ebx,esi - movdqa xmm10,xmm9 - xor edi,edx - paddd xmm9,xmm7 - xor edx,ebp - pxor xmm0,xmm8 - add ebx,ecx - add eax,DWORD[4+rsp] - and edi,edx - xor edx,ebp - ror ecx,7 - movdqa xmm8,xmm0 - mov esi,ebx - xor edi,edx - movdqa XMMWORD[48+rsp],xmm9 - rol ebx,5 - add eax,edi - xor esi,ecx - pslld xmm0,2 - xor ecx,edx - add eax,ebx - psrld xmm8,30 - add ebp,DWORD[8+rsp] - and esi,ecx - xor ecx,edx - por xmm0,xmm8 - ror ebx,7 - mov edi,eax - xor esi,ecx - rol eax,5 - pshufd xmm9,xmm7,238 - add ebp,esi - xor edi,ebx - xor ebx,ecx - add ebp,eax - add edx,DWORD[12+rsp] - and edi,ebx - xor ebx,ecx - ror eax,7 - mov esi,ebp - xor edi,ebx - rol ebp,5 - add edx,edi - xor esi,eax - xor eax,ebx - add edx,ebp - pxor xmm1,xmm5 - add ecx,DWORD[16+rsp] - and esi,eax - xor eax,ebx - ror ebp,7 - punpcklqdq xmm9,xmm0 - mov edi,edx - xor esi,eax - pxor xmm1,xmm2 - rol edx,5 - add ecx,esi - movdqa xmm8,xmm10 - xor edi,ebp - paddd xmm10,xmm0 - xor ebp,eax - pxor xmm1,xmm9 - add 
ecx,edx - add ebx,DWORD[20+rsp] - and edi,ebp - xor ebp,eax - ror edx,7 - movdqa xmm9,xmm1 - mov esi,ecx - xor edi,ebp - movdqa XMMWORD[rsp],xmm10 - rol ecx,5 - add ebx,edi - xor esi,edx - pslld xmm1,2 - xor edx,ebp - add ebx,ecx - psrld xmm9,30 - add eax,DWORD[24+rsp] - and esi,edx - xor edx,ebp - por xmm1,xmm9 - ror ecx,7 - mov edi,ebx - xor esi,edx - rol ebx,5 - pshufd xmm10,xmm0,238 - add eax,esi - xor edi,ecx - xor ecx,edx - add eax,ebx - add ebp,DWORD[28+rsp] - and edi,ecx - xor ecx,edx - ror ebx,7 - mov esi,eax - xor edi,ecx - rol eax,5 - add ebp,edi - xor esi,ebx - xor ebx,ecx - add ebp,eax - pxor xmm2,xmm6 - add edx,DWORD[32+rsp] - and esi,ebx - xor ebx,ecx - ror eax,7 - punpcklqdq xmm10,xmm1 - mov edi,ebp - xor esi,ebx - pxor xmm2,xmm3 - rol ebp,5 - add edx,esi - movdqa xmm9,xmm8 - xor edi,eax - paddd xmm8,xmm1 - xor eax,ebx - pxor xmm2,xmm10 - add edx,ebp - add ecx,DWORD[36+rsp] - and edi,eax - xor eax,ebx - ror ebp,7 - movdqa xmm10,xmm2 - mov esi,edx - xor edi,eax - movdqa XMMWORD[16+rsp],xmm8 - rol edx,5 - add ecx,edi - xor esi,ebp - pslld xmm2,2 - xor ebp,eax - add ecx,edx - psrld xmm10,30 - add ebx,DWORD[40+rsp] - and esi,ebp - xor ebp,eax - por xmm2,xmm10 - ror edx,7 - mov edi,ecx - xor esi,ebp - rol ecx,5 - pshufd xmm8,xmm1,238 - add ebx,esi - xor edi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[44+rsp] - and edi,edx - xor edx,ebp - ror ecx,7 - mov esi,ebx - xor edi,edx - rol ebx,5 - add eax,edi - xor esi,edx - add eax,ebx - pxor xmm3,xmm7 - add ebp,DWORD[48+rsp] - xor esi,ecx - punpcklqdq xmm8,xmm2 - mov edi,eax - rol eax,5 - pxor xmm3,xmm4 - add ebp,esi - xor edi,ecx - movdqa xmm10,xmm9 - ror ebx,7 - paddd xmm9,xmm2 - add ebp,eax - pxor xmm3,xmm8 - add edx,DWORD[52+rsp] - xor edi,ebx - mov esi,ebp - rol ebp,5 - movdqa xmm8,xmm3 - add edx,edi - xor esi,ebx - movdqa XMMWORD[32+rsp],xmm9 - ror eax,7 - add edx,ebp - add ecx,DWORD[56+rsp] - pslld xmm3,2 - xor esi,eax - mov edi,edx - psrld xmm8,30 - rol edx,5 - add ecx,esi - xor edi,eax - ror ebp,7 
- por xmm3,xmm8 - add ecx,edx - add ebx,DWORD[60+rsp] - xor edi,ebp - mov esi,ecx - rol ecx,5 - add ebx,edi - xor esi,ebp - ror edx,7 - add ebx,ecx - add eax,DWORD[rsp] - xor esi,edx - mov edi,ebx - rol ebx,5 - paddd xmm10,xmm3 - add eax,esi - xor edi,edx - movdqa XMMWORD[48+rsp],xmm10 - ror ecx,7 - add eax,ebx - add ebp,DWORD[4+rsp] - xor edi,ecx - mov esi,eax - rol eax,5 - add ebp,edi - xor esi,ecx - ror ebx,7 - add ebp,eax - add edx,DWORD[8+rsp] - xor esi,ebx - mov edi,ebp - rol ebp,5 - add edx,esi - xor edi,ebx - ror eax,7 - add edx,ebp - add ecx,DWORD[12+rsp] - xor edi,eax - mov esi,edx - rol edx,5 - add ecx,edi - xor esi,eax - ror ebp,7 - add ecx,edx - cmp r9,r10 - je NEAR $L$done_ssse3 - movdqa xmm6,XMMWORD[64+r14] - movdqa xmm9,XMMWORD[((-64))+r14] - movdqu xmm0,XMMWORD[r9] - movdqu xmm1,XMMWORD[16+r9] - movdqu xmm2,XMMWORD[32+r9] - movdqu xmm3,XMMWORD[48+r9] -DB 102,15,56,0,198 - add r9,64 - add ebx,DWORD[16+rsp] - xor esi,ebp - mov edi,ecx -DB 102,15,56,0,206 - rol ecx,5 - add ebx,esi - xor edi,ebp - ror edx,7 - paddd xmm0,xmm9 - add ebx,ecx - add eax,DWORD[20+rsp] - xor edi,edx - mov esi,ebx - movdqa XMMWORD[rsp],xmm0 - rol ebx,5 - add eax,edi - xor esi,edx - ror ecx,7 - psubd xmm0,xmm9 - add eax,ebx - add ebp,DWORD[24+rsp] - xor esi,ecx - mov edi,eax - rol eax,5 - add ebp,esi - xor edi,ecx - ror ebx,7 - add ebp,eax - add edx,DWORD[28+rsp] - xor edi,ebx - mov esi,ebp - rol ebp,5 - add edx,edi - xor esi,ebx - ror eax,7 - add edx,ebp - add ecx,DWORD[32+rsp] - xor esi,eax - mov edi,edx -DB 102,15,56,0,214 - rol edx,5 - add ecx,esi - xor edi,eax - ror ebp,7 - paddd xmm1,xmm9 - add ecx,edx - add ebx,DWORD[36+rsp] - xor edi,ebp - mov esi,ecx - movdqa XMMWORD[16+rsp],xmm1 - rol ecx,5 - add ebx,edi - xor esi,ebp - ror edx,7 - psubd xmm1,xmm9 - add ebx,ecx - add eax,DWORD[40+rsp] - xor esi,edx - mov edi,ebx - rol ebx,5 - add eax,esi - xor edi,edx - ror ecx,7 - add eax,ebx - add ebp,DWORD[44+rsp] - xor edi,ecx - mov esi,eax - rol eax,5 - add ebp,edi - xor esi,ecx 
- ror ebx,7 - add ebp,eax - add edx,DWORD[48+rsp] - xor esi,ebx - mov edi,ebp -DB 102,15,56,0,222 - rol ebp,5 - add edx,esi - xor edi,ebx - ror eax,7 - paddd xmm2,xmm9 - add edx,ebp - add ecx,DWORD[52+rsp] - xor edi,eax - mov esi,edx - movdqa XMMWORD[32+rsp],xmm2 - rol edx,5 - add ecx,edi - xor esi,eax - ror ebp,7 - psubd xmm2,xmm9 - add ecx,edx - add ebx,DWORD[56+rsp] - xor esi,ebp - mov edi,ecx - rol ecx,5 - add ebx,esi - xor edi,ebp - ror edx,7 - add ebx,ecx - add eax,DWORD[60+rsp] - xor edi,edx - mov esi,ebx - rol ebx,5 - add eax,edi - ror ecx,7 - add eax,ebx - add eax,DWORD[r8] - add esi,DWORD[4+r8] - add ecx,DWORD[8+r8] - add edx,DWORD[12+r8] - mov DWORD[r8],eax - add ebp,DWORD[16+r8] - mov DWORD[4+r8],esi - mov ebx,esi - mov DWORD[8+r8],ecx - mov edi,ecx - mov DWORD[12+r8],edx - xor edi,edx - mov DWORD[16+r8],ebp - and esi,edi - jmp NEAR $L$oop_ssse3 - -ALIGN 16 -$L$done_ssse3: - add ebx,DWORD[16+rsp] - xor esi,ebp - mov edi,ecx - rol ecx,5 - add ebx,esi - xor edi,ebp - ror edx,7 - add ebx,ecx - add eax,DWORD[20+rsp] - xor edi,edx - mov esi,ebx - rol ebx,5 - add eax,edi - xor esi,edx - ror ecx,7 - add eax,ebx - add ebp,DWORD[24+rsp] - xor esi,ecx - mov edi,eax - rol eax,5 - add ebp,esi - xor edi,ecx - ror ebx,7 - add ebp,eax - add edx,DWORD[28+rsp] - xor edi,ebx - mov esi,ebp - rol ebp,5 - add edx,edi - xor esi,ebx - ror eax,7 - add edx,ebp - add ecx,DWORD[32+rsp] - xor esi,eax - mov edi,edx - rol edx,5 - add ecx,esi - xor edi,eax - ror ebp,7 - add ecx,edx - add ebx,DWORD[36+rsp] - xor edi,ebp - mov esi,ecx - rol ecx,5 - add ebx,edi - xor esi,ebp - ror edx,7 - add ebx,ecx - add eax,DWORD[40+rsp] - xor esi,edx - mov edi,ebx - rol ebx,5 - add eax,esi - xor edi,edx - ror ecx,7 - add eax,ebx - add ebp,DWORD[44+rsp] - xor edi,ecx - mov esi,eax - rol eax,5 - add ebp,edi - xor esi,ecx - ror ebx,7 - add ebp,eax - add edx,DWORD[48+rsp] - xor esi,ebx - mov edi,ebp - rol ebp,5 - add edx,esi - xor edi,ebx - ror eax,7 - add edx,ebp - add ecx,DWORD[52+rsp] - xor edi,eax - 
mov esi,edx - rol edx,5 - add ecx,edi - xor esi,eax - ror ebp,7 - add ecx,edx - add ebx,DWORD[56+rsp] - xor esi,ebp - mov edi,ecx - rol ecx,5 - add ebx,esi - xor edi,ebp - ror edx,7 - add ebx,ecx - add eax,DWORD[60+rsp] - xor edi,edx - mov esi,ebx - rol ebx,5 - add eax,edi - ror ecx,7 - add eax,ebx - add eax,DWORD[r8] - add esi,DWORD[4+r8] - add ecx,DWORD[8+r8] - mov DWORD[r8],eax - add edx,DWORD[12+r8] - mov DWORD[4+r8],esi - add ebp,DWORD[16+r8] - mov DWORD[8+r8],ecx - mov DWORD[12+r8],edx - mov DWORD[16+r8],ebp - movaps xmm6,XMMWORD[((-40-96))+r11] - movaps xmm7,XMMWORD[((-40-80))+r11] - movaps xmm8,XMMWORD[((-40-64))+r11] - movaps xmm9,XMMWORD[((-40-48))+r11] - movaps xmm10,XMMWORD[((-40-32))+r11] - movaps xmm11,XMMWORD[((-40-16))+r11] - mov r14,QWORD[((-40))+r11] - - mov r13,QWORD[((-32))+r11] - - mov r12,QWORD[((-24))+r11] - - mov rbp,QWORD[((-16))+r11] - - mov rbx,QWORD[((-8))+r11] - - lea rsp,[r11] - -$L$epilogue_ssse3: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha1_block_data_order_ssse3: - -ALIGN 16 -sha1_block_data_order_avx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha1_block_data_order_avx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - -_avx_shortcut: - - mov r11,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - lea rsp,[((-160))+rsp] - vzeroupper - vmovaps XMMWORD[(-40-96)+r11],xmm6 - vmovaps XMMWORD[(-40-80)+r11],xmm7 - vmovaps XMMWORD[(-40-64)+r11],xmm8 - vmovaps XMMWORD[(-40-48)+r11],xmm9 - vmovaps XMMWORD[(-40-32)+r11],xmm10 - vmovaps XMMWORD[(-40-16)+r11],xmm11 -$L$prologue_avx: - and rsp,-64 - mov r8,rdi - mov r9,rsi - mov r10,rdx - - shl r10,6 - add r10,r9 - lea r14,[((K_XX_XX+64))] - - mov eax,DWORD[r8] - mov ebx,DWORD[4+r8] - mov ecx,DWORD[8+r8] - mov edx,DWORD[12+r8] - mov esi,ebx - mov ebp,DWORD[16+r8] - mov edi,ecx - xor edi,edx - and esi,edi - - vmovdqa xmm6,XMMWORD[64+r14] - vmovdqa xmm11,XMMWORD[((-64))+r14] 
- vmovdqu xmm0,XMMWORD[r9] - vmovdqu xmm1,XMMWORD[16+r9] - vmovdqu xmm2,XMMWORD[32+r9] - vmovdqu xmm3,XMMWORD[48+r9] - vpshufb xmm0,xmm0,xmm6 - add r9,64 - vpshufb xmm1,xmm1,xmm6 - vpshufb xmm2,xmm2,xmm6 - vpshufb xmm3,xmm3,xmm6 - vpaddd xmm4,xmm0,xmm11 - vpaddd xmm5,xmm1,xmm11 - vpaddd xmm6,xmm2,xmm11 - vmovdqa XMMWORD[rsp],xmm4 - vmovdqa XMMWORD[16+rsp],xmm5 - vmovdqa XMMWORD[32+rsp],xmm6 - jmp NEAR $L$oop_avx -ALIGN 16 -$L$oop_avx: - shrd ebx,ebx,2 - xor esi,edx - vpalignr xmm4,xmm1,xmm0,8 - mov edi,eax - add ebp,DWORD[rsp] - vpaddd xmm9,xmm11,xmm3 - xor ebx,ecx - shld eax,eax,5 - vpsrldq xmm8,xmm3,4 - add ebp,esi - and edi,ebx - vpxor xmm4,xmm4,xmm0 - xor ebx,ecx - add ebp,eax - vpxor xmm8,xmm8,xmm2 - shrd eax,eax,7 - xor edi,ecx - mov esi,ebp - add edx,DWORD[4+rsp] - vpxor xmm4,xmm4,xmm8 - xor eax,ebx - shld ebp,ebp,5 - vmovdqa XMMWORD[48+rsp],xmm9 - add edx,edi - and esi,eax - vpsrld xmm8,xmm4,31 - xor eax,ebx - add edx,ebp - shrd ebp,ebp,7 - xor esi,ebx - vpslldq xmm10,xmm4,12 - vpaddd xmm4,xmm4,xmm4 - mov edi,edx - add ecx,DWORD[8+rsp] - xor ebp,eax - shld edx,edx,5 - vpsrld xmm9,xmm10,30 - vpor xmm4,xmm4,xmm8 - add ecx,esi - and edi,ebp - xor ebp,eax - add ecx,edx - vpslld xmm10,xmm10,2 - vpxor xmm4,xmm4,xmm9 - shrd edx,edx,7 - xor edi,eax - mov esi,ecx - add ebx,DWORD[12+rsp] - vpxor xmm4,xmm4,xmm10 - xor edx,ebp - shld ecx,ecx,5 - add ebx,edi - and esi,edx - xor edx,ebp - add ebx,ecx - shrd ecx,ecx,7 - xor esi,ebp - vpalignr xmm5,xmm2,xmm1,8 - mov edi,ebx - add eax,DWORD[16+rsp] - vpaddd xmm9,xmm11,xmm4 - xor ecx,edx - shld ebx,ebx,5 - vpsrldq xmm8,xmm4,4 - add eax,esi - and edi,ecx - vpxor xmm5,xmm5,xmm1 - xor ecx,edx - add eax,ebx - vpxor xmm8,xmm8,xmm3 - shrd ebx,ebx,7 - xor edi,edx - mov esi,eax - add ebp,DWORD[20+rsp] - vpxor xmm5,xmm5,xmm8 - xor ebx,ecx - shld eax,eax,5 - vmovdqa XMMWORD[rsp],xmm9 - add ebp,edi - and esi,ebx - vpsrld xmm8,xmm5,31 - xor ebx,ecx - add ebp,eax - shrd eax,eax,7 - xor esi,ecx - vpslldq xmm10,xmm5,12 - vpaddd 
xmm5,xmm5,xmm5 - mov edi,ebp - add edx,DWORD[24+rsp] - xor eax,ebx - shld ebp,ebp,5 - vpsrld xmm9,xmm10,30 - vpor xmm5,xmm5,xmm8 - add edx,esi - and edi,eax - xor eax,ebx - add edx,ebp - vpslld xmm10,xmm10,2 - vpxor xmm5,xmm5,xmm9 - shrd ebp,ebp,7 - xor edi,ebx - mov esi,edx - add ecx,DWORD[28+rsp] - vpxor xmm5,xmm5,xmm10 - xor ebp,eax - shld edx,edx,5 - vmovdqa xmm11,XMMWORD[((-32))+r14] - add ecx,edi - and esi,ebp - xor ebp,eax - add ecx,edx - shrd edx,edx,7 - xor esi,eax - vpalignr xmm6,xmm3,xmm2,8 - mov edi,ecx - add ebx,DWORD[32+rsp] - vpaddd xmm9,xmm11,xmm5 - xor edx,ebp - shld ecx,ecx,5 - vpsrldq xmm8,xmm5,4 - add ebx,esi - and edi,edx - vpxor xmm6,xmm6,xmm2 - xor edx,ebp - add ebx,ecx - vpxor xmm8,xmm8,xmm4 - shrd ecx,ecx,7 - xor edi,ebp - mov esi,ebx - add eax,DWORD[36+rsp] - vpxor xmm6,xmm6,xmm8 - xor ecx,edx - shld ebx,ebx,5 - vmovdqa XMMWORD[16+rsp],xmm9 - add eax,edi - and esi,ecx - vpsrld xmm8,xmm6,31 - xor ecx,edx - add eax,ebx - shrd ebx,ebx,7 - xor esi,edx - vpslldq xmm10,xmm6,12 - vpaddd xmm6,xmm6,xmm6 - mov edi,eax - add ebp,DWORD[40+rsp] - xor ebx,ecx - shld eax,eax,5 - vpsrld xmm9,xmm10,30 - vpor xmm6,xmm6,xmm8 - add ebp,esi - and edi,ebx - xor ebx,ecx - add ebp,eax - vpslld xmm10,xmm10,2 - vpxor xmm6,xmm6,xmm9 - shrd eax,eax,7 - xor edi,ecx - mov esi,ebp - add edx,DWORD[44+rsp] - vpxor xmm6,xmm6,xmm10 - xor eax,ebx - shld ebp,ebp,5 - add edx,edi - and esi,eax - xor eax,ebx - add edx,ebp - shrd ebp,ebp,7 - xor esi,ebx - vpalignr xmm7,xmm4,xmm3,8 - mov edi,edx - add ecx,DWORD[48+rsp] - vpaddd xmm9,xmm11,xmm6 - xor ebp,eax - shld edx,edx,5 - vpsrldq xmm8,xmm6,4 - add ecx,esi - and edi,ebp - vpxor xmm7,xmm7,xmm3 - xor ebp,eax - add ecx,edx - vpxor xmm8,xmm8,xmm5 - shrd edx,edx,7 - xor edi,eax - mov esi,ecx - add ebx,DWORD[52+rsp] - vpxor xmm7,xmm7,xmm8 - xor edx,ebp - shld ecx,ecx,5 - vmovdqa XMMWORD[32+rsp],xmm9 - add ebx,edi - and esi,edx - vpsrld xmm8,xmm7,31 - xor edx,ebp - add ebx,ecx - shrd ecx,ecx,7 - xor esi,ebp - vpslldq xmm10,xmm7,12 - 
vpaddd xmm7,xmm7,xmm7 - mov edi,ebx - add eax,DWORD[56+rsp] - xor ecx,edx - shld ebx,ebx,5 - vpsrld xmm9,xmm10,30 - vpor xmm7,xmm7,xmm8 - add eax,esi - and edi,ecx - xor ecx,edx - add eax,ebx - vpslld xmm10,xmm10,2 - vpxor xmm7,xmm7,xmm9 - shrd ebx,ebx,7 - xor edi,edx - mov esi,eax - add ebp,DWORD[60+rsp] - vpxor xmm7,xmm7,xmm10 - xor ebx,ecx - shld eax,eax,5 - add ebp,edi - and esi,ebx - xor ebx,ecx - add ebp,eax - vpalignr xmm8,xmm7,xmm6,8 - vpxor xmm0,xmm0,xmm4 - shrd eax,eax,7 - xor esi,ecx - mov edi,ebp - add edx,DWORD[rsp] - vpxor xmm0,xmm0,xmm1 - xor eax,ebx - shld ebp,ebp,5 - vpaddd xmm9,xmm11,xmm7 - add edx,esi - and edi,eax - vpxor xmm0,xmm0,xmm8 - xor eax,ebx - add edx,ebp - shrd ebp,ebp,7 - xor edi,ebx - vpsrld xmm8,xmm0,30 - vmovdqa XMMWORD[48+rsp],xmm9 - mov esi,edx - add ecx,DWORD[4+rsp] - xor ebp,eax - shld edx,edx,5 - vpslld xmm0,xmm0,2 - add ecx,edi - and esi,ebp - xor ebp,eax - add ecx,edx - shrd edx,edx,7 - xor esi,eax - mov edi,ecx - add ebx,DWORD[8+rsp] - vpor xmm0,xmm0,xmm8 - xor edx,ebp - shld ecx,ecx,5 - add ebx,esi - and edi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[12+rsp] - xor edi,ebp - mov esi,ebx - shld ebx,ebx,5 - add eax,edi - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - vpalignr xmm8,xmm0,xmm7,8 - vpxor xmm1,xmm1,xmm5 - add ebp,DWORD[16+rsp] - xor esi,ecx - mov edi,eax - shld eax,eax,5 - vpxor xmm1,xmm1,xmm2 - add ebp,esi - xor edi,ecx - vpaddd xmm9,xmm11,xmm0 - shrd ebx,ebx,7 - add ebp,eax - vpxor xmm1,xmm1,xmm8 - add edx,DWORD[20+rsp] - xor edi,ebx - mov esi,ebp - shld ebp,ebp,5 - vpsrld xmm8,xmm1,30 - vmovdqa XMMWORD[rsp],xmm9 - add edx,edi - xor esi,ebx - shrd eax,eax,7 - add edx,ebp - vpslld xmm1,xmm1,2 - add ecx,DWORD[24+rsp] - xor esi,eax - mov edi,edx - shld edx,edx,5 - add ecx,esi - xor edi,eax - shrd ebp,ebp,7 - add ecx,edx - vpor xmm1,xmm1,xmm8 - add ebx,DWORD[28+rsp] - xor edi,ebp - mov esi,ecx - shld ecx,ecx,5 - add ebx,edi - xor esi,ebp - shrd edx,edx,7 - add ebx,ecx - vpalignr xmm8,xmm1,xmm0,8 - vpxor 
xmm2,xmm2,xmm6 - add eax,DWORD[32+rsp] - xor esi,edx - mov edi,ebx - shld ebx,ebx,5 - vpxor xmm2,xmm2,xmm3 - add eax,esi - xor edi,edx - vpaddd xmm9,xmm11,xmm1 - vmovdqa xmm11,XMMWORD[r14] - shrd ecx,ecx,7 - add eax,ebx - vpxor xmm2,xmm2,xmm8 - add ebp,DWORD[36+rsp] - xor edi,ecx - mov esi,eax - shld eax,eax,5 - vpsrld xmm8,xmm2,30 - vmovdqa XMMWORD[16+rsp],xmm9 - add ebp,edi - xor esi,ecx - shrd ebx,ebx,7 - add ebp,eax - vpslld xmm2,xmm2,2 - add edx,DWORD[40+rsp] - xor esi,ebx - mov edi,ebp - shld ebp,ebp,5 - add edx,esi - xor edi,ebx - shrd eax,eax,7 - add edx,ebp - vpor xmm2,xmm2,xmm8 - add ecx,DWORD[44+rsp] - xor edi,eax - mov esi,edx - shld edx,edx,5 - add ecx,edi - xor esi,eax - shrd ebp,ebp,7 - add ecx,edx - vpalignr xmm8,xmm2,xmm1,8 - vpxor xmm3,xmm3,xmm7 - add ebx,DWORD[48+rsp] - xor esi,ebp - mov edi,ecx - shld ecx,ecx,5 - vpxor xmm3,xmm3,xmm4 - add ebx,esi - xor edi,ebp - vpaddd xmm9,xmm11,xmm2 - shrd edx,edx,7 - add ebx,ecx - vpxor xmm3,xmm3,xmm8 - add eax,DWORD[52+rsp] - xor edi,edx - mov esi,ebx - shld ebx,ebx,5 - vpsrld xmm8,xmm3,30 - vmovdqa XMMWORD[32+rsp],xmm9 - add eax,edi - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - vpslld xmm3,xmm3,2 - add ebp,DWORD[56+rsp] - xor esi,ecx - mov edi,eax - shld eax,eax,5 - add ebp,esi - xor edi,ecx - shrd ebx,ebx,7 - add ebp,eax - vpor xmm3,xmm3,xmm8 - add edx,DWORD[60+rsp] - xor edi,ebx - mov esi,ebp - shld ebp,ebp,5 - add edx,edi - xor esi,ebx - shrd eax,eax,7 - add edx,ebp - vpalignr xmm8,xmm3,xmm2,8 - vpxor xmm4,xmm4,xmm0 - add ecx,DWORD[rsp] - xor esi,eax - mov edi,edx - shld edx,edx,5 - vpxor xmm4,xmm4,xmm5 - add ecx,esi - xor edi,eax - vpaddd xmm9,xmm11,xmm3 - shrd ebp,ebp,7 - add ecx,edx - vpxor xmm4,xmm4,xmm8 - add ebx,DWORD[4+rsp] - xor edi,ebp - mov esi,ecx - shld ecx,ecx,5 - vpsrld xmm8,xmm4,30 - vmovdqa XMMWORD[48+rsp],xmm9 - add ebx,edi - xor esi,ebp - shrd edx,edx,7 - add ebx,ecx - vpslld xmm4,xmm4,2 - add eax,DWORD[8+rsp] - xor esi,edx - mov edi,ebx - shld ebx,ebx,5 - add eax,esi - xor edi,edx - 
shrd ecx,ecx,7 - add eax,ebx - vpor xmm4,xmm4,xmm8 - add ebp,DWORD[12+rsp] - xor edi,ecx - mov esi,eax - shld eax,eax,5 - add ebp,edi - xor esi,ecx - shrd ebx,ebx,7 - add ebp,eax - vpalignr xmm8,xmm4,xmm3,8 - vpxor xmm5,xmm5,xmm1 - add edx,DWORD[16+rsp] - xor esi,ebx - mov edi,ebp - shld ebp,ebp,5 - vpxor xmm5,xmm5,xmm6 - add edx,esi - xor edi,ebx - vpaddd xmm9,xmm11,xmm4 - shrd eax,eax,7 - add edx,ebp - vpxor xmm5,xmm5,xmm8 - add ecx,DWORD[20+rsp] - xor edi,eax - mov esi,edx - shld edx,edx,5 - vpsrld xmm8,xmm5,30 - vmovdqa XMMWORD[rsp],xmm9 - add ecx,edi - xor esi,eax - shrd ebp,ebp,7 - add ecx,edx - vpslld xmm5,xmm5,2 - add ebx,DWORD[24+rsp] - xor esi,ebp - mov edi,ecx - shld ecx,ecx,5 - add ebx,esi - xor edi,ebp - shrd edx,edx,7 - add ebx,ecx - vpor xmm5,xmm5,xmm8 - add eax,DWORD[28+rsp] - shrd ecx,ecx,7 - mov esi,ebx - xor edi,edx - shld ebx,ebx,5 - add eax,edi - xor esi,ecx - xor ecx,edx - add eax,ebx - vpalignr xmm8,xmm5,xmm4,8 - vpxor xmm6,xmm6,xmm2 - add ebp,DWORD[32+rsp] - and esi,ecx - xor ecx,edx - shrd ebx,ebx,7 - vpxor xmm6,xmm6,xmm7 - mov edi,eax - xor esi,ecx - vpaddd xmm9,xmm11,xmm5 - shld eax,eax,5 - add ebp,esi - vpxor xmm6,xmm6,xmm8 - xor edi,ebx - xor ebx,ecx - add ebp,eax - add edx,DWORD[36+rsp] - vpsrld xmm8,xmm6,30 - vmovdqa XMMWORD[16+rsp],xmm9 - and edi,ebx - xor ebx,ecx - shrd eax,eax,7 - mov esi,ebp - vpslld xmm6,xmm6,2 - xor edi,ebx - shld ebp,ebp,5 - add edx,edi - xor esi,eax - xor eax,ebx - add edx,ebp - add ecx,DWORD[40+rsp] - and esi,eax - vpor xmm6,xmm6,xmm8 - xor eax,ebx - shrd ebp,ebp,7 - mov edi,edx - xor esi,eax - shld edx,edx,5 - add ecx,esi - xor edi,ebp - xor ebp,eax - add ecx,edx - add ebx,DWORD[44+rsp] - and edi,ebp - xor ebp,eax - shrd edx,edx,7 - mov esi,ecx - xor edi,ebp - shld ecx,ecx,5 - add ebx,edi - xor esi,edx - xor edx,ebp - add ebx,ecx - vpalignr xmm8,xmm6,xmm5,8 - vpxor xmm7,xmm7,xmm3 - add eax,DWORD[48+rsp] - and esi,edx - xor edx,ebp - shrd ecx,ecx,7 - vpxor xmm7,xmm7,xmm0 - mov edi,ebx - xor esi,edx - vpaddd 
xmm9,xmm11,xmm6 - vmovdqa xmm11,XMMWORD[32+r14] - shld ebx,ebx,5 - add eax,esi - vpxor xmm7,xmm7,xmm8 - xor edi,ecx - xor ecx,edx - add eax,ebx - add ebp,DWORD[52+rsp] - vpsrld xmm8,xmm7,30 - vmovdqa XMMWORD[32+rsp],xmm9 - and edi,ecx - xor ecx,edx - shrd ebx,ebx,7 - mov esi,eax - vpslld xmm7,xmm7,2 - xor edi,ecx - shld eax,eax,5 - add ebp,edi - xor esi,ebx - xor ebx,ecx - add ebp,eax - add edx,DWORD[56+rsp] - and esi,ebx - vpor xmm7,xmm7,xmm8 - xor ebx,ecx - shrd eax,eax,7 - mov edi,ebp - xor esi,ebx - shld ebp,ebp,5 - add edx,esi - xor edi,eax - xor eax,ebx - add edx,ebp - add ecx,DWORD[60+rsp] - and edi,eax - xor eax,ebx - shrd ebp,ebp,7 - mov esi,edx - xor edi,eax - shld edx,edx,5 - add ecx,edi - xor esi,ebp - xor ebp,eax - add ecx,edx - vpalignr xmm8,xmm7,xmm6,8 - vpxor xmm0,xmm0,xmm4 - add ebx,DWORD[rsp] - and esi,ebp - xor ebp,eax - shrd edx,edx,7 - vpxor xmm0,xmm0,xmm1 - mov edi,ecx - xor esi,ebp - vpaddd xmm9,xmm11,xmm7 - shld ecx,ecx,5 - add ebx,esi - vpxor xmm0,xmm0,xmm8 - xor edi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[4+rsp] - vpsrld xmm8,xmm0,30 - vmovdqa XMMWORD[48+rsp],xmm9 - and edi,edx - xor edx,ebp - shrd ecx,ecx,7 - mov esi,ebx - vpslld xmm0,xmm0,2 - xor edi,edx - shld ebx,ebx,5 - add eax,edi - xor esi,ecx - xor ecx,edx - add eax,ebx - add ebp,DWORD[8+rsp] - and esi,ecx - vpor xmm0,xmm0,xmm8 - xor ecx,edx - shrd ebx,ebx,7 - mov edi,eax - xor esi,ecx - shld eax,eax,5 - add ebp,esi - xor edi,ebx - xor ebx,ecx - add ebp,eax - add edx,DWORD[12+rsp] - and edi,ebx - xor ebx,ecx - shrd eax,eax,7 - mov esi,ebp - xor edi,ebx - shld ebp,ebp,5 - add edx,edi - xor esi,eax - xor eax,ebx - add edx,ebp - vpalignr xmm8,xmm0,xmm7,8 - vpxor xmm1,xmm1,xmm5 - add ecx,DWORD[16+rsp] - and esi,eax - xor eax,ebx - shrd ebp,ebp,7 - vpxor xmm1,xmm1,xmm2 - mov edi,edx - xor esi,eax - vpaddd xmm9,xmm11,xmm0 - shld edx,edx,5 - add ecx,esi - vpxor xmm1,xmm1,xmm8 - xor edi,ebp - xor ebp,eax - add ecx,edx - add ebx,DWORD[20+rsp] - vpsrld xmm8,xmm1,30 - vmovdqa 
XMMWORD[rsp],xmm9 - and edi,ebp - xor ebp,eax - shrd edx,edx,7 - mov esi,ecx - vpslld xmm1,xmm1,2 - xor edi,ebp - shld ecx,ecx,5 - add ebx,edi - xor esi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[24+rsp] - and esi,edx - vpor xmm1,xmm1,xmm8 - xor edx,ebp - shrd ecx,ecx,7 - mov edi,ebx - xor esi,edx - shld ebx,ebx,5 - add eax,esi - xor edi,ecx - xor ecx,edx - add eax,ebx - add ebp,DWORD[28+rsp] - and edi,ecx - xor ecx,edx - shrd ebx,ebx,7 - mov esi,eax - xor edi,ecx - shld eax,eax,5 - add ebp,edi - xor esi,ebx - xor ebx,ecx - add ebp,eax - vpalignr xmm8,xmm1,xmm0,8 - vpxor xmm2,xmm2,xmm6 - add edx,DWORD[32+rsp] - and esi,ebx - xor ebx,ecx - shrd eax,eax,7 - vpxor xmm2,xmm2,xmm3 - mov edi,ebp - xor esi,ebx - vpaddd xmm9,xmm11,xmm1 - shld ebp,ebp,5 - add edx,esi - vpxor xmm2,xmm2,xmm8 - xor edi,eax - xor eax,ebx - add edx,ebp - add ecx,DWORD[36+rsp] - vpsrld xmm8,xmm2,30 - vmovdqa XMMWORD[16+rsp],xmm9 - and edi,eax - xor eax,ebx - shrd ebp,ebp,7 - mov esi,edx - vpslld xmm2,xmm2,2 - xor edi,eax - shld edx,edx,5 - add ecx,edi - xor esi,ebp - xor ebp,eax - add ecx,edx - add ebx,DWORD[40+rsp] - and esi,ebp - vpor xmm2,xmm2,xmm8 - xor ebp,eax - shrd edx,edx,7 - mov edi,ecx - xor esi,ebp - shld ecx,ecx,5 - add ebx,esi - xor edi,edx - xor edx,ebp - add ebx,ecx - add eax,DWORD[44+rsp] - and edi,edx - xor edx,ebp - shrd ecx,ecx,7 - mov esi,ebx - xor edi,edx - shld ebx,ebx,5 - add eax,edi - xor esi,edx - add eax,ebx - vpalignr xmm8,xmm2,xmm1,8 - vpxor xmm3,xmm3,xmm7 - add ebp,DWORD[48+rsp] - xor esi,ecx - mov edi,eax - shld eax,eax,5 - vpxor xmm3,xmm3,xmm4 - add ebp,esi - xor edi,ecx - vpaddd xmm9,xmm11,xmm2 - shrd ebx,ebx,7 - add ebp,eax - vpxor xmm3,xmm3,xmm8 - add edx,DWORD[52+rsp] - xor edi,ebx - mov esi,ebp - shld ebp,ebp,5 - vpsrld xmm8,xmm3,30 - vmovdqa XMMWORD[32+rsp],xmm9 - add edx,edi - xor esi,ebx - shrd eax,eax,7 - add edx,ebp - vpslld xmm3,xmm3,2 - add ecx,DWORD[56+rsp] - xor esi,eax - mov edi,edx - shld edx,edx,5 - add ecx,esi - xor edi,eax - shrd ebp,ebp,7 - 
add ecx,edx - vpor xmm3,xmm3,xmm8 - add ebx,DWORD[60+rsp] - xor edi,ebp - mov esi,ecx - shld ecx,ecx,5 - add ebx,edi - xor esi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[rsp] - vpaddd xmm9,xmm11,xmm3 - xor esi,edx - mov edi,ebx - shld ebx,ebx,5 - add eax,esi - vmovdqa XMMWORD[48+rsp],xmm9 - xor edi,edx - shrd ecx,ecx,7 - add eax,ebx - add ebp,DWORD[4+rsp] - xor edi,ecx - mov esi,eax - shld eax,eax,5 - add ebp,edi - xor esi,ecx - shrd ebx,ebx,7 - add ebp,eax - add edx,DWORD[8+rsp] - xor esi,ebx - mov edi,ebp - shld ebp,ebp,5 - add edx,esi - xor edi,ebx - shrd eax,eax,7 - add edx,ebp - add ecx,DWORD[12+rsp] - xor edi,eax - mov esi,edx - shld edx,edx,5 - add ecx,edi - xor esi,eax - shrd ebp,ebp,7 - add ecx,edx - cmp r9,r10 - je NEAR $L$done_avx - vmovdqa xmm6,XMMWORD[64+r14] - vmovdqa xmm11,XMMWORD[((-64))+r14] - vmovdqu xmm0,XMMWORD[r9] - vmovdqu xmm1,XMMWORD[16+r9] - vmovdqu xmm2,XMMWORD[32+r9] - vmovdqu xmm3,XMMWORD[48+r9] - vpshufb xmm0,xmm0,xmm6 - add r9,64 - add ebx,DWORD[16+rsp] - xor esi,ebp - vpshufb xmm1,xmm1,xmm6 - mov edi,ecx - shld ecx,ecx,5 - vpaddd xmm4,xmm0,xmm11 - add ebx,esi - xor edi,ebp - shrd edx,edx,7 - add ebx,ecx - vmovdqa XMMWORD[rsp],xmm4 - add eax,DWORD[20+rsp] - xor edi,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,edi - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - add ebp,DWORD[24+rsp] - xor esi,ecx - mov edi,eax - shld eax,eax,5 - add ebp,esi - xor edi,ecx - shrd ebx,ebx,7 - add ebp,eax - add edx,DWORD[28+rsp] - xor edi,ebx - mov esi,ebp - shld ebp,ebp,5 - add edx,edi - xor esi,ebx - shrd eax,eax,7 - add edx,ebp - add ecx,DWORD[32+rsp] - xor esi,eax - vpshufb xmm2,xmm2,xmm6 - mov edi,edx - shld edx,edx,5 - vpaddd xmm5,xmm1,xmm11 - add ecx,esi - xor edi,eax - shrd ebp,ebp,7 - add ecx,edx - vmovdqa XMMWORD[16+rsp],xmm5 - add ebx,DWORD[36+rsp] - xor edi,ebp - mov esi,ecx - shld ecx,ecx,5 - add ebx,edi - xor esi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[40+rsp] - xor esi,edx - mov edi,ebx - shld ebx,ebx,5 - add eax,esi - xor 
edi,edx - shrd ecx,ecx,7 - add eax,ebx - add ebp,DWORD[44+rsp] - xor edi,ecx - mov esi,eax - shld eax,eax,5 - add ebp,edi - xor esi,ecx - shrd ebx,ebx,7 - add ebp,eax - add edx,DWORD[48+rsp] - xor esi,ebx - vpshufb xmm3,xmm3,xmm6 - mov edi,ebp - shld ebp,ebp,5 - vpaddd xmm6,xmm2,xmm11 - add edx,esi - xor edi,ebx - shrd eax,eax,7 - add edx,ebp - vmovdqa XMMWORD[32+rsp],xmm6 - add ecx,DWORD[52+rsp] - xor edi,eax - mov esi,edx - shld edx,edx,5 - add ecx,edi - xor esi,eax - shrd ebp,ebp,7 - add ecx,edx - add ebx,DWORD[56+rsp] - xor esi,ebp - mov edi,ecx - shld ecx,ecx,5 - add ebx,esi - xor edi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[60+rsp] - xor edi,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,edi - shrd ecx,ecx,7 - add eax,ebx - add eax,DWORD[r8] - add esi,DWORD[4+r8] - add ecx,DWORD[8+r8] - add edx,DWORD[12+r8] - mov DWORD[r8],eax - add ebp,DWORD[16+r8] - mov DWORD[4+r8],esi - mov ebx,esi - mov DWORD[8+r8],ecx - mov edi,ecx - mov DWORD[12+r8],edx - xor edi,edx - mov DWORD[16+r8],ebp - and esi,edi - jmp NEAR $L$oop_avx - -ALIGN 16 -$L$done_avx: - add ebx,DWORD[16+rsp] - xor esi,ebp - mov edi,ecx - shld ecx,ecx,5 - add ebx,esi - xor edi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[20+rsp] - xor edi,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,edi - xor esi,edx - shrd ecx,ecx,7 - add eax,ebx - add ebp,DWORD[24+rsp] - xor esi,ecx - mov edi,eax - shld eax,eax,5 - add ebp,esi - xor edi,ecx - shrd ebx,ebx,7 - add ebp,eax - add edx,DWORD[28+rsp] - xor edi,ebx - mov esi,ebp - shld ebp,ebp,5 - add edx,edi - xor esi,ebx - shrd eax,eax,7 - add edx,ebp - add ecx,DWORD[32+rsp] - xor esi,eax - mov edi,edx - shld edx,edx,5 - add ecx,esi - xor edi,eax - shrd ebp,ebp,7 - add ecx,edx - add ebx,DWORD[36+rsp] - xor edi,ebp - mov esi,ecx - shld ecx,ecx,5 - add ebx,edi - xor esi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[40+rsp] - xor esi,edx - mov edi,ebx - shld ebx,ebx,5 - add eax,esi - xor edi,edx - shrd ecx,ecx,7 - add eax,ebx - add ebp,DWORD[44+rsp] - xor 
edi,ecx - mov esi,eax - shld eax,eax,5 - add ebp,edi - xor esi,ecx - shrd ebx,ebx,7 - add ebp,eax - add edx,DWORD[48+rsp] - xor esi,ebx - mov edi,ebp - shld ebp,ebp,5 - add edx,esi - xor edi,ebx - shrd eax,eax,7 - add edx,ebp - add ecx,DWORD[52+rsp] - xor edi,eax - mov esi,edx - shld edx,edx,5 - add ecx,edi - xor esi,eax - shrd ebp,ebp,7 - add ecx,edx - add ebx,DWORD[56+rsp] - xor esi,ebp - mov edi,ecx - shld ecx,ecx,5 - add ebx,esi - xor edi,ebp - shrd edx,edx,7 - add ebx,ecx - add eax,DWORD[60+rsp] - xor edi,edx - mov esi,ebx - shld ebx,ebx,5 - add eax,edi - shrd ecx,ecx,7 - add eax,ebx - vzeroupper - - add eax,DWORD[r8] - add esi,DWORD[4+r8] - add ecx,DWORD[8+r8] - mov DWORD[r8],eax - add edx,DWORD[12+r8] - mov DWORD[4+r8],esi - add ebp,DWORD[16+r8] - mov DWORD[8+r8],ecx - mov DWORD[12+r8],edx - mov DWORD[16+r8],ebp - movaps xmm6,XMMWORD[((-40-96))+r11] - movaps xmm7,XMMWORD[((-40-80))+r11] - movaps xmm8,XMMWORD[((-40-64))+r11] - movaps xmm9,XMMWORD[((-40-48))+r11] - movaps xmm10,XMMWORD[((-40-32))+r11] - movaps xmm11,XMMWORD[((-40-16))+r11] - mov r14,QWORD[((-40))+r11] - - mov r13,QWORD[((-32))+r11] - - mov r12,QWORD[((-24))+r11] - - mov rbp,QWORD[((-16))+r11] - - mov rbx,QWORD[((-8))+r11] - - lea rsp,[r11] - -$L$epilogue_avx: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha1_block_data_order_avx: -ALIGN 64 -K_XX_XX: - DD 0x5a827999,0x5a827999,0x5a827999,0x5a827999 - DD 0x5a827999,0x5a827999,0x5a827999,0x5a827999 - DD 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 - DD 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 - DD 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc - DD 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc - DD 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 - DD 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 - DD 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f - DD 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f -DB 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 -DB 
83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 -DB 102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44 -DB 32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60 -DB 97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114 -DB 103,62,0 -ALIGN 64 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - lea r10,[$L$prologue] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - lea r10,[$L$epilogue] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov rax,QWORD[64+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - - jmp NEAR $L$common_seh_tail - - -ALIGN 16 -ssse3_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[208+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea rsi,[((-40-96))+rax] - lea rdi,[512+r8] - mov ecx,12 - DD 0xa548f3fc - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov 
rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_sha1_block_data_order wrt ..imagebase - DD $L$SEH_end_sha1_block_data_order wrt ..imagebase - DD $L$SEH_info_sha1_block_data_order wrt ..imagebase - DD $L$SEH_begin_sha1_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_end_sha1_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_info_sha1_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_begin_sha1_block_data_order_avx wrt ..imagebase - DD $L$SEH_end_sha1_block_data_order_avx wrt ..imagebase - DD $L$SEH_info_sha1_block_data_order_avx wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_sha1_block_data_order: -DB 9,0,0,0 - DD se_handler wrt ..imagebase -$L$SEH_info_sha1_block_data_order_ssse3: -DB 9,0,0,0 - DD ssse3_handler wrt ..imagebase - DD $L$prologue_ssse3 wrt ..imagebase,$L$epilogue_ssse3 wrt ..imagebase -$L$SEH_info_sha1_block_data_order_avx: -DB 9,0,0,0 - DD ssse3_handler wrt ..imagebase - DD $L$prologue_avx wrt ..imagebase,$L$epilogue_avx wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm deleted file mode 100644 index 68c74cc1b9..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm +++ /dev/null @@ -1,4142 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -EXTERN OPENSSL_ia32cap_P -global sha256_block_data_order - -ALIGN 16 -sha256_block_data_order: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha256_block_data_order: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea r11,[OPENSSL_ia32cap_P] - mov r9d,DWORD[r11] - mov r10d,DWORD[4+r11] - mov r11d,DWORD[8+r11] - and r9d,1073741824 - and r10d,268435968 - or r10d,r9d - cmp r10d,1342177792 - je NEAR $L$avx_shortcut - test r10d,512 - jnz NEAR $L$ssse3_shortcut - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - shl rdx,4 - sub rsp,16*4+4*8 - lea rdx,[rdx*4+rsi] - and rsp,-64 - mov QWORD[((64+0))+rsp],rdi - mov QWORD[((64+8))+rsp],rsi - mov QWORD[((64+16))+rsp],rdx - mov QWORD[88+rsp],rax - -$L$prologue: - - mov eax,DWORD[rdi] - mov ebx,DWORD[4+rdi] - mov ecx,DWORD[8+rdi] - mov edx,DWORD[12+rdi] - mov r8d,DWORD[16+rdi] - mov r9d,DWORD[20+rdi] - mov r10d,DWORD[24+rdi] - mov r11d,DWORD[28+rdi] - jmp NEAR $L$loop - -ALIGN 16 -$L$loop: - mov edi,ebx - lea rbp,[K256] - xor edi,ecx - mov r12d,DWORD[rsi] - mov r13d,r8d - mov r14d,eax - bswap r12d - ror r13d,14 - mov r15d,r9d - - xor r13d,r8d - ror r14d,9 - xor r15d,r10d - - mov DWORD[rsp],r12d - xor r14d,eax - and r15d,r8d - - ror r13d,5 - add r12d,r11d - xor r15d,r10d - - ror r14d,11 - xor r13d,r8d - add r12d,r15d - - mov r15d,eax - add r12d,DWORD[rbp] - xor r14d,eax - - xor r15d,ebx - ror r13d,6 - mov r11d,ebx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r11d,edi - add edx,r12d - add r11d,r12d - - lea rbp,[4+rbp] - add r11d,r14d - mov r12d,DWORD[4+rsi] - mov r13d,edx - mov r14d,r11d - bswap r12d - ror r13d,14 - mov edi,r8d - - xor r13d,edx - ror r14d,9 - xor edi,r9d - - mov DWORD[4+rsp],r12d - xor r14d,r11d - and edi,edx - - ror r13d,5 - add r12d,r10d 
- xor edi,r9d - - ror r14d,11 - xor r13d,edx - add r12d,edi - - mov edi,r11d - add r12d,DWORD[rbp] - xor r14d,r11d - - xor edi,eax - ror r13d,6 - mov r10d,eax - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r10d,r15d - add ecx,r12d - add r10d,r12d - - lea rbp,[4+rbp] - add r10d,r14d - mov r12d,DWORD[8+rsi] - mov r13d,ecx - mov r14d,r10d - bswap r12d - ror r13d,14 - mov r15d,edx - - xor r13d,ecx - ror r14d,9 - xor r15d,r8d - - mov DWORD[8+rsp],r12d - xor r14d,r10d - and r15d,ecx - - ror r13d,5 - add r12d,r9d - xor r15d,r8d - - ror r14d,11 - xor r13d,ecx - add r12d,r15d - - mov r15d,r10d - add r12d,DWORD[rbp] - xor r14d,r10d - - xor r15d,r11d - ror r13d,6 - mov r9d,r11d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r9d,edi - add ebx,r12d - add r9d,r12d - - lea rbp,[4+rbp] - add r9d,r14d - mov r12d,DWORD[12+rsi] - mov r13d,ebx - mov r14d,r9d - bswap r12d - ror r13d,14 - mov edi,ecx - - xor r13d,ebx - ror r14d,9 - xor edi,edx - - mov DWORD[12+rsp],r12d - xor r14d,r9d - and edi,ebx - - ror r13d,5 - add r12d,r8d - xor edi,edx - - ror r14d,11 - xor r13d,ebx - add r12d,edi - - mov edi,r9d - add r12d,DWORD[rbp] - xor r14d,r9d - - xor edi,r10d - ror r13d,6 - mov r8d,r10d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r8d,r15d - add eax,r12d - add r8d,r12d - - lea rbp,[20+rbp] - add r8d,r14d - mov r12d,DWORD[16+rsi] - mov r13d,eax - mov r14d,r8d - bswap r12d - ror r13d,14 - mov r15d,ebx - - xor r13d,eax - ror r14d,9 - xor r15d,ecx - - mov DWORD[16+rsp],r12d - xor r14d,r8d - and r15d,eax - - ror r13d,5 - add r12d,edx - xor r15d,ecx - - ror r14d,11 - xor r13d,eax - add r12d,r15d - - mov r15d,r8d - add r12d,DWORD[rbp] - xor r14d,r8d - - xor r15d,r9d - ror r13d,6 - mov edx,r9d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor edx,edi - add r11d,r12d - add edx,r12d - - lea rbp,[4+rbp] - add edx,r14d - mov r12d,DWORD[20+rsi] - mov r13d,r11d - mov r14d,edx - bswap r12d - ror r13d,14 - mov edi,eax - - xor r13d,r11d - ror r14d,9 - xor edi,ebx - - mov 
DWORD[20+rsp],r12d - xor r14d,edx - and edi,r11d - - ror r13d,5 - add r12d,ecx - xor edi,ebx - - ror r14d,11 - xor r13d,r11d - add r12d,edi - - mov edi,edx - add r12d,DWORD[rbp] - xor r14d,edx - - xor edi,r8d - ror r13d,6 - mov ecx,r8d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor ecx,r15d - add r10d,r12d - add ecx,r12d - - lea rbp,[4+rbp] - add ecx,r14d - mov r12d,DWORD[24+rsi] - mov r13d,r10d - mov r14d,ecx - bswap r12d - ror r13d,14 - mov r15d,r11d - - xor r13d,r10d - ror r14d,9 - xor r15d,eax - - mov DWORD[24+rsp],r12d - xor r14d,ecx - and r15d,r10d - - ror r13d,5 - add r12d,ebx - xor r15d,eax - - ror r14d,11 - xor r13d,r10d - add r12d,r15d - - mov r15d,ecx - add r12d,DWORD[rbp] - xor r14d,ecx - - xor r15d,edx - ror r13d,6 - mov ebx,edx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor ebx,edi - add r9d,r12d - add ebx,r12d - - lea rbp,[4+rbp] - add ebx,r14d - mov r12d,DWORD[28+rsi] - mov r13d,r9d - mov r14d,ebx - bswap r12d - ror r13d,14 - mov edi,r10d - - xor r13d,r9d - ror r14d,9 - xor edi,r11d - - mov DWORD[28+rsp],r12d - xor r14d,ebx - and edi,r9d - - ror r13d,5 - add r12d,eax - xor edi,r11d - - ror r14d,11 - xor r13d,r9d - add r12d,edi - - mov edi,ebx - add r12d,DWORD[rbp] - xor r14d,ebx - - xor edi,ecx - ror r13d,6 - mov eax,ecx - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor eax,r15d - add r8d,r12d - add eax,r12d - - lea rbp,[20+rbp] - add eax,r14d - mov r12d,DWORD[32+rsi] - mov r13d,r8d - mov r14d,eax - bswap r12d - ror r13d,14 - mov r15d,r9d - - xor r13d,r8d - ror r14d,9 - xor r15d,r10d - - mov DWORD[32+rsp],r12d - xor r14d,eax - and r15d,r8d - - ror r13d,5 - add r12d,r11d - xor r15d,r10d - - ror r14d,11 - xor r13d,r8d - add r12d,r15d - - mov r15d,eax - add r12d,DWORD[rbp] - xor r14d,eax - - xor r15d,ebx - ror r13d,6 - mov r11d,ebx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r11d,edi - add edx,r12d - add r11d,r12d - - lea rbp,[4+rbp] - add r11d,r14d - mov r12d,DWORD[36+rsi] - mov r13d,edx - mov r14d,r11d - bswap r12d - ror 
r13d,14 - mov edi,r8d - - xor r13d,edx - ror r14d,9 - xor edi,r9d - - mov DWORD[36+rsp],r12d - xor r14d,r11d - and edi,edx - - ror r13d,5 - add r12d,r10d - xor edi,r9d - - ror r14d,11 - xor r13d,edx - add r12d,edi - - mov edi,r11d - add r12d,DWORD[rbp] - xor r14d,r11d - - xor edi,eax - ror r13d,6 - mov r10d,eax - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r10d,r15d - add ecx,r12d - add r10d,r12d - - lea rbp,[4+rbp] - add r10d,r14d - mov r12d,DWORD[40+rsi] - mov r13d,ecx - mov r14d,r10d - bswap r12d - ror r13d,14 - mov r15d,edx - - xor r13d,ecx - ror r14d,9 - xor r15d,r8d - - mov DWORD[40+rsp],r12d - xor r14d,r10d - and r15d,ecx - - ror r13d,5 - add r12d,r9d - xor r15d,r8d - - ror r14d,11 - xor r13d,ecx - add r12d,r15d - - mov r15d,r10d - add r12d,DWORD[rbp] - xor r14d,r10d - - xor r15d,r11d - ror r13d,6 - mov r9d,r11d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r9d,edi - add ebx,r12d - add r9d,r12d - - lea rbp,[4+rbp] - add r9d,r14d - mov r12d,DWORD[44+rsi] - mov r13d,ebx - mov r14d,r9d - bswap r12d - ror r13d,14 - mov edi,ecx - - xor r13d,ebx - ror r14d,9 - xor edi,edx - - mov DWORD[44+rsp],r12d - xor r14d,r9d - and edi,ebx - - ror r13d,5 - add r12d,r8d - xor edi,edx - - ror r14d,11 - xor r13d,ebx - add r12d,edi - - mov edi,r9d - add r12d,DWORD[rbp] - xor r14d,r9d - - xor edi,r10d - ror r13d,6 - mov r8d,r10d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r8d,r15d - add eax,r12d - add r8d,r12d - - lea rbp,[20+rbp] - add r8d,r14d - mov r12d,DWORD[48+rsi] - mov r13d,eax - mov r14d,r8d - bswap r12d - ror r13d,14 - mov r15d,ebx - - xor r13d,eax - ror r14d,9 - xor r15d,ecx - - mov DWORD[48+rsp],r12d - xor r14d,r8d - and r15d,eax - - ror r13d,5 - add r12d,edx - xor r15d,ecx - - ror r14d,11 - xor r13d,eax - add r12d,r15d - - mov r15d,r8d - add r12d,DWORD[rbp] - xor r14d,r8d - - xor r15d,r9d - ror r13d,6 - mov edx,r9d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor edx,edi - add r11d,r12d - add edx,r12d - - lea rbp,[4+rbp] - add edx,r14d - 
mov r12d,DWORD[52+rsi] - mov r13d,r11d - mov r14d,edx - bswap r12d - ror r13d,14 - mov edi,eax - - xor r13d,r11d - ror r14d,9 - xor edi,ebx - - mov DWORD[52+rsp],r12d - xor r14d,edx - and edi,r11d - - ror r13d,5 - add r12d,ecx - xor edi,ebx - - ror r14d,11 - xor r13d,r11d - add r12d,edi - - mov edi,edx - add r12d,DWORD[rbp] - xor r14d,edx - - xor edi,r8d - ror r13d,6 - mov ecx,r8d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor ecx,r15d - add r10d,r12d - add ecx,r12d - - lea rbp,[4+rbp] - add ecx,r14d - mov r12d,DWORD[56+rsi] - mov r13d,r10d - mov r14d,ecx - bswap r12d - ror r13d,14 - mov r15d,r11d - - xor r13d,r10d - ror r14d,9 - xor r15d,eax - - mov DWORD[56+rsp],r12d - xor r14d,ecx - and r15d,r10d - - ror r13d,5 - add r12d,ebx - xor r15d,eax - - ror r14d,11 - xor r13d,r10d - add r12d,r15d - - mov r15d,ecx - add r12d,DWORD[rbp] - xor r14d,ecx - - xor r15d,edx - ror r13d,6 - mov ebx,edx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor ebx,edi - add r9d,r12d - add ebx,r12d - - lea rbp,[4+rbp] - add ebx,r14d - mov r12d,DWORD[60+rsi] - mov r13d,r9d - mov r14d,ebx - bswap r12d - ror r13d,14 - mov edi,r10d - - xor r13d,r9d - ror r14d,9 - xor edi,r11d - - mov DWORD[60+rsp],r12d - xor r14d,ebx - and edi,r9d - - ror r13d,5 - add r12d,eax - xor edi,r11d - - ror r14d,11 - xor r13d,r9d - add r12d,edi - - mov edi,ebx - add r12d,DWORD[rbp] - xor r14d,ebx - - xor edi,ecx - ror r13d,6 - mov eax,ecx - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor eax,r15d - add r8d,r12d - add eax,r12d - - lea rbp,[20+rbp] - jmp NEAR $L$rounds_16_xx -ALIGN 16 -$L$rounds_16_xx: - mov r13d,DWORD[4+rsp] - mov r15d,DWORD[56+rsp] - - mov r12d,r13d - ror r13d,11 - add eax,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[36+rsp] - - add r12d,DWORD[rsp] - mov r13d,r8d - add r12d,r15d - mov r14d,eax - ror r13d,14 - mov r15d,r9d - - xor r13d,r8d - ror r14d,9 - xor 
r15d,r10d - - mov DWORD[rsp],r12d - xor r14d,eax - and r15d,r8d - - ror r13d,5 - add r12d,r11d - xor r15d,r10d - - ror r14d,11 - xor r13d,r8d - add r12d,r15d - - mov r15d,eax - add r12d,DWORD[rbp] - xor r14d,eax - - xor r15d,ebx - ror r13d,6 - mov r11d,ebx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r11d,edi - add edx,r12d - add r11d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[8+rsp] - mov edi,DWORD[60+rsp] - - mov r12d,r13d - ror r13d,11 - add r11d,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[40+rsp] - - add r12d,DWORD[4+rsp] - mov r13d,edx - add r12d,edi - mov r14d,r11d - ror r13d,14 - mov edi,r8d - - xor r13d,edx - ror r14d,9 - xor edi,r9d - - mov DWORD[4+rsp],r12d - xor r14d,r11d - and edi,edx - - ror r13d,5 - add r12d,r10d - xor edi,r9d - - ror r14d,11 - xor r13d,edx - add r12d,edi - - mov edi,r11d - add r12d,DWORD[rbp] - xor r14d,r11d - - xor edi,eax - ror r13d,6 - mov r10d,eax - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r10d,r15d - add ecx,r12d - add r10d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[12+rsp] - mov r15d,DWORD[rsp] - - mov r12d,r13d - ror r13d,11 - add r10d,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[44+rsp] - - add r12d,DWORD[8+rsp] - mov r13d,ecx - add r12d,r15d - mov r14d,r10d - ror r13d,14 - mov r15d,edx - - xor r13d,ecx - ror r14d,9 - xor r15d,r8d - - mov DWORD[8+rsp],r12d - xor r14d,r10d - and r15d,ecx - - ror r13d,5 - add r12d,r9d - xor r15d,r8d - - ror r14d,11 - xor r13d,ecx - add r12d,r15d - - mov r15d,r10d - add r12d,DWORD[rbp] - xor r14d,r10d - - xor r15d,r11d - ror r13d,6 - mov r9d,r11d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r9d,edi - add ebx,r12d - add r9d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[16+rsp] - mov edi,DWORD[4+rsp] - - mov r12d,r13d - ror 
r13d,11 - add r9d,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[48+rsp] - - add r12d,DWORD[12+rsp] - mov r13d,ebx - add r12d,edi - mov r14d,r9d - ror r13d,14 - mov edi,ecx - - xor r13d,ebx - ror r14d,9 - xor edi,edx - - mov DWORD[12+rsp],r12d - xor r14d,r9d - and edi,ebx - - ror r13d,5 - add r12d,r8d - xor edi,edx - - ror r14d,11 - xor r13d,ebx - add r12d,edi - - mov edi,r9d - add r12d,DWORD[rbp] - xor r14d,r9d - - xor edi,r10d - ror r13d,6 - mov r8d,r10d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r8d,r15d - add eax,r12d - add r8d,r12d - - lea rbp,[20+rbp] - mov r13d,DWORD[20+rsp] - mov r15d,DWORD[8+rsp] - - mov r12d,r13d - ror r13d,11 - add r8d,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[52+rsp] - - add r12d,DWORD[16+rsp] - mov r13d,eax - add r12d,r15d - mov r14d,r8d - ror r13d,14 - mov r15d,ebx - - xor r13d,eax - ror r14d,9 - xor r15d,ecx - - mov DWORD[16+rsp],r12d - xor r14d,r8d - and r15d,eax - - ror r13d,5 - add r12d,edx - xor r15d,ecx - - ror r14d,11 - xor r13d,eax - add r12d,r15d - - mov r15d,r8d - add r12d,DWORD[rbp] - xor r14d,r8d - - xor r15d,r9d - ror r13d,6 - mov edx,r9d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor edx,edi - add r11d,r12d - add edx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[24+rsp] - mov edi,DWORD[12+rsp] - - mov r12d,r13d - ror r13d,11 - add edx,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[56+rsp] - - add r12d,DWORD[20+rsp] - mov r13d,r11d - add r12d,edi - mov r14d,edx - ror r13d,14 - mov edi,eax - - xor r13d,r11d - ror r14d,9 - xor edi,ebx - - mov DWORD[20+rsp],r12d - xor r14d,edx - and edi,r11d - - ror r13d,5 - add r12d,ecx - xor edi,ebx - - ror 
r14d,11 - xor r13d,r11d - add r12d,edi - - mov edi,edx - add r12d,DWORD[rbp] - xor r14d,edx - - xor edi,r8d - ror r13d,6 - mov ecx,r8d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor ecx,r15d - add r10d,r12d - add ecx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[28+rsp] - mov r15d,DWORD[16+rsp] - - mov r12d,r13d - ror r13d,11 - add ecx,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[60+rsp] - - add r12d,DWORD[24+rsp] - mov r13d,r10d - add r12d,r15d - mov r14d,ecx - ror r13d,14 - mov r15d,r11d - - xor r13d,r10d - ror r14d,9 - xor r15d,eax - - mov DWORD[24+rsp],r12d - xor r14d,ecx - and r15d,r10d - - ror r13d,5 - add r12d,ebx - xor r15d,eax - - ror r14d,11 - xor r13d,r10d - add r12d,r15d - - mov r15d,ecx - add r12d,DWORD[rbp] - xor r14d,ecx - - xor r15d,edx - ror r13d,6 - mov ebx,edx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor ebx,edi - add r9d,r12d - add ebx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[32+rsp] - mov edi,DWORD[20+rsp] - - mov r12d,r13d - ror r13d,11 - add ebx,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[rsp] - - add r12d,DWORD[28+rsp] - mov r13d,r9d - add r12d,edi - mov r14d,ebx - ror r13d,14 - mov edi,r10d - - xor r13d,r9d - ror r14d,9 - xor edi,r11d - - mov DWORD[28+rsp],r12d - xor r14d,ebx - and edi,r9d - - ror r13d,5 - add r12d,eax - xor edi,r11d - - ror r14d,11 - xor r13d,r9d - add r12d,edi - - mov edi,ebx - add r12d,DWORD[rbp] - xor r14d,ebx - - xor edi,ecx - ror r13d,6 - mov eax,ecx - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor eax,r15d - add r8d,r12d - add eax,r12d - - lea rbp,[20+rbp] - mov r13d,DWORD[36+rsp] - mov r15d,DWORD[24+rsp] - - mov r12d,r13d - ror r13d,11 - add eax,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 
- - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[4+rsp] - - add r12d,DWORD[32+rsp] - mov r13d,r8d - add r12d,r15d - mov r14d,eax - ror r13d,14 - mov r15d,r9d - - xor r13d,r8d - ror r14d,9 - xor r15d,r10d - - mov DWORD[32+rsp],r12d - xor r14d,eax - and r15d,r8d - - ror r13d,5 - add r12d,r11d - xor r15d,r10d - - ror r14d,11 - xor r13d,r8d - add r12d,r15d - - mov r15d,eax - add r12d,DWORD[rbp] - xor r14d,eax - - xor r15d,ebx - ror r13d,6 - mov r11d,ebx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r11d,edi - add edx,r12d - add r11d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[40+rsp] - mov edi,DWORD[28+rsp] - - mov r12d,r13d - ror r13d,11 - add r11d,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[8+rsp] - - add r12d,DWORD[36+rsp] - mov r13d,edx - add r12d,edi - mov r14d,r11d - ror r13d,14 - mov edi,r8d - - xor r13d,edx - ror r14d,9 - xor edi,r9d - - mov DWORD[36+rsp],r12d - xor r14d,r11d - and edi,edx - - ror r13d,5 - add r12d,r10d - xor edi,r9d - - ror r14d,11 - xor r13d,edx - add r12d,edi - - mov edi,r11d - add r12d,DWORD[rbp] - xor r14d,r11d - - xor edi,eax - ror r13d,6 - mov r10d,eax - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r10d,r15d - add ecx,r12d - add r10d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[44+rsp] - mov r15d,DWORD[32+rsp] - - mov r12d,r13d - ror r13d,11 - add r10d,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[12+rsp] - - add r12d,DWORD[40+rsp] - mov r13d,ecx - add r12d,r15d - mov r14d,r10d - ror r13d,14 - mov r15d,edx - - xor r13d,ecx - ror r14d,9 - xor r15d,r8d - - mov DWORD[40+rsp],r12d - xor r14d,r10d - and r15d,ecx - - ror r13d,5 - add r12d,r9d - xor r15d,r8d - - ror r14d,11 - xor r13d,ecx - add r12d,r15d - - mov r15d,r10d - add r12d,DWORD[rbp] - xor r14d,r10d - - xor 
r15d,r11d - ror r13d,6 - mov r9d,r11d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor r9d,edi - add ebx,r12d - add r9d,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[48+rsp] - mov edi,DWORD[36+rsp] - - mov r12d,r13d - ror r13d,11 - add r9d,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[16+rsp] - - add r12d,DWORD[44+rsp] - mov r13d,ebx - add r12d,edi - mov r14d,r9d - ror r13d,14 - mov edi,ecx - - xor r13d,ebx - ror r14d,9 - xor edi,edx - - mov DWORD[44+rsp],r12d - xor r14d,r9d - and edi,ebx - - ror r13d,5 - add r12d,r8d - xor edi,edx - - ror r14d,11 - xor r13d,ebx - add r12d,edi - - mov edi,r9d - add r12d,DWORD[rbp] - xor r14d,r9d - - xor edi,r10d - ror r13d,6 - mov r8d,r10d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor r8d,r15d - add eax,r12d - add r8d,r12d - - lea rbp,[20+rbp] - mov r13d,DWORD[52+rsp] - mov r15d,DWORD[40+rsp] - - mov r12d,r13d - ror r13d,11 - add r8d,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[20+rsp] - - add r12d,DWORD[48+rsp] - mov r13d,eax - add r12d,r15d - mov r14d,r8d - ror r13d,14 - mov r15d,ebx - - xor r13d,eax - ror r14d,9 - xor r15d,ecx - - mov DWORD[48+rsp],r12d - xor r14d,r8d - and r15d,eax - - ror r13d,5 - add r12d,edx - xor r15d,ecx - - ror r14d,11 - xor r13d,eax - add r12d,r15d - - mov r15d,r8d - add r12d,DWORD[rbp] - xor r14d,r8d - - xor r15d,r9d - ror r13d,6 - mov edx,r9d - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor edx,edi - add r11d,r12d - add edx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[56+rsp] - mov edi,DWORD[44+rsp] - - mov r12d,r13d - ror r13d,11 - add edx,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[24+rsp] - - add r12d,DWORD[52+rsp] - mov 
r13d,r11d - add r12d,edi - mov r14d,edx - ror r13d,14 - mov edi,eax - - xor r13d,r11d - ror r14d,9 - xor edi,ebx - - mov DWORD[52+rsp],r12d - xor r14d,edx - and edi,r11d - - ror r13d,5 - add r12d,ecx - xor edi,ebx - - ror r14d,11 - xor r13d,r11d - add r12d,edi - - mov edi,edx - add r12d,DWORD[rbp] - xor r14d,edx - - xor edi,r8d - ror r13d,6 - mov ecx,r8d - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor ecx,r15d - add r10d,r12d - add ecx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[60+rsp] - mov r15d,DWORD[48+rsp] - - mov r12d,r13d - ror r13d,11 - add ecx,r14d - mov r14d,r15d - ror r15d,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor r15d,r14d - shr r14d,10 - - ror r15d,17 - xor r12d,r13d - xor r15d,r14d - add r12d,DWORD[28+rsp] - - add r12d,DWORD[56+rsp] - mov r13d,r10d - add r12d,r15d - mov r14d,ecx - ror r13d,14 - mov r15d,r11d - - xor r13d,r10d - ror r14d,9 - xor r15d,eax - - mov DWORD[56+rsp],r12d - xor r14d,ecx - and r15d,r10d - - ror r13d,5 - add r12d,ebx - xor r15d,eax - - ror r14d,11 - xor r13d,r10d - add r12d,r15d - - mov r15d,ecx - add r12d,DWORD[rbp] - xor r14d,ecx - - xor r15d,edx - ror r13d,6 - mov ebx,edx - - and edi,r15d - ror r14d,2 - add r12d,r13d - - xor ebx,edi - add r9d,r12d - add ebx,r12d - - lea rbp,[4+rbp] - mov r13d,DWORD[rsp] - mov edi,DWORD[52+rsp] - - mov r12d,r13d - ror r13d,11 - add ebx,r14d - mov r14d,edi - ror edi,2 - - xor r13d,r12d - shr r12d,3 - ror r13d,7 - xor edi,r14d - shr r14d,10 - - ror edi,17 - xor r12d,r13d - xor edi,r14d - add r12d,DWORD[32+rsp] - - add r12d,DWORD[60+rsp] - mov r13d,r9d - add r12d,edi - mov r14d,ebx - ror r13d,14 - mov edi,r10d - - xor r13d,r9d - ror r14d,9 - xor edi,r11d - - mov DWORD[60+rsp],r12d - xor r14d,ebx - and edi,r9d - - ror r13d,5 - add r12d,eax - xor edi,r11d - - ror r14d,11 - xor r13d,r9d - add r12d,edi - - mov edi,ebx - add r12d,DWORD[rbp] - xor r14d,ebx - - xor edi,ecx - ror r13d,6 - mov eax,ecx - - and r15d,edi - ror r14d,2 - add r12d,r13d - - xor eax,r15d - add r8d,r12d - add 
eax,r12d - - lea rbp,[20+rbp] - cmp BYTE[3+rbp],0 - jnz NEAR $L$rounds_16_xx - - mov rdi,QWORD[((64+0))+rsp] - add eax,r14d - lea rsi,[64+rsi] - - add eax,DWORD[rdi] - add ebx,DWORD[4+rdi] - add ecx,DWORD[8+rdi] - add edx,DWORD[12+rdi] - add r8d,DWORD[16+rdi] - add r9d,DWORD[20+rdi] - add r10d,DWORD[24+rdi] - add r11d,DWORD[28+rdi] - - cmp rsi,QWORD[((64+16))+rsp] - - mov DWORD[rdi],eax - mov DWORD[4+rdi],ebx - mov DWORD[8+rdi],ecx - mov DWORD[12+rdi],edx - mov DWORD[16+rdi],r8d - mov DWORD[20+rdi],r9d - mov DWORD[24+rdi],r10d - mov DWORD[28+rdi],r11d - jb NEAR $L$loop - - mov rsi,QWORD[88+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha256_block_data_order: -ALIGN 64 - -K256: - DD 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - DD 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - DD 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - DD 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - DD 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - DD 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - DD 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - DD 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - DD 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - DD 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - DD 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - DD 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - DD 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - DD 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - DD 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - DD 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - DD 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - DD 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - DD 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - DD 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - DD 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - DD 
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - DD 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - DD 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - DD 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - DD 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - DD 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - DD 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - DD 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - DD 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - DD 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - DD 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - - DD 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f - DD 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f - DD 0x03020100,0x0b0a0908,0xffffffff,0xffffffff - DD 0x03020100,0x0b0a0908,0xffffffff,0xffffffff - DD 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 - DD 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 -DB 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 -DB 110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54 -DB 52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 -DB 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 -DB 111,114,103,62,0 - -ALIGN 64 -sha256_block_data_order_ssse3: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha256_block_data_order_ssse3: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$ssse3_shortcut: - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - shl rdx,4 - sub rsp,160 - lea rdx,[rdx*4+rsi] - and rsp,-64 - mov QWORD[((64+0))+rsp],rdi - mov QWORD[((64+8))+rsp],rsi - mov QWORD[((64+16))+rsp],rdx - mov QWORD[88+rsp],rax - - movaps XMMWORD[(64+32)+rsp],xmm6 - movaps XMMWORD[(64+48)+rsp],xmm7 - movaps XMMWORD[(64+64)+rsp],xmm8 - movaps XMMWORD[(64+80)+rsp],xmm9 -$L$prologue_ssse3: - - mov eax,DWORD[rdi] - mov ebx,DWORD[4+rdi] - mov ecx,DWORD[8+rdi] - mov edx,DWORD[12+rdi] - mov r8d,DWORD[16+rdi] - mov r9d,DWORD[20+rdi] - mov r10d,DWORD[24+rdi] - mov r11d,DWORD[28+rdi] - - - jmp NEAR $L$loop_ssse3 -ALIGN 16 -$L$loop_ssse3: - 
movdqa xmm7,XMMWORD[((K256+512))] - movdqu xmm0,XMMWORD[rsi] - movdqu xmm1,XMMWORD[16+rsi] - movdqu xmm2,XMMWORD[32+rsi] -DB 102,15,56,0,199 - movdqu xmm3,XMMWORD[48+rsi] - lea rbp,[K256] -DB 102,15,56,0,207 - movdqa xmm4,XMMWORD[rbp] - movdqa xmm5,XMMWORD[32+rbp] -DB 102,15,56,0,215 - paddd xmm4,xmm0 - movdqa xmm6,XMMWORD[64+rbp] -DB 102,15,56,0,223 - movdqa xmm7,XMMWORD[96+rbp] - paddd xmm5,xmm1 - paddd xmm6,xmm2 - paddd xmm7,xmm3 - movdqa XMMWORD[rsp],xmm4 - mov r14d,eax - movdqa XMMWORD[16+rsp],xmm5 - mov edi,ebx - movdqa XMMWORD[32+rsp],xmm6 - xor edi,ecx - movdqa XMMWORD[48+rsp],xmm7 - mov r13d,r8d - jmp NEAR $L$ssse3_00_47 - -ALIGN 16 -$L$ssse3_00_47: - sub rbp,-128 - ror r13d,14 - movdqa xmm4,xmm1 - mov eax,r14d - mov r12d,r9d - movdqa xmm7,xmm3 - ror r14d,9 - xor r13d,r8d - xor r12d,r10d - ror r13d,5 - xor r14d,eax -DB 102,15,58,15,224,4 - and r12d,r8d - xor r13d,r8d -DB 102,15,58,15,250,4 - add r11d,DWORD[rsp] - mov r15d,eax - xor r12d,r10d - ror r14d,11 - movdqa xmm5,xmm4 - xor r15d,ebx - add r11d,r12d - movdqa xmm6,xmm4 - ror r13d,6 - and edi,r15d - psrld xmm4,3 - xor r14d,eax - add r11d,r13d - xor edi,ebx - paddd xmm0,xmm7 - ror r14d,2 - add edx,r11d - psrld xmm6,7 - add r11d,edi - mov r13d,edx - pshufd xmm7,xmm3,250 - add r14d,r11d - ror r13d,14 - pslld xmm5,14 - mov r11d,r14d - mov r12d,r8d - pxor xmm4,xmm6 - ror r14d,9 - xor r13d,edx - xor r12d,r9d - ror r13d,5 - psrld xmm6,11 - xor r14d,r11d - pxor xmm4,xmm5 - and r12d,edx - xor r13d,edx - pslld xmm5,11 - add r10d,DWORD[4+rsp] - mov edi,r11d - pxor xmm4,xmm6 - xor r12d,r9d - ror r14d,11 - movdqa xmm6,xmm7 - xor edi,eax - add r10d,r12d - pxor xmm4,xmm5 - ror r13d,6 - and r15d,edi - xor r14d,r11d - psrld xmm7,10 - add r10d,r13d - xor r15d,eax - paddd xmm0,xmm4 - ror r14d,2 - add ecx,r10d - psrlq xmm6,17 - add r10d,r15d - mov r13d,ecx - add r14d,r10d - pxor xmm7,xmm6 - ror r13d,14 - mov r10d,r14d - mov r12d,edx - ror r14d,9 - psrlq xmm6,2 - xor r13d,ecx - xor r12d,r8d - pxor xmm7,xmm6 - ror r13d,5 - 
xor r14d,r10d - and r12d,ecx - pshufd xmm7,xmm7,128 - xor r13d,ecx - add r9d,DWORD[8+rsp] - mov r15d,r10d - psrldq xmm7,8 - xor r12d,r8d - ror r14d,11 - xor r15d,r11d - add r9d,r12d - ror r13d,6 - paddd xmm0,xmm7 - and edi,r15d - xor r14d,r10d - add r9d,r13d - pshufd xmm7,xmm0,80 - xor edi,r11d - ror r14d,2 - add ebx,r9d - movdqa xmm6,xmm7 - add r9d,edi - mov r13d,ebx - psrld xmm7,10 - add r14d,r9d - ror r13d,14 - psrlq xmm6,17 - mov r9d,r14d - mov r12d,ecx - pxor xmm7,xmm6 - ror r14d,9 - xor r13d,ebx - xor r12d,edx - ror r13d,5 - xor r14d,r9d - psrlq xmm6,2 - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[12+rsp] - pxor xmm7,xmm6 - mov edi,r9d - xor r12d,edx - ror r14d,11 - pshufd xmm7,xmm7,8 - xor edi,r10d - add r8d,r12d - movdqa xmm6,XMMWORD[rbp] - ror r13d,6 - and r15d,edi - pslldq xmm7,8 - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - paddd xmm0,xmm7 - ror r14d,2 - add eax,r8d - add r8d,r15d - paddd xmm6,xmm0 - mov r13d,eax - add r14d,r8d - movdqa XMMWORD[rsp],xmm6 - ror r13d,14 - movdqa xmm4,xmm2 - mov r8d,r14d - mov r12d,ebx - movdqa xmm7,xmm0 - ror r14d,9 - xor r13d,eax - xor r12d,ecx - ror r13d,5 - xor r14d,r8d -DB 102,15,58,15,225,4 - and r12d,eax - xor r13d,eax -DB 102,15,58,15,251,4 - add edx,DWORD[16+rsp] - mov r15d,r8d - xor r12d,ecx - ror r14d,11 - movdqa xmm5,xmm4 - xor r15d,r9d - add edx,r12d - movdqa xmm6,xmm4 - ror r13d,6 - and edi,r15d - psrld xmm4,3 - xor r14d,r8d - add edx,r13d - xor edi,r9d - paddd xmm1,xmm7 - ror r14d,2 - add r11d,edx - psrld xmm6,7 - add edx,edi - mov r13d,r11d - pshufd xmm7,xmm0,250 - add r14d,edx - ror r13d,14 - pslld xmm5,14 - mov edx,r14d - mov r12d,eax - pxor xmm4,xmm6 - ror r14d,9 - xor r13d,r11d - xor r12d,ebx - ror r13d,5 - psrld xmm6,11 - xor r14d,edx - pxor xmm4,xmm5 - and r12d,r11d - xor r13d,r11d - pslld xmm5,11 - add ecx,DWORD[20+rsp] - mov edi,edx - pxor xmm4,xmm6 - xor r12d,ebx - ror r14d,11 - movdqa xmm6,xmm7 - xor edi,r8d - add ecx,r12d - pxor xmm4,xmm5 - ror r13d,6 - and r15d,edi - xor r14d,edx - psrld 
xmm7,10 - add ecx,r13d - xor r15d,r8d - paddd xmm1,xmm4 - ror r14d,2 - add r10d,ecx - psrlq xmm6,17 - add ecx,r15d - mov r13d,r10d - add r14d,ecx - pxor xmm7,xmm6 - ror r13d,14 - mov ecx,r14d - mov r12d,r11d - ror r14d,9 - psrlq xmm6,2 - xor r13d,r10d - xor r12d,eax - pxor xmm7,xmm6 - ror r13d,5 - xor r14d,ecx - and r12d,r10d - pshufd xmm7,xmm7,128 - xor r13d,r10d - add ebx,DWORD[24+rsp] - mov r15d,ecx - psrldq xmm7,8 - xor r12d,eax - ror r14d,11 - xor r15d,edx - add ebx,r12d - ror r13d,6 - paddd xmm1,xmm7 - and edi,r15d - xor r14d,ecx - add ebx,r13d - pshufd xmm7,xmm1,80 - xor edi,edx - ror r14d,2 - add r9d,ebx - movdqa xmm6,xmm7 - add ebx,edi - mov r13d,r9d - psrld xmm7,10 - add r14d,ebx - ror r13d,14 - psrlq xmm6,17 - mov ebx,r14d - mov r12d,r10d - pxor xmm7,xmm6 - ror r14d,9 - xor r13d,r9d - xor r12d,r11d - ror r13d,5 - xor r14d,ebx - psrlq xmm6,2 - and r12d,r9d - xor r13d,r9d - add eax,DWORD[28+rsp] - pxor xmm7,xmm6 - mov edi,ebx - xor r12d,r11d - ror r14d,11 - pshufd xmm7,xmm7,8 - xor edi,ecx - add eax,r12d - movdqa xmm6,XMMWORD[32+rbp] - ror r13d,6 - and r15d,edi - pslldq xmm7,8 - xor r14d,ebx - add eax,r13d - xor r15d,ecx - paddd xmm1,xmm7 - ror r14d,2 - add r8d,eax - add eax,r15d - paddd xmm6,xmm1 - mov r13d,r8d - add r14d,eax - movdqa XMMWORD[16+rsp],xmm6 - ror r13d,14 - movdqa xmm4,xmm3 - mov eax,r14d - mov r12d,r9d - movdqa xmm7,xmm1 - ror r14d,9 - xor r13d,r8d - xor r12d,r10d - ror r13d,5 - xor r14d,eax -DB 102,15,58,15,226,4 - and r12d,r8d - xor r13d,r8d -DB 102,15,58,15,248,4 - add r11d,DWORD[32+rsp] - mov r15d,eax - xor r12d,r10d - ror r14d,11 - movdqa xmm5,xmm4 - xor r15d,ebx - add r11d,r12d - movdqa xmm6,xmm4 - ror r13d,6 - and edi,r15d - psrld xmm4,3 - xor r14d,eax - add r11d,r13d - xor edi,ebx - paddd xmm2,xmm7 - ror r14d,2 - add edx,r11d - psrld xmm6,7 - add r11d,edi - mov r13d,edx - pshufd xmm7,xmm1,250 - add r14d,r11d - ror r13d,14 - pslld xmm5,14 - mov r11d,r14d - mov r12d,r8d - pxor xmm4,xmm6 - ror r14d,9 - xor r13d,edx - xor r12d,r9d - ror 
r13d,5 - psrld xmm6,11 - xor r14d,r11d - pxor xmm4,xmm5 - and r12d,edx - xor r13d,edx - pslld xmm5,11 - add r10d,DWORD[36+rsp] - mov edi,r11d - pxor xmm4,xmm6 - xor r12d,r9d - ror r14d,11 - movdqa xmm6,xmm7 - xor edi,eax - add r10d,r12d - pxor xmm4,xmm5 - ror r13d,6 - and r15d,edi - xor r14d,r11d - psrld xmm7,10 - add r10d,r13d - xor r15d,eax - paddd xmm2,xmm4 - ror r14d,2 - add ecx,r10d - psrlq xmm6,17 - add r10d,r15d - mov r13d,ecx - add r14d,r10d - pxor xmm7,xmm6 - ror r13d,14 - mov r10d,r14d - mov r12d,edx - ror r14d,9 - psrlq xmm6,2 - xor r13d,ecx - xor r12d,r8d - pxor xmm7,xmm6 - ror r13d,5 - xor r14d,r10d - and r12d,ecx - pshufd xmm7,xmm7,128 - xor r13d,ecx - add r9d,DWORD[40+rsp] - mov r15d,r10d - psrldq xmm7,8 - xor r12d,r8d - ror r14d,11 - xor r15d,r11d - add r9d,r12d - ror r13d,6 - paddd xmm2,xmm7 - and edi,r15d - xor r14d,r10d - add r9d,r13d - pshufd xmm7,xmm2,80 - xor edi,r11d - ror r14d,2 - add ebx,r9d - movdqa xmm6,xmm7 - add r9d,edi - mov r13d,ebx - psrld xmm7,10 - add r14d,r9d - ror r13d,14 - psrlq xmm6,17 - mov r9d,r14d - mov r12d,ecx - pxor xmm7,xmm6 - ror r14d,9 - xor r13d,ebx - xor r12d,edx - ror r13d,5 - xor r14d,r9d - psrlq xmm6,2 - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[44+rsp] - pxor xmm7,xmm6 - mov edi,r9d - xor r12d,edx - ror r14d,11 - pshufd xmm7,xmm7,8 - xor edi,r10d - add r8d,r12d - movdqa xmm6,XMMWORD[64+rbp] - ror r13d,6 - and r15d,edi - pslldq xmm7,8 - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - paddd xmm2,xmm7 - ror r14d,2 - add eax,r8d - add r8d,r15d - paddd xmm6,xmm2 - mov r13d,eax - add r14d,r8d - movdqa XMMWORD[32+rsp],xmm6 - ror r13d,14 - movdqa xmm4,xmm0 - mov r8d,r14d - mov r12d,ebx - movdqa xmm7,xmm2 - ror r14d,9 - xor r13d,eax - xor r12d,ecx - ror r13d,5 - xor r14d,r8d -DB 102,15,58,15,227,4 - and r12d,eax - xor r13d,eax -DB 102,15,58,15,249,4 - add edx,DWORD[48+rsp] - mov r15d,r8d - xor r12d,ecx - ror r14d,11 - movdqa xmm5,xmm4 - xor r15d,r9d - add edx,r12d - movdqa xmm6,xmm4 - ror r13d,6 - and edi,r15d - psrld 
xmm4,3 - xor r14d,r8d - add edx,r13d - xor edi,r9d - paddd xmm3,xmm7 - ror r14d,2 - add r11d,edx - psrld xmm6,7 - add edx,edi - mov r13d,r11d - pshufd xmm7,xmm2,250 - add r14d,edx - ror r13d,14 - pslld xmm5,14 - mov edx,r14d - mov r12d,eax - pxor xmm4,xmm6 - ror r14d,9 - xor r13d,r11d - xor r12d,ebx - ror r13d,5 - psrld xmm6,11 - xor r14d,edx - pxor xmm4,xmm5 - and r12d,r11d - xor r13d,r11d - pslld xmm5,11 - add ecx,DWORD[52+rsp] - mov edi,edx - pxor xmm4,xmm6 - xor r12d,ebx - ror r14d,11 - movdqa xmm6,xmm7 - xor edi,r8d - add ecx,r12d - pxor xmm4,xmm5 - ror r13d,6 - and r15d,edi - xor r14d,edx - psrld xmm7,10 - add ecx,r13d - xor r15d,r8d - paddd xmm3,xmm4 - ror r14d,2 - add r10d,ecx - psrlq xmm6,17 - add ecx,r15d - mov r13d,r10d - add r14d,ecx - pxor xmm7,xmm6 - ror r13d,14 - mov ecx,r14d - mov r12d,r11d - ror r14d,9 - psrlq xmm6,2 - xor r13d,r10d - xor r12d,eax - pxor xmm7,xmm6 - ror r13d,5 - xor r14d,ecx - and r12d,r10d - pshufd xmm7,xmm7,128 - xor r13d,r10d - add ebx,DWORD[56+rsp] - mov r15d,ecx - psrldq xmm7,8 - xor r12d,eax - ror r14d,11 - xor r15d,edx - add ebx,r12d - ror r13d,6 - paddd xmm3,xmm7 - and edi,r15d - xor r14d,ecx - add ebx,r13d - pshufd xmm7,xmm3,80 - xor edi,edx - ror r14d,2 - add r9d,ebx - movdqa xmm6,xmm7 - add ebx,edi - mov r13d,r9d - psrld xmm7,10 - add r14d,ebx - ror r13d,14 - psrlq xmm6,17 - mov ebx,r14d - mov r12d,r10d - pxor xmm7,xmm6 - ror r14d,9 - xor r13d,r9d - xor r12d,r11d - ror r13d,5 - xor r14d,ebx - psrlq xmm6,2 - and r12d,r9d - xor r13d,r9d - add eax,DWORD[60+rsp] - pxor xmm7,xmm6 - mov edi,ebx - xor r12d,r11d - ror r14d,11 - pshufd xmm7,xmm7,8 - xor edi,ecx - add eax,r12d - movdqa xmm6,XMMWORD[96+rbp] - ror r13d,6 - and r15d,edi - pslldq xmm7,8 - xor r14d,ebx - add eax,r13d - xor r15d,ecx - paddd xmm3,xmm7 - ror r14d,2 - add r8d,eax - add eax,r15d - paddd xmm6,xmm3 - mov r13d,r8d - add r14d,eax - movdqa XMMWORD[48+rsp],xmm6 - cmp BYTE[131+rbp],0 - jne NEAR $L$ssse3_00_47 - ror r13d,14 - mov eax,r14d - mov r12d,r9d - ror 
r14d,9 - xor r13d,r8d - xor r12d,r10d - ror r13d,5 - xor r14d,eax - and r12d,r8d - xor r13d,r8d - add r11d,DWORD[rsp] - mov r15d,eax - xor r12d,r10d - ror r14d,11 - xor r15d,ebx - add r11d,r12d - ror r13d,6 - and edi,r15d - xor r14d,eax - add r11d,r13d - xor edi,ebx - ror r14d,2 - add edx,r11d - add r11d,edi - mov r13d,edx - add r14d,r11d - ror r13d,14 - mov r11d,r14d - mov r12d,r8d - ror r14d,9 - xor r13d,edx - xor r12d,r9d - ror r13d,5 - xor r14d,r11d - and r12d,edx - xor r13d,edx - add r10d,DWORD[4+rsp] - mov edi,r11d - xor r12d,r9d - ror r14d,11 - xor edi,eax - add r10d,r12d - ror r13d,6 - and r15d,edi - xor r14d,r11d - add r10d,r13d - xor r15d,eax - ror r14d,2 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - add r14d,r10d - ror r13d,14 - mov r10d,r14d - mov r12d,edx - ror r14d,9 - xor r13d,ecx - xor r12d,r8d - ror r13d,5 - xor r14d,r10d - and r12d,ecx - xor r13d,ecx - add r9d,DWORD[8+rsp] - mov r15d,r10d - xor r12d,r8d - ror r14d,11 - xor r15d,r11d - add r9d,r12d - ror r13d,6 - and edi,r15d - xor r14d,r10d - add r9d,r13d - xor edi,r11d - ror r14d,2 - add ebx,r9d - add r9d,edi - mov r13d,ebx - add r14d,r9d - ror r13d,14 - mov r9d,r14d - mov r12d,ecx - ror r14d,9 - xor r13d,ebx - xor r12d,edx - ror r13d,5 - xor r14d,r9d - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[12+rsp] - mov edi,r9d - xor r12d,edx - ror r14d,11 - xor edi,r10d - add r8d,r12d - ror r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - ror r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - ror r13d,14 - mov r8d,r14d - mov r12d,ebx - ror r14d,9 - xor r13d,eax - xor r12d,ecx - ror r13d,5 - xor r14d,r8d - and r12d,eax - xor r13d,eax - add edx,DWORD[16+rsp] - mov r15d,r8d - xor r12d,ecx - ror r14d,11 - xor r15d,r9d - add edx,r12d - ror r13d,6 - and edi,r15d - xor r14d,r8d - add edx,r13d - xor edi,r9d - ror r14d,2 - add r11d,edx - add edx,edi - mov r13d,r11d - add r14d,edx - ror r13d,14 - mov edx,r14d - mov r12d,eax - ror r14d,9 - xor r13d,r11d - xor r12d,ebx - ror 
r13d,5 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - add ecx,DWORD[20+rsp] - mov edi,edx - xor r12d,ebx - ror r14d,11 - xor edi,r8d - add ecx,r12d - ror r13d,6 - and r15d,edi - xor r14d,edx - add ecx,r13d - xor r15d,r8d - ror r14d,2 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - add r14d,ecx - ror r13d,14 - mov ecx,r14d - mov r12d,r11d - ror r14d,9 - xor r13d,r10d - xor r12d,eax - ror r13d,5 - xor r14d,ecx - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[24+rsp] - mov r15d,ecx - xor r12d,eax - ror r14d,11 - xor r15d,edx - add ebx,r12d - ror r13d,6 - and edi,r15d - xor r14d,ecx - add ebx,r13d - xor edi,edx - ror r14d,2 - add r9d,ebx - add ebx,edi - mov r13d,r9d - add r14d,ebx - ror r13d,14 - mov ebx,r14d - mov r12d,r10d - ror r14d,9 - xor r13d,r9d - xor r12d,r11d - ror r13d,5 - xor r14d,ebx - and r12d,r9d - xor r13d,r9d - add eax,DWORD[28+rsp] - mov edi,ebx - xor r12d,r11d - ror r14d,11 - xor edi,ecx - add eax,r12d - ror r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - ror r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - ror r13d,14 - mov eax,r14d - mov r12d,r9d - ror r14d,9 - xor r13d,r8d - xor r12d,r10d - ror r13d,5 - xor r14d,eax - and r12d,r8d - xor r13d,r8d - add r11d,DWORD[32+rsp] - mov r15d,eax - xor r12d,r10d - ror r14d,11 - xor r15d,ebx - add r11d,r12d - ror r13d,6 - and edi,r15d - xor r14d,eax - add r11d,r13d - xor edi,ebx - ror r14d,2 - add edx,r11d - add r11d,edi - mov r13d,edx - add r14d,r11d - ror r13d,14 - mov r11d,r14d - mov r12d,r8d - ror r14d,9 - xor r13d,edx - xor r12d,r9d - ror r13d,5 - xor r14d,r11d - and r12d,edx - xor r13d,edx - add r10d,DWORD[36+rsp] - mov edi,r11d - xor r12d,r9d - ror r14d,11 - xor edi,eax - add r10d,r12d - ror r13d,6 - and r15d,edi - xor r14d,r11d - add r10d,r13d - xor r15d,eax - ror r14d,2 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - add r14d,r10d - ror r13d,14 - mov r10d,r14d - mov r12d,edx - ror r14d,9 - xor r13d,ecx - xor r12d,r8d - ror r13d,5 - xor r14d,r10d - and r12d,ecx - 
xor r13d,ecx - add r9d,DWORD[40+rsp] - mov r15d,r10d - xor r12d,r8d - ror r14d,11 - xor r15d,r11d - add r9d,r12d - ror r13d,6 - and edi,r15d - xor r14d,r10d - add r9d,r13d - xor edi,r11d - ror r14d,2 - add ebx,r9d - add r9d,edi - mov r13d,ebx - add r14d,r9d - ror r13d,14 - mov r9d,r14d - mov r12d,ecx - ror r14d,9 - xor r13d,ebx - xor r12d,edx - ror r13d,5 - xor r14d,r9d - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[44+rsp] - mov edi,r9d - xor r12d,edx - ror r14d,11 - xor edi,r10d - add r8d,r12d - ror r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - ror r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - ror r13d,14 - mov r8d,r14d - mov r12d,ebx - ror r14d,9 - xor r13d,eax - xor r12d,ecx - ror r13d,5 - xor r14d,r8d - and r12d,eax - xor r13d,eax - add edx,DWORD[48+rsp] - mov r15d,r8d - xor r12d,ecx - ror r14d,11 - xor r15d,r9d - add edx,r12d - ror r13d,6 - and edi,r15d - xor r14d,r8d - add edx,r13d - xor edi,r9d - ror r14d,2 - add r11d,edx - add edx,edi - mov r13d,r11d - add r14d,edx - ror r13d,14 - mov edx,r14d - mov r12d,eax - ror r14d,9 - xor r13d,r11d - xor r12d,ebx - ror r13d,5 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - add ecx,DWORD[52+rsp] - mov edi,edx - xor r12d,ebx - ror r14d,11 - xor edi,r8d - add ecx,r12d - ror r13d,6 - and r15d,edi - xor r14d,edx - add ecx,r13d - xor r15d,r8d - ror r14d,2 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - add r14d,ecx - ror r13d,14 - mov ecx,r14d - mov r12d,r11d - ror r14d,9 - xor r13d,r10d - xor r12d,eax - ror r13d,5 - xor r14d,ecx - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[56+rsp] - mov r15d,ecx - xor r12d,eax - ror r14d,11 - xor r15d,edx - add ebx,r12d - ror r13d,6 - and edi,r15d - xor r14d,ecx - add ebx,r13d - xor edi,edx - ror r14d,2 - add r9d,ebx - add ebx,edi - mov r13d,r9d - add r14d,ebx - ror r13d,14 - mov ebx,r14d - mov r12d,r10d - ror r14d,9 - xor r13d,r9d - xor r12d,r11d - ror r13d,5 - xor r14d,ebx - and r12d,r9d - xor r13d,r9d - add eax,DWORD[60+rsp] - mov edi,ebx 
- xor r12d,r11d - ror r14d,11 - xor edi,ecx - add eax,r12d - ror r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - ror r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - mov rdi,QWORD[((64+0))+rsp] - mov eax,r14d - - add eax,DWORD[rdi] - lea rsi,[64+rsi] - add ebx,DWORD[4+rdi] - add ecx,DWORD[8+rdi] - add edx,DWORD[12+rdi] - add r8d,DWORD[16+rdi] - add r9d,DWORD[20+rdi] - add r10d,DWORD[24+rdi] - add r11d,DWORD[28+rdi] - - cmp rsi,QWORD[((64+16))+rsp] - - mov DWORD[rdi],eax - mov DWORD[4+rdi],ebx - mov DWORD[8+rdi],ecx - mov DWORD[12+rdi],edx - mov DWORD[16+rdi],r8d - mov DWORD[20+rdi],r9d - mov DWORD[24+rdi],r10d - mov DWORD[28+rdi],r11d - jb NEAR $L$loop_ssse3 - - mov rsi,QWORD[88+rsp] - - movaps xmm6,XMMWORD[((64+32))+rsp] - movaps xmm7,XMMWORD[((64+48))+rsp] - movaps xmm8,XMMWORD[((64+64))+rsp] - movaps xmm9,XMMWORD[((64+80))+rsp] - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue_ssse3: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha256_block_data_order_ssse3: - -ALIGN 64 -sha256_block_data_order_avx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha256_block_data_order_avx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$avx_shortcut: - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - shl rdx,4 - sub rsp,160 - lea rdx,[rdx*4+rsi] - and rsp,-64 - mov QWORD[((64+0))+rsp],rdi - mov QWORD[((64+8))+rsp],rsi - mov QWORD[((64+16))+rsp],rdx - mov QWORD[88+rsp],rax - - movaps XMMWORD[(64+32)+rsp],xmm6 - movaps XMMWORD[(64+48)+rsp],xmm7 - movaps XMMWORD[(64+64)+rsp],xmm8 - movaps XMMWORD[(64+80)+rsp],xmm9 -$L$prologue_avx: - - vzeroupper - mov eax,DWORD[rdi] - mov ebx,DWORD[4+rdi] - mov ecx,DWORD[8+rdi] - mov 
edx,DWORD[12+rdi] - mov r8d,DWORD[16+rdi] - mov r9d,DWORD[20+rdi] - mov r10d,DWORD[24+rdi] - mov r11d,DWORD[28+rdi] - vmovdqa xmm8,XMMWORD[((K256+512+32))] - vmovdqa xmm9,XMMWORD[((K256+512+64))] - jmp NEAR $L$loop_avx -ALIGN 16 -$L$loop_avx: - vmovdqa xmm7,XMMWORD[((K256+512))] - vmovdqu xmm0,XMMWORD[rsi] - vmovdqu xmm1,XMMWORD[16+rsi] - vmovdqu xmm2,XMMWORD[32+rsi] - vmovdqu xmm3,XMMWORD[48+rsi] - vpshufb xmm0,xmm0,xmm7 - lea rbp,[K256] - vpshufb xmm1,xmm1,xmm7 - vpshufb xmm2,xmm2,xmm7 - vpaddd xmm4,xmm0,XMMWORD[rbp] - vpshufb xmm3,xmm3,xmm7 - vpaddd xmm5,xmm1,XMMWORD[32+rbp] - vpaddd xmm6,xmm2,XMMWORD[64+rbp] - vpaddd xmm7,xmm3,XMMWORD[96+rbp] - vmovdqa XMMWORD[rsp],xmm4 - mov r14d,eax - vmovdqa XMMWORD[16+rsp],xmm5 - mov edi,ebx - vmovdqa XMMWORD[32+rsp],xmm6 - xor edi,ecx - vmovdqa XMMWORD[48+rsp],xmm7 - mov r13d,r8d - jmp NEAR $L$avx_00_47 - -ALIGN 16 -$L$avx_00_47: - sub rbp,-128 - vpalignr xmm4,xmm1,xmm0,4 - shrd r13d,r13d,14 - mov eax,r14d - mov r12d,r9d - vpalignr xmm7,xmm3,xmm2,4 - shrd r14d,r14d,9 - xor r13d,r8d - xor r12d,r10d - vpsrld xmm6,xmm4,7 - shrd r13d,r13d,5 - xor r14d,eax - and r12d,r8d - vpaddd xmm0,xmm0,xmm7 - xor r13d,r8d - add r11d,DWORD[rsp] - mov r15d,eax - vpsrld xmm7,xmm4,3 - xor r12d,r10d - shrd r14d,r14d,11 - xor r15d,ebx - vpslld xmm5,xmm4,14 - add r11d,r12d - shrd r13d,r13d,6 - and edi,r15d - vpxor xmm4,xmm7,xmm6 - xor r14d,eax - add r11d,r13d - xor edi,ebx - vpshufd xmm7,xmm3,250 - shrd r14d,r14d,2 - add edx,r11d - add r11d,edi - vpsrld xmm6,xmm6,11 - mov r13d,edx - add r14d,r11d - shrd r13d,r13d,14 - vpxor xmm4,xmm4,xmm5 - mov r11d,r14d - mov r12d,r8d - shrd r14d,r14d,9 - vpslld xmm5,xmm5,11 - xor r13d,edx - xor r12d,r9d - shrd r13d,r13d,5 - vpxor xmm4,xmm4,xmm6 - xor r14d,r11d - and r12d,edx - xor r13d,edx - vpsrld xmm6,xmm7,10 - add r10d,DWORD[4+rsp] - mov edi,r11d - xor r12d,r9d - vpxor xmm4,xmm4,xmm5 - shrd r14d,r14d,11 - xor edi,eax - add r10d,r12d - vpsrlq xmm7,xmm7,17 - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r11d - 
vpaddd xmm0,xmm0,xmm4 - add r10d,r13d - xor r15d,eax - shrd r14d,r14d,2 - vpxor xmm6,xmm6,xmm7 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - vpsrlq xmm7,xmm7,2 - add r14d,r10d - shrd r13d,r13d,14 - mov r10d,r14d - vpxor xmm6,xmm6,xmm7 - mov r12d,edx - shrd r14d,r14d,9 - xor r13d,ecx - vpshufb xmm6,xmm6,xmm8 - xor r12d,r8d - shrd r13d,r13d,5 - xor r14d,r10d - vpaddd xmm0,xmm0,xmm6 - and r12d,ecx - xor r13d,ecx - add r9d,DWORD[8+rsp] - vpshufd xmm7,xmm0,80 - mov r15d,r10d - xor r12d,r8d - shrd r14d,r14d,11 - vpsrld xmm6,xmm7,10 - xor r15d,r11d - add r9d,r12d - shrd r13d,r13d,6 - vpsrlq xmm7,xmm7,17 - and edi,r15d - xor r14d,r10d - add r9d,r13d - vpxor xmm6,xmm6,xmm7 - xor edi,r11d - shrd r14d,r14d,2 - add ebx,r9d - vpsrlq xmm7,xmm7,2 - add r9d,edi - mov r13d,ebx - add r14d,r9d - vpxor xmm6,xmm6,xmm7 - shrd r13d,r13d,14 - mov r9d,r14d - mov r12d,ecx - vpshufb xmm6,xmm6,xmm9 - shrd r14d,r14d,9 - xor r13d,ebx - xor r12d,edx - vpaddd xmm0,xmm0,xmm6 - shrd r13d,r13d,5 - xor r14d,r9d - and r12d,ebx - vpaddd xmm6,xmm0,XMMWORD[rbp] - xor r13d,ebx - add r8d,DWORD[12+rsp] - mov edi,r9d - xor r12d,edx - shrd r14d,r14d,11 - xor edi,r10d - add r8d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - shrd r14d,r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - vmovdqa XMMWORD[rsp],xmm6 - vpalignr xmm4,xmm2,xmm1,4 - shrd r13d,r13d,14 - mov r8d,r14d - mov r12d,ebx - vpalignr xmm7,xmm0,xmm3,4 - shrd r14d,r14d,9 - xor r13d,eax - xor r12d,ecx - vpsrld xmm6,xmm4,7 - shrd r13d,r13d,5 - xor r14d,r8d - and r12d,eax - vpaddd xmm1,xmm1,xmm7 - xor r13d,eax - add edx,DWORD[16+rsp] - mov r15d,r8d - vpsrld xmm7,xmm4,3 - xor r12d,ecx - shrd r14d,r14d,11 - xor r15d,r9d - vpslld xmm5,xmm4,14 - add edx,r12d - shrd r13d,r13d,6 - and edi,r15d - vpxor xmm4,xmm7,xmm6 - xor r14d,r8d - add edx,r13d - xor edi,r9d - vpshufd xmm7,xmm0,250 - shrd r14d,r14d,2 - add r11d,edx - add edx,edi - vpsrld xmm6,xmm6,11 - mov r13d,r11d - add r14d,edx - shrd 
r13d,r13d,14 - vpxor xmm4,xmm4,xmm5 - mov edx,r14d - mov r12d,eax - shrd r14d,r14d,9 - vpslld xmm5,xmm5,11 - xor r13d,r11d - xor r12d,ebx - shrd r13d,r13d,5 - vpxor xmm4,xmm4,xmm6 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - vpsrld xmm6,xmm7,10 - add ecx,DWORD[20+rsp] - mov edi,edx - xor r12d,ebx - vpxor xmm4,xmm4,xmm5 - shrd r14d,r14d,11 - xor edi,r8d - add ecx,r12d - vpsrlq xmm7,xmm7,17 - shrd r13d,r13d,6 - and r15d,edi - xor r14d,edx - vpaddd xmm1,xmm1,xmm4 - add ecx,r13d - xor r15d,r8d - shrd r14d,r14d,2 - vpxor xmm6,xmm6,xmm7 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - vpsrlq xmm7,xmm7,2 - add r14d,ecx - shrd r13d,r13d,14 - mov ecx,r14d - vpxor xmm6,xmm6,xmm7 - mov r12d,r11d - shrd r14d,r14d,9 - xor r13d,r10d - vpshufb xmm6,xmm6,xmm8 - xor r12d,eax - shrd r13d,r13d,5 - xor r14d,ecx - vpaddd xmm1,xmm1,xmm6 - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[24+rsp] - vpshufd xmm7,xmm1,80 - mov r15d,ecx - xor r12d,eax - shrd r14d,r14d,11 - vpsrld xmm6,xmm7,10 - xor r15d,edx - add ebx,r12d - shrd r13d,r13d,6 - vpsrlq xmm7,xmm7,17 - and edi,r15d - xor r14d,ecx - add ebx,r13d - vpxor xmm6,xmm6,xmm7 - xor edi,edx - shrd r14d,r14d,2 - add r9d,ebx - vpsrlq xmm7,xmm7,2 - add ebx,edi - mov r13d,r9d - add r14d,ebx - vpxor xmm6,xmm6,xmm7 - shrd r13d,r13d,14 - mov ebx,r14d - mov r12d,r10d - vpshufb xmm6,xmm6,xmm9 - shrd r14d,r14d,9 - xor r13d,r9d - xor r12d,r11d - vpaddd xmm1,xmm1,xmm6 - shrd r13d,r13d,5 - xor r14d,ebx - and r12d,r9d - vpaddd xmm6,xmm1,XMMWORD[32+rbp] - xor r13d,r9d - add eax,DWORD[28+rsp] - mov edi,ebx - xor r12d,r11d - shrd r14d,r14d,11 - xor edi,ecx - add eax,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - shrd r14d,r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - vmovdqa XMMWORD[16+rsp],xmm6 - vpalignr xmm4,xmm3,xmm2,4 - shrd r13d,r13d,14 - mov eax,r14d - mov r12d,r9d - vpalignr xmm7,xmm1,xmm0,4 - shrd r14d,r14d,9 - xor r13d,r8d - xor r12d,r10d - vpsrld xmm6,xmm4,7 - shrd r13d,r13d,5 - xor 
r14d,eax - and r12d,r8d - vpaddd xmm2,xmm2,xmm7 - xor r13d,r8d - add r11d,DWORD[32+rsp] - mov r15d,eax - vpsrld xmm7,xmm4,3 - xor r12d,r10d - shrd r14d,r14d,11 - xor r15d,ebx - vpslld xmm5,xmm4,14 - add r11d,r12d - shrd r13d,r13d,6 - and edi,r15d - vpxor xmm4,xmm7,xmm6 - xor r14d,eax - add r11d,r13d - xor edi,ebx - vpshufd xmm7,xmm1,250 - shrd r14d,r14d,2 - add edx,r11d - add r11d,edi - vpsrld xmm6,xmm6,11 - mov r13d,edx - add r14d,r11d - shrd r13d,r13d,14 - vpxor xmm4,xmm4,xmm5 - mov r11d,r14d - mov r12d,r8d - shrd r14d,r14d,9 - vpslld xmm5,xmm5,11 - xor r13d,edx - xor r12d,r9d - shrd r13d,r13d,5 - vpxor xmm4,xmm4,xmm6 - xor r14d,r11d - and r12d,edx - xor r13d,edx - vpsrld xmm6,xmm7,10 - add r10d,DWORD[36+rsp] - mov edi,r11d - xor r12d,r9d - vpxor xmm4,xmm4,xmm5 - shrd r14d,r14d,11 - xor edi,eax - add r10d,r12d - vpsrlq xmm7,xmm7,17 - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r11d - vpaddd xmm2,xmm2,xmm4 - add r10d,r13d - xor r15d,eax - shrd r14d,r14d,2 - vpxor xmm6,xmm6,xmm7 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - vpsrlq xmm7,xmm7,2 - add r14d,r10d - shrd r13d,r13d,14 - mov r10d,r14d - vpxor xmm6,xmm6,xmm7 - mov r12d,edx - shrd r14d,r14d,9 - xor r13d,ecx - vpshufb xmm6,xmm6,xmm8 - xor r12d,r8d - shrd r13d,r13d,5 - xor r14d,r10d - vpaddd xmm2,xmm2,xmm6 - and r12d,ecx - xor r13d,ecx - add r9d,DWORD[40+rsp] - vpshufd xmm7,xmm2,80 - mov r15d,r10d - xor r12d,r8d - shrd r14d,r14d,11 - vpsrld xmm6,xmm7,10 - xor r15d,r11d - add r9d,r12d - shrd r13d,r13d,6 - vpsrlq xmm7,xmm7,17 - and edi,r15d - xor r14d,r10d - add r9d,r13d - vpxor xmm6,xmm6,xmm7 - xor edi,r11d - shrd r14d,r14d,2 - add ebx,r9d - vpsrlq xmm7,xmm7,2 - add r9d,edi - mov r13d,ebx - add r14d,r9d - vpxor xmm6,xmm6,xmm7 - shrd r13d,r13d,14 - mov r9d,r14d - mov r12d,ecx - vpshufb xmm6,xmm6,xmm9 - shrd r14d,r14d,9 - xor r13d,ebx - xor r12d,edx - vpaddd xmm2,xmm2,xmm6 - shrd r13d,r13d,5 - xor r14d,r9d - and r12d,ebx - vpaddd xmm6,xmm2,XMMWORD[64+rbp] - xor r13d,ebx - add r8d,DWORD[44+rsp] - mov edi,r9d - 
xor r12d,edx - shrd r14d,r14d,11 - xor edi,r10d - add r8d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - shrd r14d,r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - vmovdqa XMMWORD[32+rsp],xmm6 - vpalignr xmm4,xmm0,xmm3,4 - shrd r13d,r13d,14 - mov r8d,r14d - mov r12d,ebx - vpalignr xmm7,xmm2,xmm1,4 - shrd r14d,r14d,9 - xor r13d,eax - xor r12d,ecx - vpsrld xmm6,xmm4,7 - shrd r13d,r13d,5 - xor r14d,r8d - and r12d,eax - vpaddd xmm3,xmm3,xmm7 - xor r13d,eax - add edx,DWORD[48+rsp] - mov r15d,r8d - vpsrld xmm7,xmm4,3 - xor r12d,ecx - shrd r14d,r14d,11 - xor r15d,r9d - vpslld xmm5,xmm4,14 - add edx,r12d - shrd r13d,r13d,6 - and edi,r15d - vpxor xmm4,xmm7,xmm6 - xor r14d,r8d - add edx,r13d - xor edi,r9d - vpshufd xmm7,xmm2,250 - shrd r14d,r14d,2 - add r11d,edx - add edx,edi - vpsrld xmm6,xmm6,11 - mov r13d,r11d - add r14d,edx - shrd r13d,r13d,14 - vpxor xmm4,xmm4,xmm5 - mov edx,r14d - mov r12d,eax - shrd r14d,r14d,9 - vpslld xmm5,xmm5,11 - xor r13d,r11d - xor r12d,ebx - shrd r13d,r13d,5 - vpxor xmm4,xmm4,xmm6 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - vpsrld xmm6,xmm7,10 - add ecx,DWORD[52+rsp] - mov edi,edx - xor r12d,ebx - vpxor xmm4,xmm4,xmm5 - shrd r14d,r14d,11 - xor edi,r8d - add ecx,r12d - vpsrlq xmm7,xmm7,17 - shrd r13d,r13d,6 - and r15d,edi - xor r14d,edx - vpaddd xmm3,xmm3,xmm4 - add ecx,r13d - xor r15d,r8d - shrd r14d,r14d,2 - vpxor xmm6,xmm6,xmm7 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - vpsrlq xmm7,xmm7,2 - add r14d,ecx - shrd r13d,r13d,14 - mov ecx,r14d - vpxor xmm6,xmm6,xmm7 - mov r12d,r11d - shrd r14d,r14d,9 - xor r13d,r10d - vpshufb xmm6,xmm6,xmm8 - xor r12d,eax - shrd r13d,r13d,5 - xor r14d,ecx - vpaddd xmm3,xmm3,xmm6 - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[56+rsp] - vpshufd xmm7,xmm3,80 - mov r15d,ecx - xor r12d,eax - shrd r14d,r14d,11 - vpsrld xmm6,xmm7,10 - xor r15d,edx - add ebx,r12d - shrd r13d,r13d,6 - vpsrlq xmm7,xmm7,17 - and edi,r15d - xor r14d,ecx - add ebx,r13d - 
vpxor xmm6,xmm6,xmm7 - xor edi,edx - shrd r14d,r14d,2 - add r9d,ebx - vpsrlq xmm7,xmm7,2 - add ebx,edi - mov r13d,r9d - add r14d,ebx - vpxor xmm6,xmm6,xmm7 - shrd r13d,r13d,14 - mov ebx,r14d - mov r12d,r10d - vpshufb xmm6,xmm6,xmm9 - shrd r14d,r14d,9 - xor r13d,r9d - xor r12d,r11d - vpaddd xmm3,xmm3,xmm6 - shrd r13d,r13d,5 - xor r14d,ebx - and r12d,r9d - vpaddd xmm6,xmm3,XMMWORD[96+rbp] - xor r13d,r9d - add eax,DWORD[60+rsp] - mov edi,ebx - xor r12d,r11d - shrd r14d,r14d,11 - xor edi,ecx - add eax,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - shrd r14d,r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - vmovdqa XMMWORD[48+rsp],xmm6 - cmp BYTE[131+rbp],0 - jne NEAR $L$avx_00_47 - shrd r13d,r13d,14 - mov eax,r14d - mov r12d,r9d - shrd r14d,r14d,9 - xor r13d,r8d - xor r12d,r10d - shrd r13d,r13d,5 - xor r14d,eax - and r12d,r8d - xor r13d,r8d - add r11d,DWORD[rsp] - mov r15d,eax - xor r12d,r10d - shrd r14d,r14d,11 - xor r15d,ebx - add r11d,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,eax - add r11d,r13d - xor edi,ebx - shrd r14d,r14d,2 - add edx,r11d - add r11d,edi - mov r13d,edx - add r14d,r11d - shrd r13d,r13d,14 - mov r11d,r14d - mov r12d,r8d - shrd r14d,r14d,9 - xor r13d,edx - xor r12d,r9d - shrd r13d,r13d,5 - xor r14d,r11d - and r12d,edx - xor r13d,edx - add r10d,DWORD[4+rsp] - mov edi,r11d - xor r12d,r9d - shrd r14d,r14d,11 - xor edi,eax - add r10d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r11d - add r10d,r13d - xor r15d,eax - shrd r14d,r14d,2 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - add r14d,r10d - shrd r13d,r13d,14 - mov r10d,r14d - mov r12d,edx - shrd r14d,r14d,9 - xor r13d,ecx - xor r12d,r8d - shrd r13d,r13d,5 - xor r14d,r10d - and r12d,ecx - xor r13d,ecx - add r9d,DWORD[8+rsp] - mov r15d,r10d - xor r12d,r8d - shrd r14d,r14d,11 - xor r15d,r11d - add r9d,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,r10d - add r9d,r13d - xor edi,r11d - shrd r14d,r14d,2 - add ebx,r9d - add r9d,edi - 
mov r13d,ebx - add r14d,r9d - shrd r13d,r13d,14 - mov r9d,r14d - mov r12d,ecx - shrd r14d,r14d,9 - xor r13d,ebx - xor r12d,edx - shrd r13d,r13d,5 - xor r14d,r9d - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[12+rsp] - mov edi,r9d - xor r12d,edx - shrd r14d,r14d,11 - xor edi,r10d - add r8d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - shrd r14d,r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - shrd r13d,r13d,14 - mov r8d,r14d - mov r12d,ebx - shrd r14d,r14d,9 - xor r13d,eax - xor r12d,ecx - shrd r13d,r13d,5 - xor r14d,r8d - and r12d,eax - xor r13d,eax - add edx,DWORD[16+rsp] - mov r15d,r8d - xor r12d,ecx - shrd r14d,r14d,11 - xor r15d,r9d - add edx,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,r8d - add edx,r13d - xor edi,r9d - shrd r14d,r14d,2 - add r11d,edx - add edx,edi - mov r13d,r11d - add r14d,edx - shrd r13d,r13d,14 - mov edx,r14d - mov r12d,eax - shrd r14d,r14d,9 - xor r13d,r11d - xor r12d,ebx - shrd r13d,r13d,5 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - add ecx,DWORD[20+rsp] - mov edi,edx - xor r12d,ebx - shrd r14d,r14d,11 - xor edi,r8d - add ecx,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,edx - add ecx,r13d - xor r15d,r8d - shrd r14d,r14d,2 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - add r14d,ecx - shrd r13d,r13d,14 - mov ecx,r14d - mov r12d,r11d - shrd r14d,r14d,9 - xor r13d,r10d - xor r12d,eax - shrd r13d,r13d,5 - xor r14d,ecx - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[24+rsp] - mov r15d,ecx - xor r12d,eax - shrd r14d,r14d,11 - xor r15d,edx - add ebx,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,ecx - add ebx,r13d - xor edi,edx - shrd r14d,r14d,2 - add r9d,ebx - add ebx,edi - mov r13d,r9d - add r14d,ebx - shrd r13d,r13d,14 - mov ebx,r14d - mov r12d,r10d - shrd r14d,r14d,9 - xor r13d,r9d - xor r12d,r11d - shrd r13d,r13d,5 - xor r14d,ebx - and r12d,r9d - xor r13d,r9d - add eax,DWORD[28+rsp] - mov edi,ebx - xor r12d,r11d - shrd r14d,r14d,11 - xor edi,ecx - add eax,r12d - 
shrd r13d,r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - shrd r14d,r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - shrd r13d,r13d,14 - mov eax,r14d - mov r12d,r9d - shrd r14d,r14d,9 - xor r13d,r8d - xor r12d,r10d - shrd r13d,r13d,5 - xor r14d,eax - and r12d,r8d - xor r13d,r8d - add r11d,DWORD[32+rsp] - mov r15d,eax - xor r12d,r10d - shrd r14d,r14d,11 - xor r15d,ebx - add r11d,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,eax - add r11d,r13d - xor edi,ebx - shrd r14d,r14d,2 - add edx,r11d - add r11d,edi - mov r13d,edx - add r14d,r11d - shrd r13d,r13d,14 - mov r11d,r14d - mov r12d,r8d - shrd r14d,r14d,9 - xor r13d,edx - xor r12d,r9d - shrd r13d,r13d,5 - xor r14d,r11d - and r12d,edx - xor r13d,edx - add r10d,DWORD[36+rsp] - mov edi,r11d - xor r12d,r9d - shrd r14d,r14d,11 - xor edi,eax - add r10d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r11d - add r10d,r13d - xor r15d,eax - shrd r14d,r14d,2 - add ecx,r10d - add r10d,r15d - mov r13d,ecx - add r14d,r10d - shrd r13d,r13d,14 - mov r10d,r14d - mov r12d,edx - shrd r14d,r14d,9 - xor r13d,ecx - xor r12d,r8d - shrd r13d,r13d,5 - xor r14d,r10d - and r12d,ecx - xor r13d,ecx - add r9d,DWORD[40+rsp] - mov r15d,r10d - xor r12d,r8d - shrd r14d,r14d,11 - xor r15d,r11d - add r9d,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,r10d - add r9d,r13d - xor edi,r11d - shrd r14d,r14d,2 - add ebx,r9d - add r9d,edi - mov r13d,ebx - add r14d,r9d - shrd r13d,r13d,14 - mov r9d,r14d - mov r12d,ecx - shrd r14d,r14d,9 - xor r13d,ebx - xor r12d,edx - shrd r13d,r13d,5 - xor r14d,r9d - and r12d,ebx - xor r13d,ebx - add r8d,DWORD[44+rsp] - mov edi,r9d - xor r12d,edx - shrd r14d,r14d,11 - xor edi,r10d - add r8d,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,r9d - add r8d,r13d - xor r15d,r10d - shrd r14d,r14d,2 - add eax,r8d - add r8d,r15d - mov r13d,eax - add r14d,r8d - shrd r13d,r13d,14 - mov r8d,r14d - mov r12d,ebx - shrd r14d,r14d,9 - xor r13d,eax - xor r12d,ecx - shrd r13d,r13d,5 - xor 
r14d,r8d - and r12d,eax - xor r13d,eax - add edx,DWORD[48+rsp] - mov r15d,r8d - xor r12d,ecx - shrd r14d,r14d,11 - xor r15d,r9d - add edx,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,r8d - add edx,r13d - xor edi,r9d - shrd r14d,r14d,2 - add r11d,edx - add edx,edi - mov r13d,r11d - add r14d,edx - shrd r13d,r13d,14 - mov edx,r14d - mov r12d,eax - shrd r14d,r14d,9 - xor r13d,r11d - xor r12d,ebx - shrd r13d,r13d,5 - xor r14d,edx - and r12d,r11d - xor r13d,r11d - add ecx,DWORD[52+rsp] - mov edi,edx - xor r12d,ebx - shrd r14d,r14d,11 - xor edi,r8d - add ecx,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,edx - add ecx,r13d - xor r15d,r8d - shrd r14d,r14d,2 - add r10d,ecx - add ecx,r15d - mov r13d,r10d - add r14d,ecx - shrd r13d,r13d,14 - mov ecx,r14d - mov r12d,r11d - shrd r14d,r14d,9 - xor r13d,r10d - xor r12d,eax - shrd r13d,r13d,5 - xor r14d,ecx - and r12d,r10d - xor r13d,r10d - add ebx,DWORD[56+rsp] - mov r15d,ecx - xor r12d,eax - shrd r14d,r14d,11 - xor r15d,edx - add ebx,r12d - shrd r13d,r13d,6 - and edi,r15d - xor r14d,ecx - add ebx,r13d - xor edi,edx - shrd r14d,r14d,2 - add r9d,ebx - add ebx,edi - mov r13d,r9d - add r14d,ebx - shrd r13d,r13d,14 - mov ebx,r14d - mov r12d,r10d - shrd r14d,r14d,9 - xor r13d,r9d - xor r12d,r11d - shrd r13d,r13d,5 - xor r14d,ebx - and r12d,r9d - xor r13d,r9d - add eax,DWORD[60+rsp] - mov edi,ebx - xor r12d,r11d - shrd r14d,r14d,11 - xor edi,ecx - add eax,r12d - shrd r13d,r13d,6 - and r15d,edi - xor r14d,ebx - add eax,r13d - xor r15d,ecx - shrd r14d,r14d,2 - add r8d,eax - add eax,r15d - mov r13d,r8d - add r14d,eax - mov rdi,QWORD[((64+0))+rsp] - mov eax,r14d - - add eax,DWORD[rdi] - lea rsi,[64+rsi] - add ebx,DWORD[4+rdi] - add ecx,DWORD[8+rdi] - add edx,DWORD[12+rdi] - add r8d,DWORD[16+rdi] - add r9d,DWORD[20+rdi] - add r10d,DWORD[24+rdi] - add r11d,DWORD[28+rdi] - - cmp rsi,QWORD[((64+16))+rsp] - - mov DWORD[rdi],eax - mov DWORD[4+rdi],ebx - mov DWORD[8+rdi],ecx - mov DWORD[12+rdi],edx - mov DWORD[16+rdi],r8d - mov 
DWORD[20+rdi],r9d - mov DWORD[24+rdi],r10d - mov DWORD[28+rdi],r11d - jb NEAR $L$loop_avx - - mov rsi,QWORD[88+rsp] - - vzeroupper - movaps xmm6,XMMWORD[((64+32))+rsp] - movaps xmm7,XMMWORD[((64+48))+rsp] - movaps xmm8,XMMWORD[((64+64))+rsp] - movaps xmm9,XMMWORD[((64+80))+rsp] - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue_avx: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha256_block_data_order_avx: -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_prologue - mov rsi,rax - mov rax,QWORD[((64+24))+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - - lea r10,[$L$epilogue] - cmp rbx,r10 - jb NEAR $L$in_prologue - - lea rsi,[((64+32))+rsi] - lea rdi,[512+r8] - mov ecx,8 - DD 0xa548f3fc - -$L$in_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov 
QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_sha256_block_data_order wrt ..imagebase - DD $L$SEH_end_sha256_block_data_order wrt ..imagebase - DD $L$SEH_info_sha256_block_data_order wrt ..imagebase - DD $L$SEH_begin_sha256_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_end_sha256_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_info_sha256_block_data_order_ssse3 wrt ..imagebase - DD $L$SEH_begin_sha256_block_data_order_avx wrt ..imagebase - DD $L$SEH_end_sha256_block_data_order_avx wrt ..imagebase - DD $L$SEH_info_sha256_block_data_order_avx wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_sha256_block_data_order: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$prologue wrt ..imagebase,$L$epilogue wrt ..imagebase -$L$SEH_info_sha256_block_data_order_ssse3: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$prologue_ssse3 wrt ..imagebase,$L$epilogue_ssse3 wrt ..imagebase -$L$SEH_info_sha256_block_data_order_avx: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$prologue_avx wrt ..imagebase,$L$epilogue_avx wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm deleted file mode 100644 index 33dc2c2ede..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm +++ /dev/null @@ -1,3139 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -EXTERN OPENSSL_ia32cap_P -global sha512_block_data_order - -ALIGN 16 -sha512_block_data_order: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha512_block_data_order: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea r11,[OPENSSL_ia32cap_P] - mov r9d,DWORD[r11] - mov r10d,DWORD[4+r11] - mov r11d,DWORD[8+r11] - and r9d,1073741824 - and r10d,268435968 - or r10d,r9d - cmp r10d,1342177792 - je NEAR $L$avx_shortcut - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - shl rdx,4 - sub rsp,16*8+4*8 - lea rdx,[rdx*8+rsi] - and rsp,-64 - mov QWORD[((128+0))+rsp],rdi - mov QWORD[((128+8))+rsp],rsi - mov QWORD[((128+16))+rsp],rdx - mov QWORD[152+rsp],rax - -$L$prologue: - - mov rax,QWORD[rdi] - mov rbx,QWORD[8+rdi] - mov rcx,QWORD[16+rdi] - mov rdx,QWORD[24+rdi] - mov r8,QWORD[32+rdi] - mov r9,QWORD[40+rdi] - mov r10,QWORD[48+rdi] - mov r11,QWORD[56+rdi] - jmp NEAR $L$loop - -ALIGN 16 -$L$loop: - mov rdi,rbx - lea rbp,[K512] - xor rdi,rcx - mov r12,QWORD[rsi] - mov r13,r8 - mov r14,rax - bswap r12 - ror r13,23 - mov r15,r9 - - xor r13,r8 - ror r14,5 - xor r15,r10 - - mov QWORD[rsp],r12 - xor r14,rax - and r15,r8 - - ror r13,4 - add r12,r11 - xor r15,r10 - - ror r14,6 - xor r13,r8 - add r12,r15 - - mov r15,rax - add r12,QWORD[rbp] - xor r14,rax - - xor r15,rbx - ror r13,14 - mov r11,rbx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r11,rdi - add rdx,r12 - add r11,r12 - - lea rbp,[8+rbp] - add r11,r14 - mov r12,QWORD[8+rsi] - mov r13,rdx - mov r14,r11 - bswap r12 - ror r13,23 - mov rdi,r8 - - xor r13,rdx - ror r14,5 - xor rdi,r9 - - mov QWORD[8+rsp],r12 - xor r14,r11 - and rdi,rdx - - ror r13,4 - add r12,r10 - xor rdi,r9 - - ror r14,6 - xor r13,rdx - add r12,rdi - - mov rdi,r11 - add r12,QWORD[rbp] - xor 
r14,r11 - - xor rdi,rax - ror r13,14 - mov r10,rax - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r10,r15 - add rcx,r12 - add r10,r12 - - lea rbp,[24+rbp] - add r10,r14 - mov r12,QWORD[16+rsi] - mov r13,rcx - mov r14,r10 - bswap r12 - ror r13,23 - mov r15,rdx - - xor r13,rcx - ror r14,5 - xor r15,r8 - - mov QWORD[16+rsp],r12 - xor r14,r10 - and r15,rcx - - ror r13,4 - add r12,r9 - xor r15,r8 - - ror r14,6 - xor r13,rcx - add r12,r15 - - mov r15,r10 - add r12,QWORD[rbp] - xor r14,r10 - - xor r15,r11 - ror r13,14 - mov r9,r11 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r9,rdi - add rbx,r12 - add r9,r12 - - lea rbp,[8+rbp] - add r9,r14 - mov r12,QWORD[24+rsi] - mov r13,rbx - mov r14,r9 - bswap r12 - ror r13,23 - mov rdi,rcx - - xor r13,rbx - ror r14,5 - xor rdi,rdx - - mov QWORD[24+rsp],r12 - xor r14,r9 - and rdi,rbx - - ror r13,4 - add r12,r8 - xor rdi,rdx - - ror r14,6 - xor r13,rbx - add r12,rdi - - mov rdi,r9 - add r12,QWORD[rbp] - xor r14,r9 - - xor rdi,r10 - ror r13,14 - mov r8,r10 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r8,r15 - add rax,r12 - add r8,r12 - - lea rbp,[24+rbp] - add r8,r14 - mov r12,QWORD[32+rsi] - mov r13,rax - mov r14,r8 - bswap r12 - ror r13,23 - mov r15,rbx - - xor r13,rax - ror r14,5 - xor r15,rcx - - mov QWORD[32+rsp],r12 - xor r14,r8 - and r15,rax - - ror r13,4 - add r12,rdx - xor r15,rcx - - ror r14,6 - xor r13,rax - add r12,r15 - - mov r15,r8 - add r12,QWORD[rbp] - xor r14,r8 - - xor r15,r9 - ror r13,14 - mov rdx,r9 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rdx,rdi - add r11,r12 - add rdx,r12 - - lea rbp,[8+rbp] - add rdx,r14 - mov r12,QWORD[40+rsi] - mov r13,r11 - mov r14,rdx - bswap r12 - ror r13,23 - mov rdi,rax - - xor r13,r11 - ror r14,5 - xor rdi,rbx - - mov QWORD[40+rsp],r12 - xor r14,rdx - and rdi,r11 - - ror r13,4 - add r12,rcx - xor rdi,rbx - - ror r14,6 - xor r13,r11 - add r12,rdi - - mov rdi,rdx - add r12,QWORD[rbp] - xor r14,rdx - - xor rdi,r8 - ror r13,14 - mov rcx,r8 - - and r15,rdi - ror 
r14,28 - add r12,r13 - - xor rcx,r15 - add r10,r12 - add rcx,r12 - - lea rbp,[24+rbp] - add rcx,r14 - mov r12,QWORD[48+rsi] - mov r13,r10 - mov r14,rcx - bswap r12 - ror r13,23 - mov r15,r11 - - xor r13,r10 - ror r14,5 - xor r15,rax - - mov QWORD[48+rsp],r12 - xor r14,rcx - and r15,r10 - - ror r13,4 - add r12,rbx - xor r15,rax - - ror r14,6 - xor r13,r10 - add r12,r15 - - mov r15,rcx - add r12,QWORD[rbp] - xor r14,rcx - - xor r15,rdx - ror r13,14 - mov rbx,rdx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rbx,rdi - add r9,r12 - add rbx,r12 - - lea rbp,[8+rbp] - add rbx,r14 - mov r12,QWORD[56+rsi] - mov r13,r9 - mov r14,rbx - bswap r12 - ror r13,23 - mov rdi,r10 - - xor r13,r9 - ror r14,5 - xor rdi,r11 - - mov QWORD[56+rsp],r12 - xor r14,rbx - and rdi,r9 - - ror r13,4 - add r12,rax - xor rdi,r11 - - ror r14,6 - xor r13,r9 - add r12,rdi - - mov rdi,rbx - add r12,QWORD[rbp] - xor r14,rbx - - xor rdi,rcx - ror r13,14 - mov rax,rcx - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rax,r15 - add r8,r12 - add rax,r12 - - lea rbp,[24+rbp] - add rax,r14 - mov r12,QWORD[64+rsi] - mov r13,r8 - mov r14,rax - bswap r12 - ror r13,23 - mov r15,r9 - - xor r13,r8 - ror r14,5 - xor r15,r10 - - mov QWORD[64+rsp],r12 - xor r14,rax - and r15,r8 - - ror r13,4 - add r12,r11 - xor r15,r10 - - ror r14,6 - xor r13,r8 - add r12,r15 - - mov r15,rax - add r12,QWORD[rbp] - xor r14,rax - - xor r15,rbx - ror r13,14 - mov r11,rbx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r11,rdi - add rdx,r12 - add r11,r12 - - lea rbp,[8+rbp] - add r11,r14 - mov r12,QWORD[72+rsi] - mov r13,rdx - mov r14,r11 - bswap r12 - ror r13,23 - mov rdi,r8 - - xor r13,rdx - ror r14,5 - xor rdi,r9 - - mov QWORD[72+rsp],r12 - xor r14,r11 - and rdi,rdx - - ror r13,4 - add r12,r10 - xor rdi,r9 - - ror r14,6 - xor r13,rdx - add r12,rdi - - mov rdi,r11 - add r12,QWORD[rbp] - xor r14,r11 - - xor rdi,rax - ror r13,14 - mov r10,rax - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r10,r15 - add rcx,r12 - add r10,r12 - 
- lea rbp,[24+rbp] - add r10,r14 - mov r12,QWORD[80+rsi] - mov r13,rcx - mov r14,r10 - bswap r12 - ror r13,23 - mov r15,rdx - - xor r13,rcx - ror r14,5 - xor r15,r8 - - mov QWORD[80+rsp],r12 - xor r14,r10 - and r15,rcx - - ror r13,4 - add r12,r9 - xor r15,r8 - - ror r14,6 - xor r13,rcx - add r12,r15 - - mov r15,r10 - add r12,QWORD[rbp] - xor r14,r10 - - xor r15,r11 - ror r13,14 - mov r9,r11 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r9,rdi - add rbx,r12 - add r9,r12 - - lea rbp,[8+rbp] - add r9,r14 - mov r12,QWORD[88+rsi] - mov r13,rbx - mov r14,r9 - bswap r12 - ror r13,23 - mov rdi,rcx - - xor r13,rbx - ror r14,5 - xor rdi,rdx - - mov QWORD[88+rsp],r12 - xor r14,r9 - and rdi,rbx - - ror r13,4 - add r12,r8 - xor rdi,rdx - - ror r14,6 - xor r13,rbx - add r12,rdi - - mov rdi,r9 - add r12,QWORD[rbp] - xor r14,r9 - - xor rdi,r10 - ror r13,14 - mov r8,r10 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r8,r15 - add rax,r12 - add r8,r12 - - lea rbp,[24+rbp] - add r8,r14 - mov r12,QWORD[96+rsi] - mov r13,rax - mov r14,r8 - bswap r12 - ror r13,23 - mov r15,rbx - - xor r13,rax - ror r14,5 - xor r15,rcx - - mov QWORD[96+rsp],r12 - xor r14,r8 - and r15,rax - - ror r13,4 - add r12,rdx - xor r15,rcx - - ror r14,6 - xor r13,rax - add r12,r15 - - mov r15,r8 - add r12,QWORD[rbp] - xor r14,r8 - - xor r15,r9 - ror r13,14 - mov rdx,r9 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rdx,rdi - add r11,r12 - add rdx,r12 - - lea rbp,[8+rbp] - add rdx,r14 - mov r12,QWORD[104+rsi] - mov r13,r11 - mov r14,rdx - bswap r12 - ror r13,23 - mov rdi,rax - - xor r13,r11 - ror r14,5 - xor rdi,rbx - - mov QWORD[104+rsp],r12 - xor r14,rdx - and rdi,r11 - - ror r13,4 - add r12,rcx - xor rdi,rbx - - ror r14,6 - xor r13,r11 - add r12,rdi - - mov rdi,rdx - add r12,QWORD[rbp] - xor r14,rdx - - xor rdi,r8 - ror r13,14 - mov rcx,r8 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rcx,r15 - add r10,r12 - add rcx,r12 - - lea rbp,[24+rbp] - add rcx,r14 - mov r12,QWORD[112+rsi] - mov r13,r10 - 
mov r14,rcx - bswap r12 - ror r13,23 - mov r15,r11 - - xor r13,r10 - ror r14,5 - xor r15,rax - - mov QWORD[112+rsp],r12 - xor r14,rcx - and r15,r10 - - ror r13,4 - add r12,rbx - xor r15,rax - - ror r14,6 - xor r13,r10 - add r12,r15 - - mov r15,rcx - add r12,QWORD[rbp] - xor r14,rcx - - xor r15,rdx - ror r13,14 - mov rbx,rdx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rbx,rdi - add r9,r12 - add rbx,r12 - - lea rbp,[8+rbp] - add rbx,r14 - mov r12,QWORD[120+rsi] - mov r13,r9 - mov r14,rbx - bswap r12 - ror r13,23 - mov rdi,r10 - - xor r13,r9 - ror r14,5 - xor rdi,r11 - - mov QWORD[120+rsp],r12 - xor r14,rbx - and rdi,r9 - - ror r13,4 - add r12,rax - xor rdi,r11 - - ror r14,6 - xor r13,r9 - add r12,rdi - - mov rdi,rbx - add r12,QWORD[rbp] - xor r14,rbx - - xor rdi,rcx - ror r13,14 - mov rax,rcx - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rax,r15 - add r8,r12 - add rax,r12 - - lea rbp,[24+rbp] - jmp NEAR $L$rounds_16_xx -ALIGN 16 -$L$rounds_16_xx: - mov r13,QWORD[8+rsp] - mov r15,QWORD[112+rsp] - - mov r12,r13 - ror r13,7 - add rax,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[72+rsp] - - add r12,QWORD[rsp] - mov r13,r8 - add r12,r15 - mov r14,rax - ror r13,23 - mov r15,r9 - - xor r13,r8 - ror r14,5 - xor r15,r10 - - mov QWORD[rsp],r12 - xor r14,rax - and r15,r8 - - ror r13,4 - add r12,r11 - xor r15,r10 - - ror r14,6 - xor r13,r8 - add r12,r15 - - mov r15,rax - add r12,QWORD[rbp] - xor r14,rax - - xor r15,rbx - ror r13,14 - mov r11,rbx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r11,rdi - add rdx,r12 - add r11,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[16+rsp] - mov rdi,QWORD[120+rsp] - - mov r12,r13 - ror r13,7 - add r11,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[80+rsp] - - add r12,QWORD[8+rsp] - mov r13,rdx - add r12,rdi - mov 
r14,r11 - ror r13,23 - mov rdi,r8 - - xor r13,rdx - ror r14,5 - xor rdi,r9 - - mov QWORD[8+rsp],r12 - xor r14,r11 - and rdi,rdx - - ror r13,4 - add r12,r10 - xor rdi,r9 - - ror r14,6 - xor r13,rdx - add r12,rdi - - mov rdi,r11 - add r12,QWORD[rbp] - xor r14,r11 - - xor rdi,rax - ror r13,14 - mov r10,rax - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r10,r15 - add rcx,r12 - add r10,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[24+rsp] - mov r15,QWORD[rsp] - - mov r12,r13 - ror r13,7 - add r10,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[88+rsp] - - add r12,QWORD[16+rsp] - mov r13,rcx - add r12,r15 - mov r14,r10 - ror r13,23 - mov r15,rdx - - xor r13,rcx - ror r14,5 - xor r15,r8 - - mov QWORD[16+rsp],r12 - xor r14,r10 - and r15,rcx - - ror r13,4 - add r12,r9 - xor r15,r8 - - ror r14,6 - xor r13,rcx - add r12,r15 - - mov r15,r10 - add r12,QWORD[rbp] - xor r14,r10 - - xor r15,r11 - ror r13,14 - mov r9,r11 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r9,rdi - add rbx,r12 - add r9,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[32+rsp] - mov rdi,QWORD[8+rsp] - - mov r12,r13 - ror r13,7 - add r9,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[96+rsp] - - add r12,QWORD[24+rsp] - mov r13,rbx - add r12,rdi - mov r14,r9 - ror r13,23 - mov rdi,rcx - - xor r13,rbx - ror r14,5 - xor rdi,rdx - - mov QWORD[24+rsp],r12 - xor r14,r9 - and rdi,rbx - - ror r13,4 - add r12,r8 - xor rdi,rdx - - ror r14,6 - xor r13,rbx - add r12,rdi - - mov rdi,r9 - add r12,QWORD[rbp] - xor r14,r9 - - xor rdi,r10 - ror r13,14 - mov r8,r10 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r8,r15 - add rax,r12 - add r8,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[40+rsp] - mov r15,QWORD[16+rsp] - - mov r12,r13 - ror r13,7 - add r8,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror 
r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[104+rsp] - - add r12,QWORD[32+rsp] - mov r13,rax - add r12,r15 - mov r14,r8 - ror r13,23 - mov r15,rbx - - xor r13,rax - ror r14,5 - xor r15,rcx - - mov QWORD[32+rsp],r12 - xor r14,r8 - and r15,rax - - ror r13,4 - add r12,rdx - xor r15,rcx - - ror r14,6 - xor r13,rax - add r12,r15 - - mov r15,r8 - add r12,QWORD[rbp] - xor r14,r8 - - xor r15,r9 - ror r13,14 - mov rdx,r9 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rdx,rdi - add r11,r12 - add rdx,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[48+rsp] - mov rdi,QWORD[24+rsp] - - mov r12,r13 - ror r13,7 - add rdx,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[112+rsp] - - add r12,QWORD[40+rsp] - mov r13,r11 - add r12,rdi - mov r14,rdx - ror r13,23 - mov rdi,rax - - xor r13,r11 - ror r14,5 - xor rdi,rbx - - mov QWORD[40+rsp],r12 - xor r14,rdx - and rdi,r11 - - ror r13,4 - add r12,rcx - xor rdi,rbx - - ror r14,6 - xor r13,r11 - add r12,rdi - - mov rdi,rdx - add r12,QWORD[rbp] - xor r14,rdx - - xor rdi,r8 - ror r13,14 - mov rcx,r8 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rcx,r15 - add r10,r12 - add rcx,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[56+rsp] - mov r15,QWORD[32+rsp] - - mov r12,r13 - ror r13,7 - add rcx,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[120+rsp] - - add r12,QWORD[48+rsp] - mov r13,r10 - add r12,r15 - mov r14,rcx - ror r13,23 - mov r15,r11 - - xor r13,r10 - ror r14,5 - xor r15,rax - - mov QWORD[48+rsp],r12 - xor r14,rcx - and r15,r10 - - ror r13,4 - add r12,rbx - xor r15,rax - - ror r14,6 - xor r13,r10 - add r12,r15 - - mov r15,rcx - add r12,QWORD[rbp] - xor r14,rcx - - xor r15,rdx - ror r13,14 - mov rbx,rdx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rbx,rdi - add r9,r12 - add 
rbx,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[64+rsp] - mov rdi,QWORD[40+rsp] - - mov r12,r13 - ror r13,7 - add rbx,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[rsp] - - add r12,QWORD[56+rsp] - mov r13,r9 - add r12,rdi - mov r14,rbx - ror r13,23 - mov rdi,r10 - - xor r13,r9 - ror r14,5 - xor rdi,r11 - - mov QWORD[56+rsp],r12 - xor r14,rbx - and rdi,r9 - - ror r13,4 - add r12,rax - xor rdi,r11 - - ror r14,6 - xor r13,r9 - add r12,rdi - - mov rdi,rbx - add r12,QWORD[rbp] - xor r14,rbx - - xor rdi,rcx - ror r13,14 - mov rax,rcx - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rax,r15 - add r8,r12 - add rax,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[72+rsp] - mov r15,QWORD[48+rsp] - - mov r12,r13 - ror r13,7 - add rax,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[8+rsp] - - add r12,QWORD[64+rsp] - mov r13,r8 - add r12,r15 - mov r14,rax - ror r13,23 - mov r15,r9 - - xor r13,r8 - ror r14,5 - xor r15,r10 - - mov QWORD[64+rsp],r12 - xor r14,rax - and r15,r8 - - ror r13,4 - add r12,r11 - xor r15,r10 - - ror r14,6 - xor r13,r8 - add r12,r15 - - mov r15,rax - add r12,QWORD[rbp] - xor r14,rax - - xor r15,rbx - ror r13,14 - mov r11,rbx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r11,rdi - add rdx,r12 - add r11,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[80+rsp] - mov rdi,QWORD[56+rsp] - - mov r12,r13 - ror r13,7 - add r11,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[16+rsp] - - add r12,QWORD[72+rsp] - mov r13,rdx - add r12,rdi - mov r14,r11 - ror r13,23 - mov rdi,r8 - - xor r13,rdx - ror r14,5 - xor rdi,r9 - - mov QWORD[72+rsp],r12 - xor r14,r11 - and rdi,rdx - - ror r13,4 - add r12,r10 - xor rdi,r9 - - ror r14,6 - xor r13,rdx - add r12,rdi - - mov 
rdi,r11 - add r12,QWORD[rbp] - xor r14,r11 - - xor rdi,rax - ror r13,14 - mov r10,rax - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r10,r15 - add rcx,r12 - add r10,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[88+rsp] - mov r15,QWORD[64+rsp] - - mov r12,r13 - ror r13,7 - add r10,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[24+rsp] - - add r12,QWORD[80+rsp] - mov r13,rcx - add r12,r15 - mov r14,r10 - ror r13,23 - mov r15,rdx - - xor r13,rcx - ror r14,5 - xor r15,r8 - - mov QWORD[80+rsp],r12 - xor r14,r10 - and r15,rcx - - ror r13,4 - add r12,r9 - xor r15,r8 - - ror r14,6 - xor r13,rcx - add r12,r15 - - mov r15,r10 - add r12,QWORD[rbp] - xor r14,r10 - - xor r15,r11 - ror r13,14 - mov r9,r11 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor r9,rdi - add rbx,r12 - add r9,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[96+rsp] - mov rdi,QWORD[72+rsp] - - mov r12,r13 - ror r13,7 - add r9,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[32+rsp] - - add r12,QWORD[88+rsp] - mov r13,rbx - add r12,rdi - mov r14,r9 - ror r13,23 - mov rdi,rcx - - xor r13,rbx - ror r14,5 - xor rdi,rdx - - mov QWORD[88+rsp],r12 - xor r14,r9 - and rdi,rbx - - ror r13,4 - add r12,r8 - xor rdi,rdx - - ror r14,6 - xor r13,rbx - add r12,rdi - - mov rdi,r9 - add r12,QWORD[rbp] - xor r14,r9 - - xor rdi,r10 - ror r13,14 - mov r8,r10 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor r8,r15 - add rax,r12 - add r8,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[104+rsp] - mov r15,QWORD[80+rsp] - - mov r12,r13 - ror r13,7 - add r8,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[40+rsp] - - add r12,QWORD[96+rsp] - mov r13,rax - add r12,r15 - mov r14,r8 - ror r13,23 - mov r15,rbx - - xor r13,rax - ror 
r14,5 - xor r15,rcx - - mov QWORD[96+rsp],r12 - xor r14,r8 - and r15,rax - - ror r13,4 - add r12,rdx - xor r15,rcx - - ror r14,6 - xor r13,rax - add r12,r15 - - mov r15,r8 - add r12,QWORD[rbp] - xor r14,r8 - - xor r15,r9 - ror r13,14 - mov rdx,r9 - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rdx,rdi - add r11,r12 - add rdx,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[112+rsp] - mov rdi,QWORD[88+rsp] - - mov r12,r13 - ror r13,7 - add rdx,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[48+rsp] - - add r12,QWORD[104+rsp] - mov r13,r11 - add r12,rdi - mov r14,rdx - ror r13,23 - mov rdi,rax - - xor r13,r11 - ror r14,5 - xor rdi,rbx - - mov QWORD[104+rsp],r12 - xor r14,rdx - and rdi,r11 - - ror r13,4 - add r12,rcx - xor rdi,rbx - - ror r14,6 - xor r13,r11 - add r12,rdi - - mov rdi,rdx - add r12,QWORD[rbp] - xor r14,rdx - - xor rdi,r8 - ror r13,14 - mov rcx,r8 - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rcx,r15 - add r10,r12 - add rcx,r12 - - lea rbp,[24+rbp] - mov r13,QWORD[120+rsp] - mov r15,QWORD[96+rsp] - - mov r12,r13 - ror r13,7 - add rcx,r14 - mov r14,r15 - ror r15,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor r15,r14 - shr r14,6 - - ror r15,19 - xor r12,r13 - xor r15,r14 - add r12,QWORD[56+rsp] - - add r12,QWORD[112+rsp] - mov r13,r10 - add r12,r15 - mov r14,rcx - ror r13,23 - mov r15,r11 - - xor r13,r10 - ror r14,5 - xor r15,rax - - mov QWORD[112+rsp],r12 - xor r14,rcx - and r15,r10 - - ror r13,4 - add r12,rbx - xor r15,rax - - ror r14,6 - xor r13,r10 - add r12,r15 - - mov r15,rcx - add r12,QWORD[rbp] - xor r14,rcx - - xor r15,rdx - ror r13,14 - mov rbx,rdx - - and rdi,r15 - ror r14,28 - add r12,r13 - - xor rbx,rdi - add r9,r12 - add rbx,r12 - - lea rbp,[8+rbp] - mov r13,QWORD[rsp] - mov rdi,QWORD[104+rsp] - - mov r12,r13 - ror r13,7 - add rbx,r14 - mov r14,rdi - ror rdi,42 - - xor r13,r12 - shr r12,7 - ror r13,1 - xor rdi,r14 - shr r14,6 - - ror 
rdi,19 - xor r12,r13 - xor rdi,r14 - add r12,QWORD[64+rsp] - - add r12,QWORD[120+rsp] - mov r13,r9 - add r12,rdi - mov r14,rbx - ror r13,23 - mov rdi,r10 - - xor r13,r9 - ror r14,5 - xor rdi,r11 - - mov QWORD[120+rsp],r12 - xor r14,rbx - and rdi,r9 - - ror r13,4 - add r12,rax - xor rdi,r11 - - ror r14,6 - xor r13,r9 - add r12,rdi - - mov rdi,rbx - add r12,QWORD[rbp] - xor r14,rbx - - xor rdi,rcx - ror r13,14 - mov rax,rcx - - and r15,rdi - ror r14,28 - add r12,r13 - - xor rax,r15 - add r8,r12 - add rax,r12 - - lea rbp,[24+rbp] - cmp BYTE[7+rbp],0 - jnz NEAR $L$rounds_16_xx - - mov rdi,QWORD[((128+0))+rsp] - add rax,r14 - lea rsi,[128+rsi] - - add rax,QWORD[rdi] - add rbx,QWORD[8+rdi] - add rcx,QWORD[16+rdi] - add rdx,QWORD[24+rdi] - add r8,QWORD[32+rdi] - add r9,QWORD[40+rdi] - add r10,QWORD[48+rdi] - add r11,QWORD[56+rdi] - - cmp rsi,QWORD[((128+16))+rsp] - - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - mov QWORD[16+rdi],rcx - mov QWORD[24+rdi],rdx - mov QWORD[32+rdi],r8 - mov QWORD[40+rdi],r9 - mov QWORD[48+rdi],r10 - mov QWORD[56+rdi],r11 - jb NEAR $L$loop - - mov rsi,QWORD[152+rsp] - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha512_block_data_order: -ALIGN 64 - -K512: - DQ 0x428a2f98d728ae22,0x7137449123ef65cd - DQ 0x428a2f98d728ae22,0x7137449123ef65cd - DQ 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc - DQ 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc - DQ 0x3956c25bf348b538,0x59f111f1b605d019 - DQ 0x3956c25bf348b538,0x59f111f1b605d019 - DQ 0x923f82a4af194f9b,0xab1c5ed5da6d8118 - DQ 0x923f82a4af194f9b,0xab1c5ed5da6d8118 - DQ 0xd807aa98a3030242,0x12835b0145706fbe - DQ 0xd807aa98a3030242,0x12835b0145706fbe - DQ 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 - DQ 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 - DQ 
0x72be5d74f27b896f,0x80deb1fe3b1696b1 - DQ 0x72be5d74f27b896f,0x80deb1fe3b1696b1 - DQ 0x9bdc06a725c71235,0xc19bf174cf692694 - DQ 0x9bdc06a725c71235,0xc19bf174cf692694 - DQ 0xe49b69c19ef14ad2,0xefbe4786384f25e3 - DQ 0xe49b69c19ef14ad2,0xefbe4786384f25e3 - DQ 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 - DQ 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 - DQ 0x2de92c6f592b0275,0x4a7484aa6ea6e483 - DQ 0x2de92c6f592b0275,0x4a7484aa6ea6e483 - DQ 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 - DQ 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 - DQ 0x983e5152ee66dfab,0xa831c66d2db43210 - DQ 0x983e5152ee66dfab,0xa831c66d2db43210 - DQ 0xb00327c898fb213f,0xbf597fc7beef0ee4 - DQ 0xb00327c898fb213f,0xbf597fc7beef0ee4 - DQ 0xc6e00bf33da88fc2,0xd5a79147930aa725 - DQ 0xc6e00bf33da88fc2,0xd5a79147930aa725 - DQ 0x06ca6351e003826f,0x142929670a0e6e70 - DQ 0x06ca6351e003826f,0x142929670a0e6e70 - DQ 0x27b70a8546d22ffc,0x2e1b21385c26c926 - DQ 0x27b70a8546d22ffc,0x2e1b21385c26c926 - DQ 0x4d2c6dfc5ac42aed,0x53380d139d95b3df - DQ 0x4d2c6dfc5ac42aed,0x53380d139d95b3df - DQ 0x650a73548baf63de,0x766a0abb3c77b2a8 - DQ 0x650a73548baf63de,0x766a0abb3c77b2a8 - DQ 0x81c2c92e47edaee6,0x92722c851482353b - DQ 0x81c2c92e47edaee6,0x92722c851482353b - DQ 0xa2bfe8a14cf10364,0xa81a664bbc423001 - DQ 0xa2bfe8a14cf10364,0xa81a664bbc423001 - DQ 0xc24b8b70d0f89791,0xc76c51a30654be30 - DQ 0xc24b8b70d0f89791,0xc76c51a30654be30 - DQ 0xd192e819d6ef5218,0xd69906245565a910 - DQ 0xd192e819d6ef5218,0xd69906245565a910 - DQ 0xf40e35855771202a,0x106aa07032bbd1b8 - DQ 0xf40e35855771202a,0x106aa07032bbd1b8 - DQ 0x19a4c116b8d2d0c8,0x1e376c085141ab53 - DQ 0x19a4c116b8d2d0c8,0x1e376c085141ab53 - DQ 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 - DQ 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 - DQ 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb - DQ 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb - DQ 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 - DQ 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 - DQ 0x748f82ee5defb2fc,0x78a5636f43172f60 - DQ 0x748f82ee5defb2fc,0x78a5636f43172f60 - DQ 
0x84c87814a1f0ab72,0x8cc702081a6439ec - DQ 0x84c87814a1f0ab72,0x8cc702081a6439ec - DQ 0x90befffa23631e28,0xa4506cebde82bde9 - DQ 0x90befffa23631e28,0xa4506cebde82bde9 - DQ 0xbef9a3f7b2c67915,0xc67178f2e372532b - DQ 0xbef9a3f7b2c67915,0xc67178f2e372532b - DQ 0xca273eceea26619c,0xd186b8c721c0c207 - DQ 0xca273eceea26619c,0xd186b8c721c0c207 - DQ 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 - DQ 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 - DQ 0x06f067aa72176fba,0x0a637dc5a2c898a6 - DQ 0x06f067aa72176fba,0x0a637dc5a2c898a6 - DQ 0x113f9804bef90dae,0x1b710b35131c471b - DQ 0x113f9804bef90dae,0x1b710b35131c471b - DQ 0x28db77f523047d84,0x32caab7b40c72493 - DQ 0x28db77f523047d84,0x32caab7b40c72493 - DQ 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c - DQ 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c - DQ 0x4cc5d4becb3e42b6,0x597f299cfc657e2a - DQ 0x4cc5d4becb3e42b6,0x597f299cfc657e2a - DQ 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - DQ 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - - DQ 0x0001020304050607,0x08090a0b0c0d0e0f - DQ 0x0001020304050607,0x08090a0b0c0d0e0f -DB 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 -DB 110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54 -DB 52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 -DB 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 -DB 111,114,103,62,0 - -ALIGN 64 -sha512_block_data_order_avx: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sha512_block_data_order_avx: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -$L$avx_shortcut: - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - shl rdx,4 - sub rsp,256 - lea rdx,[rdx*8+rsi] - and rsp,-64 - mov QWORD[((128+0))+rsp],rdi - mov QWORD[((128+8))+rsp],rsi - mov QWORD[((128+16))+rsp],rdx - mov QWORD[152+rsp],rax - - movaps XMMWORD[(128+32)+rsp],xmm6 - movaps XMMWORD[(128+48)+rsp],xmm7 - movaps XMMWORD[(128+64)+rsp],xmm8 - movaps XMMWORD[(128+80)+rsp],xmm9 - movaps XMMWORD[(128+96)+rsp],xmm10 - movaps 
XMMWORD[(128+112)+rsp],xmm11 -$L$prologue_avx: - - vzeroupper - mov rax,QWORD[rdi] - mov rbx,QWORD[8+rdi] - mov rcx,QWORD[16+rdi] - mov rdx,QWORD[24+rdi] - mov r8,QWORD[32+rdi] - mov r9,QWORD[40+rdi] - mov r10,QWORD[48+rdi] - mov r11,QWORD[56+rdi] - jmp NEAR $L$loop_avx -ALIGN 16 -$L$loop_avx: - vmovdqa xmm11,XMMWORD[((K512+1280))] - vmovdqu xmm0,XMMWORD[rsi] - lea rbp,[((K512+128))] - vmovdqu xmm1,XMMWORD[16+rsi] - vmovdqu xmm2,XMMWORD[32+rsi] - vpshufb xmm0,xmm0,xmm11 - vmovdqu xmm3,XMMWORD[48+rsi] - vpshufb xmm1,xmm1,xmm11 - vmovdqu xmm4,XMMWORD[64+rsi] - vpshufb xmm2,xmm2,xmm11 - vmovdqu xmm5,XMMWORD[80+rsi] - vpshufb xmm3,xmm3,xmm11 - vmovdqu xmm6,XMMWORD[96+rsi] - vpshufb xmm4,xmm4,xmm11 - vmovdqu xmm7,XMMWORD[112+rsi] - vpshufb xmm5,xmm5,xmm11 - vpaddq xmm8,xmm0,XMMWORD[((-128))+rbp] - vpshufb xmm6,xmm6,xmm11 - vpaddq xmm9,xmm1,XMMWORD[((-96))+rbp] - vpshufb xmm7,xmm7,xmm11 - vpaddq xmm10,xmm2,XMMWORD[((-64))+rbp] - vpaddq xmm11,xmm3,XMMWORD[((-32))+rbp] - vmovdqa XMMWORD[rsp],xmm8 - vpaddq xmm8,xmm4,XMMWORD[rbp] - vmovdqa XMMWORD[16+rsp],xmm9 - vpaddq xmm9,xmm5,XMMWORD[32+rbp] - vmovdqa XMMWORD[32+rsp],xmm10 - vpaddq xmm10,xmm6,XMMWORD[64+rbp] - vmovdqa XMMWORD[48+rsp],xmm11 - vpaddq xmm11,xmm7,XMMWORD[96+rbp] - vmovdqa XMMWORD[64+rsp],xmm8 - mov r14,rax - vmovdqa XMMWORD[80+rsp],xmm9 - mov rdi,rbx - vmovdqa XMMWORD[96+rsp],xmm10 - xor rdi,rcx - vmovdqa XMMWORD[112+rsp],xmm11 - mov r13,r8 - jmp NEAR $L$avx_00_47 - -ALIGN 16 -$L$avx_00_47: - add rbp,256 - vpalignr xmm8,xmm1,xmm0,8 - shrd r13,r13,23 - mov rax,r14 - vpalignr xmm11,xmm5,xmm4,8 - mov r12,r9 - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,r8 - xor r12,r10 - vpaddq xmm0,xmm0,xmm11 - shrd r13,r13,4 - xor r14,rax - vpsrlq xmm11,xmm8,7 - and r12,r8 - xor r13,r8 - vpsllq xmm9,xmm8,56 - add r11,QWORD[rsp] - mov r15,rax - vpxor xmm8,xmm11,xmm10 - xor r12,r10 - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,rbx - add r11,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - 
xor r14,rax - add r11,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,rbx - shrd r14,r14,28 - vpsrlq xmm11,xmm7,6 - add rdx,r11 - add r11,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,rdx - add r14,r11 - vpsllq xmm10,xmm7,3 - shrd r13,r13,23 - mov r11,r14 - vpaddq xmm0,xmm0,xmm8 - mov r12,r8 - shrd r14,r14,5 - vpsrlq xmm9,xmm7,19 - xor r13,rdx - xor r12,r9 - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,r11 - vpsllq xmm10,xmm10,42 - and r12,rdx - xor r13,rdx - vpxor xmm11,xmm11,xmm9 - add r10,QWORD[8+rsp] - mov rdi,r11 - vpsrlq xmm9,xmm9,42 - xor r12,r9 - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,rax - add r10,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm0,xmm0,xmm11 - xor r14,r11 - add r10,r13 - vpaddq xmm10,xmm0,XMMWORD[((-128))+rbp] - xor r15,rax - shrd r14,r14,28 - add rcx,r10 - add r10,r15 - mov r13,rcx - add r14,r10 - vmovdqa XMMWORD[rsp],xmm10 - vpalignr xmm8,xmm2,xmm1,8 - shrd r13,r13,23 - mov r10,r14 - vpalignr xmm11,xmm6,xmm5,8 - mov r12,rdx - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,rcx - xor r12,r8 - vpaddq xmm1,xmm1,xmm11 - shrd r13,r13,4 - xor r14,r10 - vpsrlq xmm11,xmm8,7 - and r12,rcx - xor r13,rcx - vpsllq xmm9,xmm8,56 - add r9,QWORD[16+rsp] - mov r15,r10 - vpxor xmm8,xmm11,xmm10 - xor r12,r8 - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,r11 - add r9,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,r10 - add r9,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,r11 - shrd r14,r14,28 - vpsrlq xmm11,xmm0,6 - add rbx,r9 - add r9,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,rbx - add r14,r9 - vpsllq xmm10,xmm0,3 - shrd r13,r13,23 - mov r9,r14 - vpaddq xmm1,xmm1,xmm8 - mov r12,rcx - shrd r14,r14,5 - vpsrlq xmm9,xmm0,19 - xor r13,rbx - xor r12,rdx - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,r9 - vpsllq xmm10,xmm10,42 - and r12,rbx - xor r13,rbx - vpxor xmm11,xmm11,xmm9 - add r8,QWORD[24+rsp] - mov rdi,r9 - vpsrlq xmm9,xmm9,42 - xor r12,rdx - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor 
rdi,r10 - add r8,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm1,xmm1,xmm11 - xor r14,r9 - add r8,r13 - vpaddq xmm10,xmm1,XMMWORD[((-96))+rbp] - xor r15,r10 - shrd r14,r14,28 - add rax,r8 - add r8,r15 - mov r13,rax - add r14,r8 - vmovdqa XMMWORD[16+rsp],xmm10 - vpalignr xmm8,xmm3,xmm2,8 - shrd r13,r13,23 - mov r8,r14 - vpalignr xmm11,xmm7,xmm6,8 - mov r12,rbx - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,rax - xor r12,rcx - vpaddq xmm2,xmm2,xmm11 - shrd r13,r13,4 - xor r14,r8 - vpsrlq xmm11,xmm8,7 - and r12,rax - xor r13,rax - vpsllq xmm9,xmm8,56 - add rdx,QWORD[32+rsp] - mov r15,r8 - vpxor xmm8,xmm11,xmm10 - xor r12,rcx - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,r9 - add rdx,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,r8 - add rdx,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,r9 - shrd r14,r14,28 - vpsrlq xmm11,xmm1,6 - add r11,rdx - add rdx,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,r11 - add r14,rdx - vpsllq xmm10,xmm1,3 - shrd r13,r13,23 - mov rdx,r14 - vpaddq xmm2,xmm2,xmm8 - mov r12,rax - shrd r14,r14,5 - vpsrlq xmm9,xmm1,19 - xor r13,r11 - xor r12,rbx - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,rdx - vpsllq xmm10,xmm10,42 - and r12,r11 - xor r13,r11 - vpxor xmm11,xmm11,xmm9 - add rcx,QWORD[40+rsp] - mov rdi,rdx - vpsrlq xmm9,xmm9,42 - xor r12,rbx - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,r8 - add rcx,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm2,xmm2,xmm11 - xor r14,rdx - add rcx,r13 - vpaddq xmm10,xmm2,XMMWORD[((-64))+rbp] - xor r15,r8 - shrd r14,r14,28 - add r10,rcx - add rcx,r15 - mov r13,r10 - add r14,rcx - vmovdqa XMMWORD[32+rsp],xmm10 - vpalignr xmm8,xmm4,xmm3,8 - shrd r13,r13,23 - mov rcx,r14 - vpalignr xmm11,xmm0,xmm7,8 - mov r12,r11 - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,r10 - xor r12,rax - vpaddq xmm3,xmm3,xmm11 - shrd r13,r13,4 - xor r14,rcx - vpsrlq xmm11,xmm8,7 - and r12,r10 - xor r13,r10 - vpsllq xmm9,xmm8,56 - add 
rbx,QWORD[48+rsp] - mov r15,rcx - vpxor xmm8,xmm11,xmm10 - xor r12,rax - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,rdx - add rbx,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,rcx - add rbx,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,rdx - shrd r14,r14,28 - vpsrlq xmm11,xmm2,6 - add r9,rbx - add rbx,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,r9 - add r14,rbx - vpsllq xmm10,xmm2,3 - shrd r13,r13,23 - mov rbx,r14 - vpaddq xmm3,xmm3,xmm8 - mov r12,r10 - shrd r14,r14,5 - vpsrlq xmm9,xmm2,19 - xor r13,r9 - xor r12,r11 - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,rbx - vpsllq xmm10,xmm10,42 - and r12,r9 - xor r13,r9 - vpxor xmm11,xmm11,xmm9 - add rax,QWORD[56+rsp] - mov rdi,rbx - vpsrlq xmm9,xmm9,42 - xor r12,r11 - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,rcx - add rax,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm3,xmm3,xmm11 - xor r14,rbx - add rax,r13 - vpaddq xmm10,xmm3,XMMWORD[((-32))+rbp] - xor r15,rcx - shrd r14,r14,28 - add r8,rax - add rax,r15 - mov r13,r8 - add r14,rax - vmovdqa XMMWORD[48+rsp],xmm10 - vpalignr xmm8,xmm5,xmm4,8 - shrd r13,r13,23 - mov rax,r14 - vpalignr xmm11,xmm1,xmm0,8 - mov r12,r9 - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,r8 - xor r12,r10 - vpaddq xmm4,xmm4,xmm11 - shrd r13,r13,4 - xor r14,rax - vpsrlq xmm11,xmm8,7 - and r12,r8 - xor r13,r8 - vpsllq xmm9,xmm8,56 - add r11,QWORD[64+rsp] - mov r15,rax - vpxor xmm8,xmm11,xmm10 - xor r12,r10 - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,rbx - add r11,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,rax - add r11,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,rbx - shrd r14,r14,28 - vpsrlq xmm11,xmm3,6 - add rdx,r11 - add r11,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,rdx - add r14,r11 - vpsllq xmm10,xmm3,3 - shrd r13,r13,23 - mov r11,r14 - vpaddq xmm4,xmm4,xmm8 - mov r12,r8 - shrd r14,r14,5 - vpsrlq xmm9,xmm3,19 - xor r13,rdx - xor r12,r9 - vpxor xmm11,xmm11,xmm10 - shrd 
r13,r13,4 - xor r14,r11 - vpsllq xmm10,xmm10,42 - and r12,rdx - xor r13,rdx - vpxor xmm11,xmm11,xmm9 - add r10,QWORD[72+rsp] - mov rdi,r11 - vpsrlq xmm9,xmm9,42 - xor r12,r9 - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,rax - add r10,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm4,xmm4,xmm11 - xor r14,r11 - add r10,r13 - vpaddq xmm10,xmm4,XMMWORD[rbp] - xor r15,rax - shrd r14,r14,28 - add rcx,r10 - add r10,r15 - mov r13,rcx - add r14,r10 - vmovdqa XMMWORD[64+rsp],xmm10 - vpalignr xmm8,xmm6,xmm5,8 - shrd r13,r13,23 - mov r10,r14 - vpalignr xmm11,xmm2,xmm1,8 - mov r12,rdx - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,rcx - xor r12,r8 - vpaddq xmm5,xmm5,xmm11 - shrd r13,r13,4 - xor r14,r10 - vpsrlq xmm11,xmm8,7 - and r12,rcx - xor r13,rcx - vpsllq xmm9,xmm8,56 - add r9,QWORD[80+rsp] - mov r15,r10 - vpxor xmm8,xmm11,xmm10 - xor r12,r8 - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,r11 - add r9,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,r10 - add r9,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,r11 - shrd r14,r14,28 - vpsrlq xmm11,xmm4,6 - add rbx,r9 - add r9,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,rbx - add r14,r9 - vpsllq xmm10,xmm4,3 - shrd r13,r13,23 - mov r9,r14 - vpaddq xmm5,xmm5,xmm8 - mov r12,rcx - shrd r14,r14,5 - vpsrlq xmm9,xmm4,19 - xor r13,rbx - xor r12,rdx - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,r9 - vpsllq xmm10,xmm10,42 - and r12,rbx - xor r13,rbx - vpxor xmm11,xmm11,xmm9 - add r8,QWORD[88+rsp] - mov rdi,r9 - vpsrlq xmm9,xmm9,42 - xor r12,rdx - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,r10 - add r8,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm5,xmm5,xmm11 - xor r14,r9 - add r8,r13 - vpaddq xmm10,xmm5,XMMWORD[32+rbp] - xor r15,r10 - shrd r14,r14,28 - add rax,r8 - add r8,r15 - mov r13,rax - add r14,r8 - vmovdqa XMMWORD[80+rsp],xmm10 - vpalignr xmm8,xmm7,xmm6,8 - shrd r13,r13,23 - mov r8,r14 - vpalignr xmm11,xmm3,xmm2,8 - mov r12,rbx 
- shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,rax - xor r12,rcx - vpaddq xmm6,xmm6,xmm11 - shrd r13,r13,4 - xor r14,r8 - vpsrlq xmm11,xmm8,7 - and r12,rax - xor r13,rax - vpsllq xmm9,xmm8,56 - add rdx,QWORD[96+rsp] - mov r15,r8 - vpxor xmm8,xmm11,xmm10 - xor r12,rcx - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,r9 - add rdx,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,r8 - add rdx,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,r9 - shrd r14,r14,28 - vpsrlq xmm11,xmm5,6 - add r11,rdx - add rdx,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,r11 - add r14,rdx - vpsllq xmm10,xmm5,3 - shrd r13,r13,23 - mov rdx,r14 - vpaddq xmm6,xmm6,xmm8 - mov r12,rax - shrd r14,r14,5 - vpsrlq xmm9,xmm5,19 - xor r13,r11 - xor r12,rbx - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,rdx - vpsllq xmm10,xmm10,42 - and r12,r11 - xor r13,r11 - vpxor xmm11,xmm11,xmm9 - add rcx,QWORD[104+rsp] - mov rdi,rdx - vpsrlq xmm9,xmm9,42 - xor r12,rbx - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,r8 - add rcx,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm6,xmm6,xmm11 - xor r14,rdx - add rcx,r13 - vpaddq xmm10,xmm6,XMMWORD[64+rbp] - xor r15,r8 - shrd r14,r14,28 - add r10,rcx - add rcx,r15 - mov r13,r10 - add r14,rcx - vmovdqa XMMWORD[96+rsp],xmm10 - vpalignr xmm8,xmm0,xmm7,8 - shrd r13,r13,23 - mov rcx,r14 - vpalignr xmm11,xmm4,xmm3,8 - mov r12,r11 - shrd r14,r14,5 - vpsrlq xmm10,xmm8,1 - xor r13,r10 - xor r12,rax - vpaddq xmm7,xmm7,xmm11 - shrd r13,r13,4 - xor r14,rcx - vpsrlq xmm11,xmm8,7 - and r12,r10 - xor r13,r10 - vpsllq xmm9,xmm8,56 - add rbx,QWORD[112+rsp] - mov r15,rcx - vpxor xmm8,xmm11,xmm10 - xor r12,rax - shrd r14,r14,6 - vpsrlq xmm10,xmm10,7 - xor r15,rdx - add rbx,r12 - vpxor xmm8,xmm8,xmm9 - shrd r13,r13,14 - and rdi,r15 - vpsllq xmm9,xmm9,7 - xor r14,rcx - add rbx,r13 - vpxor xmm8,xmm8,xmm10 - xor rdi,rdx - shrd r14,r14,28 - vpsrlq xmm11,xmm6,6 - add r9,rbx - add rbx,rdi - vpxor xmm8,xmm8,xmm9 - mov r13,r9 - add 
r14,rbx - vpsllq xmm10,xmm6,3 - shrd r13,r13,23 - mov rbx,r14 - vpaddq xmm7,xmm7,xmm8 - mov r12,r10 - shrd r14,r14,5 - vpsrlq xmm9,xmm6,19 - xor r13,r9 - xor r12,r11 - vpxor xmm11,xmm11,xmm10 - shrd r13,r13,4 - xor r14,rbx - vpsllq xmm10,xmm10,42 - and r12,r9 - xor r13,r9 - vpxor xmm11,xmm11,xmm9 - add rax,QWORD[120+rsp] - mov rdi,rbx - vpsrlq xmm9,xmm9,42 - xor r12,r11 - shrd r14,r14,6 - vpxor xmm11,xmm11,xmm10 - xor rdi,rcx - add rax,r12 - vpxor xmm11,xmm11,xmm9 - shrd r13,r13,14 - and r15,rdi - vpaddq xmm7,xmm7,xmm11 - xor r14,rbx - add rax,r13 - vpaddq xmm10,xmm7,XMMWORD[96+rbp] - xor r15,rcx - shrd r14,r14,28 - add r8,rax - add rax,r15 - mov r13,r8 - add r14,rax - vmovdqa XMMWORD[112+rsp],xmm10 - cmp BYTE[135+rbp],0 - jne NEAR $L$avx_00_47 - shrd r13,r13,23 - mov rax,r14 - mov r12,r9 - shrd r14,r14,5 - xor r13,r8 - xor r12,r10 - shrd r13,r13,4 - xor r14,rax - and r12,r8 - xor r13,r8 - add r11,QWORD[rsp] - mov r15,rax - xor r12,r10 - shrd r14,r14,6 - xor r15,rbx - add r11,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,rax - add r11,r13 - xor rdi,rbx - shrd r14,r14,28 - add rdx,r11 - add r11,rdi - mov r13,rdx - add r14,r11 - shrd r13,r13,23 - mov r11,r14 - mov r12,r8 - shrd r14,r14,5 - xor r13,rdx - xor r12,r9 - shrd r13,r13,4 - xor r14,r11 - and r12,rdx - xor r13,rdx - add r10,QWORD[8+rsp] - mov rdi,r11 - xor r12,r9 - shrd r14,r14,6 - xor rdi,rax - add r10,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,r11 - add r10,r13 - xor r15,rax - shrd r14,r14,28 - add rcx,r10 - add r10,r15 - mov r13,rcx - add r14,r10 - shrd r13,r13,23 - mov r10,r14 - mov r12,rdx - shrd r14,r14,5 - xor r13,rcx - xor r12,r8 - shrd r13,r13,4 - xor r14,r10 - and r12,rcx - xor r13,rcx - add r9,QWORD[16+rsp] - mov r15,r10 - xor r12,r8 - shrd r14,r14,6 - xor r15,r11 - add r9,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,r10 - add r9,r13 - xor rdi,r11 - shrd r14,r14,28 - add rbx,r9 - add r9,rdi - mov r13,rbx - add r14,r9 - shrd r13,r13,23 - mov r9,r14 - mov r12,rcx - shrd r14,r14,5 - xor r13,rbx - 
xor r12,rdx - shrd r13,r13,4 - xor r14,r9 - and r12,rbx - xor r13,rbx - add r8,QWORD[24+rsp] - mov rdi,r9 - xor r12,rdx - shrd r14,r14,6 - xor rdi,r10 - add r8,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,r9 - add r8,r13 - xor r15,r10 - shrd r14,r14,28 - add rax,r8 - add r8,r15 - mov r13,rax - add r14,r8 - shrd r13,r13,23 - mov r8,r14 - mov r12,rbx - shrd r14,r14,5 - xor r13,rax - xor r12,rcx - shrd r13,r13,4 - xor r14,r8 - and r12,rax - xor r13,rax - add rdx,QWORD[32+rsp] - mov r15,r8 - xor r12,rcx - shrd r14,r14,6 - xor r15,r9 - add rdx,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,r8 - add rdx,r13 - xor rdi,r9 - shrd r14,r14,28 - add r11,rdx - add rdx,rdi - mov r13,r11 - add r14,rdx - shrd r13,r13,23 - mov rdx,r14 - mov r12,rax - shrd r14,r14,5 - xor r13,r11 - xor r12,rbx - shrd r13,r13,4 - xor r14,rdx - and r12,r11 - xor r13,r11 - add rcx,QWORD[40+rsp] - mov rdi,rdx - xor r12,rbx - shrd r14,r14,6 - xor rdi,r8 - add rcx,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,rdx - add rcx,r13 - xor r15,r8 - shrd r14,r14,28 - add r10,rcx - add rcx,r15 - mov r13,r10 - add r14,rcx - shrd r13,r13,23 - mov rcx,r14 - mov r12,r11 - shrd r14,r14,5 - xor r13,r10 - xor r12,rax - shrd r13,r13,4 - xor r14,rcx - and r12,r10 - xor r13,r10 - add rbx,QWORD[48+rsp] - mov r15,rcx - xor r12,rax - shrd r14,r14,6 - xor r15,rdx - add rbx,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,rcx - add rbx,r13 - xor rdi,rdx - shrd r14,r14,28 - add r9,rbx - add rbx,rdi - mov r13,r9 - add r14,rbx - shrd r13,r13,23 - mov rbx,r14 - mov r12,r10 - shrd r14,r14,5 - xor r13,r9 - xor r12,r11 - shrd r13,r13,4 - xor r14,rbx - and r12,r9 - xor r13,r9 - add rax,QWORD[56+rsp] - mov rdi,rbx - xor r12,r11 - shrd r14,r14,6 - xor rdi,rcx - add rax,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,rbx - add rax,r13 - xor r15,rcx - shrd r14,r14,28 - add r8,rax - add rax,r15 - mov r13,r8 - add r14,rax - shrd r13,r13,23 - mov rax,r14 - mov r12,r9 - shrd r14,r14,5 - xor r13,r8 - xor r12,r10 - shrd r13,r13,4 - xor r14,rax - and 
r12,r8 - xor r13,r8 - add r11,QWORD[64+rsp] - mov r15,rax - xor r12,r10 - shrd r14,r14,6 - xor r15,rbx - add r11,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,rax - add r11,r13 - xor rdi,rbx - shrd r14,r14,28 - add rdx,r11 - add r11,rdi - mov r13,rdx - add r14,r11 - shrd r13,r13,23 - mov r11,r14 - mov r12,r8 - shrd r14,r14,5 - xor r13,rdx - xor r12,r9 - shrd r13,r13,4 - xor r14,r11 - and r12,rdx - xor r13,rdx - add r10,QWORD[72+rsp] - mov rdi,r11 - xor r12,r9 - shrd r14,r14,6 - xor rdi,rax - add r10,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,r11 - add r10,r13 - xor r15,rax - shrd r14,r14,28 - add rcx,r10 - add r10,r15 - mov r13,rcx - add r14,r10 - shrd r13,r13,23 - mov r10,r14 - mov r12,rdx - shrd r14,r14,5 - xor r13,rcx - xor r12,r8 - shrd r13,r13,4 - xor r14,r10 - and r12,rcx - xor r13,rcx - add r9,QWORD[80+rsp] - mov r15,r10 - xor r12,r8 - shrd r14,r14,6 - xor r15,r11 - add r9,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,r10 - add r9,r13 - xor rdi,r11 - shrd r14,r14,28 - add rbx,r9 - add r9,rdi - mov r13,rbx - add r14,r9 - shrd r13,r13,23 - mov r9,r14 - mov r12,rcx - shrd r14,r14,5 - xor r13,rbx - xor r12,rdx - shrd r13,r13,4 - xor r14,r9 - and r12,rbx - xor r13,rbx - add r8,QWORD[88+rsp] - mov rdi,r9 - xor r12,rdx - shrd r14,r14,6 - xor rdi,r10 - add r8,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,r9 - add r8,r13 - xor r15,r10 - shrd r14,r14,28 - add rax,r8 - add r8,r15 - mov r13,rax - add r14,r8 - shrd r13,r13,23 - mov r8,r14 - mov r12,rbx - shrd r14,r14,5 - xor r13,rax - xor r12,rcx - shrd r13,r13,4 - xor r14,r8 - and r12,rax - xor r13,rax - add rdx,QWORD[96+rsp] - mov r15,r8 - xor r12,rcx - shrd r14,r14,6 - xor r15,r9 - add rdx,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,r8 - add rdx,r13 - xor rdi,r9 - shrd r14,r14,28 - add r11,rdx - add rdx,rdi - mov r13,r11 - add r14,rdx - shrd r13,r13,23 - mov rdx,r14 - mov r12,rax - shrd r14,r14,5 - xor r13,r11 - xor r12,rbx - shrd r13,r13,4 - xor r14,rdx - and r12,r11 - xor r13,r11 - add rcx,QWORD[104+rsp] - mov 
rdi,rdx - xor r12,rbx - shrd r14,r14,6 - xor rdi,r8 - add rcx,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,rdx - add rcx,r13 - xor r15,r8 - shrd r14,r14,28 - add r10,rcx - add rcx,r15 - mov r13,r10 - add r14,rcx - shrd r13,r13,23 - mov rcx,r14 - mov r12,r11 - shrd r14,r14,5 - xor r13,r10 - xor r12,rax - shrd r13,r13,4 - xor r14,rcx - and r12,r10 - xor r13,r10 - add rbx,QWORD[112+rsp] - mov r15,rcx - xor r12,rax - shrd r14,r14,6 - xor r15,rdx - add rbx,r12 - shrd r13,r13,14 - and rdi,r15 - xor r14,rcx - add rbx,r13 - xor rdi,rdx - shrd r14,r14,28 - add r9,rbx - add rbx,rdi - mov r13,r9 - add r14,rbx - shrd r13,r13,23 - mov rbx,r14 - mov r12,r10 - shrd r14,r14,5 - xor r13,r9 - xor r12,r11 - shrd r13,r13,4 - xor r14,rbx - and r12,r9 - xor r13,r9 - add rax,QWORD[120+rsp] - mov rdi,rbx - xor r12,r11 - shrd r14,r14,6 - xor rdi,rcx - add rax,r12 - shrd r13,r13,14 - and r15,rdi - xor r14,rbx - add rax,r13 - xor r15,rcx - shrd r14,r14,28 - add r8,rax - add rax,r15 - mov r13,r8 - add r14,rax - mov rdi,QWORD[((128+0))+rsp] - mov rax,r14 - - add rax,QWORD[rdi] - lea rsi,[128+rsi] - add rbx,QWORD[8+rdi] - add rcx,QWORD[16+rdi] - add rdx,QWORD[24+rdi] - add r8,QWORD[32+rdi] - add r9,QWORD[40+rdi] - add r10,QWORD[48+rdi] - add r11,QWORD[56+rdi] - - cmp rsi,QWORD[((128+16))+rsp] - - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - mov QWORD[16+rdi],rcx - mov QWORD[24+rdi],rdx - mov QWORD[32+rdi],r8 - mov QWORD[40+rdi],r9 - mov QWORD[48+rdi],r10 - mov QWORD[56+rdi],r11 - jb NEAR $L$loop_avx - - mov rsi,QWORD[152+rsp] - - vzeroupper - movaps xmm6,XMMWORD[((128+32))+rsp] - movaps xmm7,XMMWORD[((128+48))+rsp] - movaps xmm8,XMMWORD[((128+64))+rsp] - movaps xmm9,XMMWORD[((128+80))+rsp] - movaps xmm10,XMMWORD[((128+96))+rsp] - movaps xmm11,XMMWORD[((128+112))+rsp] - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$epilogue_avx: - mov 
rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_sha512_block_data_order_avx: -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_prologue - mov rsi,rax - mov rax,QWORD[((128+24))+rax] - - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - - lea r10,[$L$epilogue] - cmp rbx,r10 - jb NEAR $L$in_prologue - - lea rsi,[((128+32))+rsi] - lea rdi,[512+r8] - mov ecx,12 - DD 0xa548f3fc - -$L$in_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_sha512_block_data_order wrt ..imagebase - DD $L$SEH_end_sha512_block_data_order wrt ..imagebase - DD $L$SEH_info_sha512_block_data_order wrt ..imagebase - DD $L$SEH_begin_sha512_block_data_order_avx wrt ..imagebase 
- DD $L$SEH_end_sha512_block_data_order_avx wrt ..imagebase - DD $L$SEH_info_sha512_block_data_order_avx wrt ..imagebase -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_sha512_block_data_order: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$prologue wrt ..imagebase,$L$epilogue wrt ..imagebase -$L$SEH_info_sha512_block_data_order_avx: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$prologue_avx wrt ..imagebase,$L$epilogue_avx wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm deleted file mode 100644 index ccfc870a66..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm +++ /dev/null @@ -1,1472 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - - - - - - - - - - - - - - - - -ALIGN 16 -_vpaes_encrypt_core: - - mov r9,rdx - mov r11,16 - mov eax,DWORD[240+rdx] - movdqa xmm1,xmm9 - movdqa xmm2,XMMWORD[$L$k_ipt] - pandn xmm1,xmm0 - movdqu xmm5,XMMWORD[r9] - psrld xmm1,4 - pand xmm0,xmm9 -DB 102,15,56,0,208 - movdqa xmm0,XMMWORD[(($L$k_ipt+16))] -DB 102,15,56,0,193 - pxor xmm2,xmm5 - add r9,16 - pxor xmm0,xmm2 - lea r10,[$L$k_mc_backward] - jmp NEAR $L$enc_entry - -ALIGN 16 -$L$enc_loop: - - movdqa xmm4,xmm13 - movdqa xmm0,xmm12 -DB 102,15,56,0,226 -DB 102,15,56,0,195 - pxor xmm4,xmm5 - movdqa xmm5,xmm15 - pxor xmm0,xmm4 - movdqa xmm1,XMMWORD[((-64))+r10*1+r11] -DB 102,15,56,0,234 - movdqa xmm4,XMMWORD[r10*1+r11] - movdqa xmm2,xmm14 -DB 102,15,56,0,211 - movdqa xmm3,xmm0 - pxor xmm2,xmm5 -DB 102,15,56,0,193 - add r9,16 - pxor xmm0,xmm2 -DB 102,15,56,0,220 - add r11,16 - pxor xmm3,xmm0 -DB 102,15,56,0,193 - and r11,0x30 - sub rax,1 - pxor 
xmm0,xmm3 - -$L$enc_entry: - - movdqa xmm1,xmm9 - movdqa xmm5,xmm11 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm9 -DB 102,15,56,0,232 - movdqa xmm3,xmm10 - pxor xmm0,xmm1 -DB 102,15,56,0,217 - movdqa xmm4,xmm10 - pxor xmm3,xmm5 -DB 102,15,56,0,224 - movdqa xmm2,xmm10 - pxor xmm4,xmm5 -DB 102,15,56,0,211 - movdqa xmm3,xmm10 - pxor xmm2,xmm0 -DB 102,15,56,0,220 - movdqu xmm5,XMMWORD[r9] - pxor xmm3,xmm1 - jnz NEAR $L$enc_loop - - - movdqa xmm4,XMMWORD[((-96))+r10] - movdqa xmm0,XMMWORD[((-80))+r10] -DB 102,15,56,0,226 - pxor xmm4,xmm5 -DB 102,15,56,0,195 - movdqa xmm1,XMMWORD[64+r10*1+r11] - pxor xmm0,xmm4 -DB 102,15,56,0,193 - DB 0F3h,0C3h ;repret - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -ALIGN 16 -_vpaes_encrypt_core_2x: - - mov r9,rdx - mov r11,16 - mov eax,DWORD[240+rdx] - movdqa xmm1,xmm9 - movdqa xmm7,xmm9 - movdqa xmm2,XMMWORD[$L$k_ipt] - movdqa xmm8,xmm2 - pandn xmm1,xmm0 - pandn xmm7,xmm6 - movdqu xmm5,XMMWORD[r9] - - psrld xmm1,4 - psrld xmm7,4 - pand xmm0,xmm9 - pand xmm6,xmm9 -DB 102,15,56,0,208 -DB 102,68,15,56,0,198 - movdqa xmm0,XMMWORD[(($L$k_ipt+16))] - movdqa xmm6,xmm0 -DB 102,15,56,0,193 -DB 102,15,56,0,247 - pxor xmm2,xmm5 - pxor xmm8,xmm5 - add r9,16 - pxor xmm0,xmm2 - pxor xmm6,xmm8 - lea r10,[$L$k_mc_backward] - jmp NEAR $L$enc2x_entry - -ALIGN 16 -$L$enc2x_loop: - - movdqa xmm4,XMMWORD[$L$k_sb1] - movdqa xmm0,XMMWORD[(($L$k_sb1+16))] - movdqa xmm12,xmm4 - movdqa xmm6,xmm0 -DB 102,15,56,0,226 -DB 102,69,15,56,0,224 -DB 102,15,56,0,195 -DB 102,65,15,56,0,243 - pxor xmm4,xmm5 - pxor xmm12,xmm5 - movdqa xmm5,XMMWORD[$L$k_sb2] - movdqa xmm13,xmm5 - pxor xmm0,xmm4 - pxor xmm6,xmm12 - movdqa xmm1,XMMWORD[((-64))+r10*1+r11] - -DB 102,15,56,0,234 -DB 102,69,15,56,0,232 - movdqa xmm4,XMMWORD[r10*1+r11] - - movdqa xmm2,XMMWORD[(($L$k_sb2+16))] - movdqa xmm8,xmm2 -DB 102,15,56,0,211 -DB 102,69,15,56,0,195 - movdqa xmm3,xmm0 - movdqa xmm11,xmm6 - pxor xmm2,xmm5 - pxor xmm8,xmm13 -DB 102,15,56,0,193 -DB 102,15,56,0,241 - add 
r9,16 - pxor xmm0,xmm2 - pxor xmm6,xmm8 -DB 102,15,56,0,220 -DB 102,68,15,56,0,220 - add r11,16 - pxor xmm3,xmm0 - pxor xmm11,xmm6 -DB 102,15,56,0,193 -DB 102,15,56,0,241 - and r11,0x30 - sub rax,1 - pxor xmm0,xmm3 - pxor xmm6,xmm11 - -$L$enc2x_entry: - - movdqa xmm1,xmm9 - movdqa xmm7,xmm9 - movdqa xmm5,XMMWORD[(($L$k_inv+16))] - movdqa xmm13,xmm5 - pandn xmm1,xmm0 - pandn xmm7,xmm6 - psrld xmm1,4 - psrld xmm7,4 - pand xmm0,xmm9 - pand xmm6,xmm9 -DB 102,15,56,0,232 -DB 102,68,15,56,0,238 - movdqa xmm3,xmm10 - movdqa xmm11,xmm10 - pxor xmm0,xmm1 - pxor xmm6,xmm7 -DB 102,15,56,0,217 -DB 102,68,15,56,0,223 - movdqa xmm4,xmm10 - movdqa xmm12,xmm10 - pxor xmm3,xmm5 - pxor xmm11,xmm13 -DB 102,15,56,0,224 -DB 102,68,15,56,0,230 - movdqa xmm2,xmm10 - movdqa xmm8,xmm10 - pxor xmm4,xmm5 - pxor xmm12,xmm13 -DB 102,15,56,0,211 -DB 102,69,15,56,0,195 - movdqa xmm3,xmm10 - movdqa xmm11,xmm10 - pxor xmm2,xmm0 - pxor xmm8,xmm6 -DB 102,15,56,0,220 -DB 102,69,15,56,0,220 - movdqu xmm5,XMMWORD[r9] - - pxor xmm3,xmm1 - pxor xmm11,xmm7 - jnz NEAR $L$enc2x_loop - - - movdqa xmm4,XMMWORD[((-96))+r10] - movdqa xmm0,XMMWORD[((-80))+r10] - movdqa xmm12,xmm4 - movdqa xmm6,xmm0 -DB 102,15,56,0,226 -DB 102,69,15,56,0,224 - pxor xmm4,xmm5 - pxor xmm12,xmm5 -DB 102,15,56,0,195 -DB 102,65,15,56,0,243 - movdqa xmm1,XMMWORD[64+r10*1+r11] - - pxor xmm0,xmm4 - pxor xmm6,xmm12 -DB 102,15,56,0,193 -DB 102,15,56,0,241 - DB 0F3h,0C3h ;repret - - - - - - - - - -ALIGN 16 -_vpaes_decrypt_core: - - mov r9,rdx - mov eax,DWORD[240+rdx] - movdqa xmm1,xmm9 - movdqa xmm2,XMMWORD[$L$k_dipt] - pandn xmm1,xmm0 - mov r11,rax - psrld xmm1,4 - movdqu xmm5,XMMWORD[r9] - shl r11,4 - pand xmm0,xmm9 -DB 102,15,56,0,208 - movdqa xmm0,XMMWORD[(($L$k_dipt+16))] - xor r11,0x30 - lea r10,[$L$k_dsbd] -DB 102,15,56,0,193 - and r11,0x30 - pxor xmm2,xmm5 - movdqa xmm5,XMMWORD[(($L$k_mc_forward+48))] - pxor xmm0,xmm2 - add r9,16 - add r11,r10 - jmp NEAR $L$dec_entry - -ALIGN 16 -$L$dec_loop: - - - - movdqa xmm4,XMMWORD[((-32))+r10] 
- movdqa xmm1,XMMWORD[((-16))+r10] -DB 102,15,56,0,226 -DB 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,XMMWORD[r10] - pxor xmm0,xmm1 - movdqa xmm1,XMMWORD[16+r10] - -DB 102,15,56,0,226 -DB 102,15,56,0,197 -DB 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,XMMWORD[32+r10] - pxor xmm0,xmm1 - movdqa xmm1,XMMWORD[48+r10] - -DB 102,15,56,0,226 -DB 102,15,56,0,197 -DB 102,15,56,0,203 - pxor xmm0,xmm4 - movdqa xmm4,XMMWORD[64+r10] - pxor xmm0,xmm1 - movdqa xmm1,XMMWORD[80+r10] - -DB 102,15,56,0,226 -DB 102,15,56,0,197 -DB 102,15,56,0,203 - pxor xmm0,xmm4 - add r9,16 -DB 102,15,58,15,237,12 - pxor xmm0,xmm1 - sub rax,1 - -$L$dec_entry: - - movdqa xmm1,xmm9 - pandn xmm1,xmm0 - movdqa xmm2,xmm11 - psrld xmm1,4 - pand xmm0,xmm9 -DB 102,15,56,0,208 - movdqa xmm3,xmm10 - pxor xmm0,xmm1 -DB 102,15,56,0,217 - movdqa xmm4,xmm10 - pxor xmm3,xmm2 -DB 102,15,56,0,224 - pxor xmm4,xmm2 - movdqa xmm2,xmm10 -DB 102,15,56,0,211 - movdqa xmm3,xmm10 - pxor xmm2,xmm0 -DB 102,15,56,0,220 - movdqu xmm0,XMMWORD[r9] - pxor xmm3,xmm1 - jnz NEAR $L$dec_loop - - - movdqa xmm4,XMMWORD[96+r10] -DB 102,15,56,0,226 - pxor xmm4,xmm0 - movdqa xmm0,XMMWORD[112+r10] - movdqa xmm2,XMMWORD[((-352))+r11] -DB 102,15,56,0,195 - pxor xmm0,xmm4 -DB 102,15,56,0,194 - DB 0F3h,0C3h ;repret - - - - - - - - - -ALIGN 16 -_vpaes_schedule_core: - - - - - - - call _vpaes_preheat - movdqa xmm8,XMMWORD[$L$k_rcon] - movdqu xmm0,XMMWORD[rdi] - - - movdqa xmm3,xmm0 - lea r11,[$L$k_ipt] - call _vpaes_schedule_transform - movdqa xmm7,xmm0 - - lea r10,[$L$k_sr] - test rcx,rcx - jnz NEAR $L$schedule_am_decrypting - - - movdqu XMMWORD[rdx],xmm0 - jmp NEAR $L$schedule_go - -$L$schedule_am_decrypting: - - movdqa xmm1,XMMWORD[r10*1+r8] -DB 102,15,56,0,217 - movdqu XMMWORD[rdx],xmm3 - xor r8,0x30 - -$L$schedule_go: - cmp esi,192 - ja NEAR $L$schedule_256 - je NEAR $L$schedule_192 - - - - - - - - - - -$L$schedule_128: - mov esi,10 - -$L$oop_schedule_128: - call _vpaes_schedule_round - dec rsi - jz NEAR $L$schedule_mangle_last - call 
_vpaes_schedule_mangle - jmp NEAR $L$oop_schedule_128 - - - - - - - - - - - - - - - - -ALIGN 16 -$L$schedule_192: - movdqu xmm0,XMMWORD[8+rdi] - call _vpaes_schedule_transform - movdqa xmm6,xmm0 - pxor xmm4,xmm4 - movhlps xmm6,xmm4 - mov esi,4 - -$L$oop_schedule_192: - call _vpaes_schedule_round -DB 102,15,58,15,198,8 - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - call _vpaes_schedule_mangle - call _vpaes_schedule_round - dec rsi - jz NEAR $L$schedule_mangle_last - call _vpaes_schedule_mangle - call _vpaes_schedule_192_smear - jmp NEAR $L$oop_schedule_192 - - - - - - - - - - - -ALIGN 16 -$L$schedule_256: - movdqu xmm0,XMMWORD[16+rdi] - call _vpaes_schedule_transform - mov esi,7 - -$L$oop_schedule_256: - call _vpaes_schedule_mangle - movdqa xmm6,xmm0 - - - call _vpaes_schedule_round - dec rsi - jz NEAR $L$schedule_mangle_last - call _vpaes_schedule_mangle - - - pshufd xmm0,xmm0,0xFF - movdqa xmm5,xmm7 - movdqa xmm7,xmm6 - call _vpaes_schedule_low_round - movdqa xmm7,xmm5 - - jmp NEAR $L$oop_schedule_256 - - - - - - - - - - - - -ALIGN 16 -$L$schedule_mangle_last: - - lea r11,[$L$k_deskew] - test rcx,rcx - jnz NEAR $L$schedule_mangle_last_dec - - - movdqa xmm1,XMMWORD[r10*1+r8] -DB 102,15,56,0,193 - lea r11,[$L$k_opt] - add rdx,32 - -$L$schedule_mangle_last_dec: - add rdx,-16 - pxor xmm0,XMMWORD[$L$k_s63] - call _vpaes_schedule_transform - movdqu XMMWORD[rdx],xmm0 - - - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - pxor xmm6,xmm6 - pxor xmm7,xmm7 - DB 0F3h,0C3h ;repret - - - - - - - - - - - - - - - - - - -ALIGN 16 -_vpaes_schedule_192_smear: - - pshufd xmm1,xmm6,0x80 - pshufd xmm0,xmm7,0xFE - pxor xmm6,xmm1 - pxor xmm1,xmm1 - pxor xmm6,xmm0 - movdqa xmm0,xmm6 - movhlps xmm6,xmm1 - DB 0F3h,0C3h ;repret - - - - - - - - - - - - - - - - - - - - - - -ALIGN 16 -_vpaes_schedule_round: - - - pxor xmm1,xmm1 -DB 102,65,15,58,15,200,15 -DB 102,69,15,58,15,192,15 - pxor xmm7,xmm1 - - - pshufd xmm0,xmm0,0xFF 
-DB 102,15,58,15,192,1 - - - - -_vpaes_schedule_low_round: - - movdqa xmm1,xmm7 - pslldq xmm7,4 - pxor xmm7,xmm1 - movdqa xmm1,xmm7 - pslldq xmm7,8 - pxor xmm7,xmm1 - pxor xmm7,XMMWORD[$L$k_s63] - - - movdqa xmm1,xmm9 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm9 - movdqa xmm2,xmm11 -DB 102,15,56,0,208 - pxor xmm0,xmm1 - movdqa xmm3,xmm10 -DB 102,15,56,0,217 - pxor xmm3,xmm2 - movdqa xmm4,xmm10 -DB 102,15,56,0,224 - pxor xmm4,xmm2 - movdqa xmm2,xmm10 -DB 102,15,56,0,211 - pxor xmm2,xmm0 - movdqa xmm3,xmm10 -DB 102,15,56,0,220 - pxor xmm3,xmm1 - movdqa xmm4,xmm13 -DB 102,15,56,0,226 - movdqa xmm0,xmm12 -DB 102,15,56,0,195 - pxor xmm0,xmm4 - - - pxor xmm0,xmm7 - movdqa xmm7,xmm0 - DB 0F3h,0C3h ;repret - - - - - - - - - - - - - -ALIGN 16 -_vpaes_schedule_transform: - - movdqa xmm1,xmm9 - pandn xmm1,xmm0 - psrld xmm1,4 - pand xmm0,xmm9 - movdqa xmm2,XMMWORD[r11] -DB 102,15,56,0,208 - movdqa xmm0,XMMWORD[16+r11] -DB 102,15,56,0,193 - pxor xmm0,xmm2 - DB 0F3h,0C3h ;repret - - - - - - - - - - - - - - - - - - - - - - - - - - - -ALIGN 16 -_vpaes_schedule_mangle: - - movdqa xmm4,xmm0 - movdqa xmm5,XMMWORD[$L$k_mc_forward] - test rcx,rcx - jnz NEAR $L$schedule_mangle_dec - - - add rdx,16 - pxor xmm4,XMMWORD[$L$k_s63] -DB 102,15,56,0,229 - movdqa xmm3,xmm4 -DB 102,15,56,0,229 - pxor xmm3,xmm4 -DB 102,15,56,0,229 - pxor xmm3,xmm4 - - jmp NEAR $L$schedule_mangle_both -ALIGN 16 -$L$schedule_mangle_dec: - - lea r11,[$L$k_dksd] - movdqa xmm1,xmm9 - pandn xmm1,xmm4 - psrld xmm1,4 - pand xmm4,xmm9 - - movdqa xmm2,XMMWORD[r11] -DB 102,15,56,0,212 - movdqa xmm3,XMMWORD[16+r11] -DB 102,15,56,0,217 - pxor xmm3,xmm2 -DB 102,15,56,0,221 - - movdqa xmm2,XMMWORD[32+r11] -DB 102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,XMMWORD[48+r11] -DB 102,15,56,0,217 - pxor xmm3,xmm2 -DB 102,15,56,0,221 - - movdqa xmm2,XMMWORD[64+r11] -DB 102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,XMMWORD[80+r11] -DB 102,15,56,0,217 - pxor xmm3,xmm2 -DB 102,15,56,0,221 - - movdqa xmm2,XMMWORD[96+r11] -DB 
102,15,56,0,212 - pxor xmm2,xmm3 - movdqa xmm3,XMMWORD[112+r11] -DB 102,15,56,0,217 - pxor xmm3,xmm2 - - add rdx,-16 - -$L$schedule_mangle_both: - movdqa xmm1,XMMWORD[r10*1+r8] -DB 102,15,56,0,217 - add r8,-16 - and r8,0x30 - movdqu XMMWORD[rdx],xmm3 - DB 0F3h,0C3h ;repret - - - - - - -global vpaes_set_encrypt_key - -ALIGN 16 -vpaes_set_encrypt_key: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_set_encrypt_key: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -%ifdef BORINGSSL_DISPATCH_TEST -EXTERN BORINGSSL_function_hit - mov BYTE[((BORINGSSL_function_hit+5))],1 -%endif - - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$enc_key_body: - mov eax,esi - shr eax,5 - add eax,5 - mov DWORD[240+rdx],eax - - mov ecx,0 - mov r8d,0x30 - call _vpaes_schedule_core - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$enc_key_epilogue: - xor eax,eax - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_set_encrypt_key: - -global vpaes_set_decrypt_key - -ALIGN 16 -vpaes_set_decrypt_key: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_set_decrypt_key: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - 
movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$dec_key_body: - mov eax,esi - shr eax,5 - add eax,5 - mov DWORD[240+rdx],eax - shl eax,4 - lea rdx,[16+rax*1+rdx] - - mov ecx,1 - mov r8d,esi - shr r8d,1 - and r8d,32 - xor r8d,32 - call _vpaes_schedule_core - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$dec_key_epilogue: - xor eax,eax - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_set_decrypt_key: - -global vpaes_encrypt - -ALIGN 16 -vpaes_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - -%ifdef BORINGSSL_DISPATCH_TEST -EXTERN BORINGSSL_function_hit - mov BYTE[((BORINGSSL_function_hit+4))],1 -%endif - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$enc_body: - movdqu xmm0,XMMWORD[rdi] - call _vpaes_preheat - call _vpaes_encrypt_core - movdqu XMMWORD[rsi],xmm0 - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$enc_epilogue: - mov rdi,QWORD[8+rsp] 
;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_encrypt: - -global vpaes_decrypt - -ALIGN 16 -vpaes_decrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_decrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$dec_body: - movdqu xmm0,XMMWORD[rdi] - call _vpaes_preheat - call _vpaes_decrypt_core - movdqu XMMWORD[rsi],xmm0 - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$dec_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_decrypt: -global vpaes_cbc_encrypt - -ALIGN 16 -vpaes_cbc_encrypt: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_cbc_encrypt: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - xchg rdx,rcx - sub rcx,16 - jc NEAR $L$cbc_abort - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$cbc_body: - movdqu xmm6,XMMWORD[r8] - sub rsi,rdi - call _vpaes_preheat - cmp r9d,0 - je NEAR $L$cbc_dec_loop - jmp NEAR 
$L$cbc_enc_loop -ALIGN 16 -$L$cbc_enc_loop: - movdqu xmm0,XMMWORD[rdi] - pxor xmm0,xmm6 - call _vpaes_encrypt_core - movdqa xmm6,xmm0 - movdqu XMMWORD[rdi*1+rsi],xmm0 - lea rdi,[16+rdi] - sub rcx,16 - jnc NEAR $L$cbc_enc_loop - jmp NEAR $L$cbc_done -ALIGN 16 -$L$cbc_dec_loop: - movdqu xmm0,XMMWORD[rdi] - movdqa xmm7,xmm0 - call _vpaes_decrypt_core - pxor xmm0,xmm6 - movdqa xmm6,xmm7 - movdqu XMMWORD[rdi*1+rsi],xmm0 - lea rdi,[16+rdi] - sub rcx,16 - jnc NEAR $L$cbc_dec_loop -$L$cbc_done: - movdqu XMMWORD[r8],xmm6 - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$cbc_epilogue: -$L$cbc_abort: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_cbc_encrypt: -global vpaes_ctr32_encrypt_blocks - -ALIGN 16 -vpaes_ctr32_encrypt_blocks: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_vpaes_ctr32_encrypt_blocks: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - - - - - xchg rdx,rcx - test rcx,rcx - jz NEAR $L$ctr32_abort - lea rsp,[((-184))+rsp] - movaps XMMWORD[16+rsp],xmm6 - movaps XMMWORD[32+rsp],xmm7 - movaps XMMWORD[48+rsp],xmm8 - movaps XMMWORD[64+rsp],xmm9 - movaps XMMWORD[80+rsp],xmm10 - movaps XMMWORD[96+rsp],xmm11 - movaps XMMWORD[112+rsp],xmm12 - movaps XMMWORD[128+rsp],xmm13 - movaps XMMWORD[144+rsp],xmm14 - movaps XMMWORD[160+rsp],xmm15 -$L$ctr32_body: - movdqu xmm0,XMMWORD[r8] - movdqa xmm8,XMMWORD[$L$ctr_add_one] - sub rsi,rdi - call _vpaes_preheat - movdqa xmm6,xmm0 - pshufb xmm6,XMMWORD[$L$rev_ctr] - - test rcx,1 - jz NEAR $L$ctr32_prep_loop - - - - movdqu xmm7,XMMWORD[rdi] - call _vpaes_encrypt_core - pxor xmm0,xmm7 - paddd xmm6,xmm8 - movdqu 
XMMWORD[rdi*1+rsi],xmm0 - sub rcx,1 - lea rdi,[16+rdi] - jz NEAR $L$ctr32_done - -$L$ctr32_prep_loop: - - - movdqa xmm14,xmm6 - movdqa xmm15,xmm6 - paddd xmm15,xmm8 - -$L$ctr32_loop: - movdqa xmm1,XMMWORD[$L$rev_ctr] - movdqa xmm0,xmm14 - movdqa xmm6,xmm15 -DB 102,15,56,0,193 -DB 102,15,56,0,241 - call _vpaes_encrypt_core_2x - movdqu xmm1,XMMWORD[rdi] - movdqu xmm2,XMMWORD[16+rdi] - movdqa xmm3,XMMWORD[$L$ctr_add_two] - pxor xmm0,xmm1 - pxor xmm6,xmm2 - paddd xmm14,xmm3 - paddd xmm15,xmm3 - movdqu XMMWORD[rdi*1+rsi],xmm0 - movdqu XMMWORD[16+rdi*1+rsi],xmm6 - sub rcx,2 - lea rdi,[32+rdi] - jnz NEAR $L$ctr32_loop - -$L$ctr32_done: - movaps xmm6,XMMWORD[16+rsp] - movaps xmm7,XMMWORD[32+rsp] - movaps xmm8,XMMWORD[48+rsp] - movaps xmm9,XMMWORD[64+rsp] - movaps xmm10,XMMWORD[80+rsp] - movaps xmm11,XMMWORD[96+rsp] - movaps xmm12,XMMWORD[112+rsp] - movaps xmm13,XMMWORD[128+rsp] - movaps xmm14,XMMWORD[144+rsp] - movaps xmm15,XMMWORD[160+rsp] - lea rsp,[184+rsp] -$L$ctr32_epilogue: -$L$ctr32_abort: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_vpaes_ctr32_encrypt_blocks: - - - - - - - -ALIGN 16 -_vpaes_preheat: - - lea r10,[$L$k_s0F] - movdqa xmm10,XMMWORD[((-32))+r10] - movdqa xmm11,XMMWORD[((-16))+r10] - movdqa xmm9,XMMWORD[r10] - movdqa xmm13,XMMWORD[48+r10] - movdqa xmm12,XMMWORD[64+r10] - movdqa xmm15,XMMWORD[80+r10] - movdqa xmm14,XMMWORD[96+r10] - DB 0F3h,0C3h ;repret - - - - - - - - -ALIGN 64 -_vpaes_consts: -$L$k_inv: - DQ 0x0E05060F0D080180,0x040703090A0B0C02 - DQ 0x01040A060F0B0780,0x030D0E0C02050809 - -$L$k_s0F: - DQ 0x0F0F0F0F0F0F0F0F,0x0F0F0F0F0F0F0F0F - -$L$k_ipt: - DQ 0xC2B2E8985A2A7000,0xCABAE09052227808 - DQ 0x4C01307D317C4D00,0xCD80B1FCB0FDCC81 - -$L$k_sb1: - DQ 0xB19BE18FCB503E00,0xA5DF7A6E142AF544 - DQ 0x3618D415FAE22300,0x3BF7CCC10D2ED9EF -$L$k_sb2: - DQ 0xE27A93C60B712400,0x5EB7E955BC982FCD - DQ 0x69EB88400AE12900,0xC2A163C8AB82234A -$L$k_sbo: - DQ 0xD0D26D176FBDC700,0x15AABF7AC502A878 - DQ 
0xCFE474A55FBB6A00,0x8E1E90D1412B35FA - -$L$k_mc_forward: - DQ 0x0407060500030201,0x0C0F0E0D080B0A09 - DQ 0x080B0A0904070605,0x000302010C0F0E0D - DQ 0x0C0F0E0D080B0A09,0x0407060500030201 - DQ 0x000302010C0F0E0D,0x080B0A0904070605 - -$L$k_mc_backward: - DQ 0x0605040702010003,0x0E0D0C0F0A09080B - DQ 0x020100030E0D0C0F,0x0A09080B06050407 - DQ 0x0E0D0C0F0A09080B,0x0605040702010003 - DQ 0x0A09080B06050407,0x020100030E0D0C0F - -$L$k_sr: - DQ 0x0706050403020100,0x0F0E0D0C0B0A0908 - DQ 0x030E09040F0A0500,0x0B06010C07020D08 - DQ 0x0F060D040B020900,0x070E050C030A0108 - DQ 0x0B0E0104070A0D00,0x0306090C0F020508 - -$L$k_rcon: - DQ 0x1F8391B9AF9DEEB6,0x702A98084D7C7D81 - -$L$k_s63: - DQ 0x5B5B5B5B5B5B5B5B,0x5B5B5B5B5B5B5B5B - -$L$k_opt: - DQ 0xFF9F4929D6B66000,0xF7974121DEBE6808 - DQ 0x01EDBD5150BCEC00,0xE10D5DB1B05C0CE0 - -$L$k_deskew: - DQ 0x07E4A34047A4E300,0x1DFEB95A5DBEF91A - DQ 0x5F36B5DC83EA6900,0x2841C2ABF49D1E77 - - - - - -$L$k_dksd: - DQ 0xFEB91A5DA3E44700,0x0740E3A45A1DBEF9 - DQ 0x41C277F4B5368300,0x5FDC69EAAB289D1E -$L$k_dksb: - DQ 0x9A4FCA1F8550D500,0x03D653861CC94C99 - DQ 0x115BEDA7B6FC4A00,0xD993256F7E3482C8 -$L$k_dkse: - DQ 0xD5031CCA1FC9D600,0x53859A4C994F5086 - DQ 0xA23196054FDC7BE8,0xCD5EF96A20B31487 -$L$k_dks9: - DQ 0xB6116FC87ED9A700,0x4AED933482255BFC - DQ 0x4576516227143300,0x8BB89FACE9DAFDCE - - - - - -$L$k_dipt: - DQ 0x0F505B040B545F00,0x154A411E114E451A - DQ 0x86E383E660056500,0x12771772F491F194 - -$L$k_dsb9: - DQ 0x851C03539A86D600,0xCAD51F504F994CC9 - DQ 0xC03B1789ECD74900,0x725E2C9EB2FBA565 -$L$k_dsbd: - DQ 0x7D57CCDFE6B1A200,0xF56E9B13882A4439 - DQ 0x3CE2FAF724C6CB00,0x2931180D15DEEFD3 -$L$k_dsbb: - DQ 0xD022649296B44200,0x602646F6B0F2D404 - DQ 0xC19498A6CD596700,0xF3FF0C3E3255AA6B -$L$k_dsbe: - DQ 0x46F2929626D4D000,0x2242600464B4F6B0 - DQ 0x0C55A6CDFFAAC100,0x9467F36B98593E32 -$L$k_dsbo: - DQ 0x1387EA537EF94000,0xC7AA6DB9D4943E2D - DQ 0x12D7560F93441D00,0xCA4B8159D8C58E9C - - -$L$rev_ctr: - DQ 0x0706050403020100,0x0c0d0e0f0b0a0908 - - 
-$L$ctr_add_one: - DQ 0x0000000000000000,0x0000000100000000 -$L$ctr_add_two: - DQ 0x0000000000000000,0x0000000200000000 - -DB 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 -DB 111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54 -DB 52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97 -DB 109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32 -DB 85,110,105,118,101,114,115,105,116,121,41,0 -ALIGN 64 - -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -se_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$in_prologue - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$in_prologue - - lea rsi,[16+rax] - lea rdi,[512+r8] - mov ecx,20 - DD 0xa548f3fc - lea rax,[184+rax] - -$L$in_prologue: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_vpaes_set_encrypt_key wrt ..imagebase - DD $L$SEH_end_vpaes_set_encrypt_key wrt ..imagebase - DD $L$SEH_info_vpaes_set_encrypt_key wrt ..imagebase - - DD $L$SEH_begin_vpaes_set_decrypt_key wrt ..imagebase - DD $L$SEH_end_vpaes_set_decrypt_key wrt ..imagebase - DD $L$SEH_info_vpaes_set_decrypt_key wrt ..imagebase - - DD 
$L$SEH_begin_vpaes_encrypt wrt ..imagebase - DD $L$SEH_end_vpaes_encrypt wrt ..imagebase - DD $L$SEH_info_vpaes_encrypt wrt ..imagebase - - DD $L$SEH_begin_vpaes_decrypt wrt ..imagebase - DD $L$SEH_end_vpaes_decrypt wrt ..imagebase - DD $L$SEH_info_vpaes_decrypt wrt ..imagebase - - DD $L$SEH_begin_vpaes_cbc_encrypt wrt ..imagebase - DD $L$SEH_end_vpaes_cbc_encrypt wrt ..imagebase - DD $L$SEH_info_vpaes_cbc_encrypt wrt ..imagebase - - DD $L$SEH_begin_vpaes_ctr32_encrypt_blocks wrt ..imagebase - DD $L$SEH_end_vpaes_ctr32_encrypt_blocks wrt ..imagebase - DD $L$SEH_info_vpaes_ctr32_encrypt_blocks wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_vpaes_set_encrypt_key: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$enc_key_body wrt ..imagebase,$L$enc_key_epilogue wrt ..imagebase -$L$SEH_info_vpaes_set_decrypt_key: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$dec_key_body wrt ..imagebase,$L$dec_key_epilogue wrt ..imagebase -$L$SEH_info_vpaes_encrypt: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$enc_body wrt ..imagebase,$L$enc_epilogue wrt ..imagebase -$L$SEH_info_vpaes_decrypt: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$dec_body wrt ..imagebase,$L$dec_epilogue wrt ..imagebase -$L$SEH_info_vpaes_cbc_encrypt: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$cbc_body wrt ..imagebase,$L$cbc_epilogue wrt ..imagebase -$L$SEH_info_vpaes_ctr32_encrypt_blocks: -DB 9,0,0,0 - DD se_handler wrt ..imagebase - DD $L$ctr32_body wrt ..imagebase,$L$ctr32_epilogue wrt ..imagebase diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont.asm deleted file mode 100644 index d6d8bdd6d4..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont.asm +++ /dev/null @@ -1,1481 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -EXTERN OPENSSL_ia32cap_P - -global bn_mul_mont - -ALIGN 16 -bn_mul_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mul_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov r9d,r9d - mov rax,rsp - - test r9d,3 - jnz NEAR $L$mul_enter - cmp r9d,8 - jb NEAR $L$mul_enter - lea r11,[OPENSSL_ia32cap_P] - mov r11d,DWORD[8+r11] - cmp rdx,rsi - jne NEAR $L$mul4x_enter - test r9d,7 - jz NEAR $L$sqr8x_enter - jmp NEAR $L$mul4x_enter - -ALIGN 16 -$L$mul_enter: - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - - neg r9 - mov r11,rsp - lea r10,[((-16))+r9*8+rsp] - neg r9 - and r10,-1024 - - - - - - - - - - sub r11,r10 - and r11,-4096 - lea rsp,[r11*1+r10] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul_page_walk - jmp NEAR $L$mul_page_walk_done - -ALIGN 16 -$L$mul_page_walk: - lea rsp,[((-4096))+rsp] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul_page_walk -$L$mul_page_walk_done: - - mov QWORD[8+r9*8+rsp],rax - -$L$mul_body: - mov r12,rdx - mov r8,QWORD[r8] - mov rbx,QWORD[r12] - mov rax,QWORD[rsi] - - xor r14,r14 - xor r15,r15 - - mov rbp,r8 - mul rbx - mov r10,rax - mov rax,QWORD[rcx] - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov r13,rdx - - lea r15,[1+r15] - jmp NEAR $L$1st_enter - -ALIGN 16 -$L$1st: - add r13,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add r13,r11 - mov r11,r10 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - -$L$1st_enter: - mul rbx - add r11,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 - lea r15,[1+r15] - mov r10,rdx - - mul rbp - cmp r15,r9 - jne NEAR $L$1st - - add r13,rax - mov rax,QWORD[rsi] - adc rdx,0 - add r13,r11 - adc rdx,0 - mov 
QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - mov r11,r10 - - xor rdx,rdx - add r13,r11 - adc rdx,0 - mov QWORD[((-8))+r9*8+rsp],r13 - mov QWORD[r9*8+rsp],rdx - - lea r14,[1+r14] - jmp NEAR $L$outer -ALIGN 16 -$L$outer: - mov rbx,QWORD[r14*8+r12] - xor r15,r15 - mov rbp,r8 - mov r10,QWORD[rsp] - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov r10,QWORD[8+rsp] - mov r13,rdx - - lea r15,[1+r15] - jmp NEAR $L$inner_enter - -ALIGN 16 -$L$inner: - add r13,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add r13,r10 - mov r10,QWORD[r15*8+rsp] - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - -$L$inner_enter: - mul rbx - add r11,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 - add r10,r11 - mov r11,rdx - adc r11,0 - lea r15,[1+r15] - - mul rbp - cmp r15,r9 - jne NEAR $L$inner - - add r13,rax - mov rax,QWORD[rsi] - adc rdx,0 - add r13,r10 - mov r10,QWORD[r15*8+rsp] - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - - xor rdx,rdx - add r13,r11 - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-8))+r9*8+rsp],r13 - mov QWORD[r9*8+rsp],rdx - - lea r14,[1+r14] - cmp r14,r9 - jb NEAR $L$outer - - xor r14,r14 - mov rax,QWORD[rsp] - mov r15,r9 - -ALIGN 16 -$L$sub: sbb rax,QWORD[r14*8+rcx] - mov QWORD[r14*8+rdi],rax - mov rax,QWORD[8+r14*8+rsp] - lea r14,[1+r14] - dec r15 - jnz NEAR $L$sub - - sbb rax,0 - mov rbx,-1 - xor rbx,rax - xor r14,r14 - mov r15,r9 - -$L$copy: - mov rcx,QWORD[r14*8+rdi] - mov rdx,QWORD[r14*8+rsp] - and rcx,rbx - and rdx,rax - mov QWORD[r14*8+rsp],r9 - or rdx,rcx - mov QWORD[r14*8+rdi],rdx - lea r14,[1+r14] - sub r15,1 - jnz NEAR $L$copy - - mov rsi,QWORD[8+r9*8+rsp] - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mul_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - 
mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mul_mont: - -ALIGN 16 -bn_mul4x_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mul4x_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov r9d,r9d - mov rax,rsp - -$L$mul4x_enter: - and r11d,0x80100 - cmp r11d,0x80100 - je NEAR $L$mulx4x_enter - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - - neg r9 - mov r11,rsp - lea r10,[((-32))+r9*8+rsp] - neg r9 - and r10,-1024 - - sub r11,r10 - and r11,-4096 - lea rsp,[r11*1+r10] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul4x_page_walk - jmp NEAR $L$mul4x_page_walk_done - -$L$mul4x_page_walk: - lea rsp,[((-4096))+rsp] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul4x_page_walk -$L$mul4x_page_walk_done: - - mov QWORD[8+r9*8+rsp],rax - -$L$mul4x_body: - mov QWORD[16+r9*8+rsp],rdi - mov r12,rdx - mov r8,QWORD[r8] - mov rbx,QWORD[r12] - mov rax,QWORD[rsi] - - xor r14,r14 - xor r15,r15 - - mov rbp,r8 - mul rbx - mov r10,rax - mov rax,QWORD[rcx] - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+rsi] - adc rdx,0 - add rdi,r11 - lea r15,[4+r15] - adc rdx,0 - mov QWORD[rsp],rdi - mov r13,rdx - jmp NEAR $L$1st4x -ALIGN 16 -$L$1st4x: - mul rbx - add r10,rax - mov rax,QWORD[((-16))+r15*8+rcx] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+r15*8+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],rdi - mov r13,rdx - - mul rbx - add r10,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 
- mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[8+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-8))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+r15*8+rcx] - adc rdx,0 - lea r15,[4+r15] - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[((-16))+r15*8+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-32))+r15*8+rsp],rdi - mov r13,rdx - cmp r15,r9 - jb NEAR $L$1st4x - - mul rbx - add r10,rax - mov rax,QWORD[((-16))+r15*8+rcx] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+r15*8+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],rdi - mov r13,rdx - - xor rdi,rdi - add r13,r10 - adc rdi,0 - mov QWORD[((-8))+r15*8+rsp],r13 - mov QWORD[r15*8+rsp],rdi - - lea r14,[1+r14] -ALIGN 4 -$L$outer4x: - mov rbx,QWORD[r14*8+r12] - xor r15,r15 - mov r10,QWORD[rsp] - mov rbp,r8 - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - add r11,QWORD[8+rsp] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+rsi] - adc rdx,0 - add rdi,r11 - lea r15,[4+r15] - adc rdx,0 - mov QWORD[rsp],rdi - mov r13,rdx - jmp NEAR $L$inner4x -ALIGN 16 -$L$inner4x: - mul rbx - add r10,rax - mov rax,QWORD[((-16))+r15*8+rcx] - adc rdx,0 - add r10,QWORD[((-16))+r15*8+rsp] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+r15*8+rcx] - adc rdx,0 - add r11,QWORD[((-8))+r15*8+rsp] - adc rdx,0 - mov r10,rdx - - mul rbp - add 
rdi,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],rdi - mov r13,rdx - - mul rbx - add r10,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 - add r10,QWORD[r15*8+rsp] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[8+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-8))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+r15*8+rcx] - adc rdx,0 - add r11,QWORD[8+r15*8+rsp] - adc rdx,0 - lea r15,[4+r15] - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[((-16))+r15*8+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-32))+r15*8+rsp],rdi - mov r13,rdx - cmp r15,r9 - jb NEAR $L$inner4x - - mul rbx - add r10,rax - mov rax,QWORD[((-16))+r15*8+rcx] - adc rdx,0 - add r10,QWORD[((-16))+r15*8+rsp] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*8+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r15*8+rsp],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+r15*8+rcx] - adc rdx,0 - add r11,QWORD[((-8))+r15*8+rsp] - adc rdx,0 - lea r14,[1+r14] - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],rdi - mov r13,rdx - - xor rdi,rdi - add r13,r10 - adc rdi,0 - add r13,QWORD[r9*8+rsp] - adc rdi,0 - mov QWORD[((-8))+r15*8+rsp],r13 - mov QWORD[r15*8+rsp],rdi - - cmp r14,r9 - jb NEAR $L$outer4x - mov rdi,QWORD[16+r9*8+rsp] - lea r15,[((-4))+r9] - mov rax,QWORD[rsp] - mov rdx,QWORD[8+rsp] - shr r15,2 - lea rsi,[rsp] - xor r14,r14 - - sub rax,QWORD[rcx] - mov rbx,QWORD[16+rsi] - mov rbp,QWORD[24+rsi] - sbb rdx,QWORD[8+rcx] - -$L$sub4x: - mov QWORD[r14*8+rdi],rax - mov QWORD[8+r14*8+rdi],rdx - sbb rbx,QWORD[16+r14*8+rcx] - mov rax,QWORD[32+r14*8+rsi] - mov rdx,QWORD[40+r14*8+rsi] - sbb rbp,QWORD[24+r14*8+rcx] - mov QWORD[16+r14*8+rdi],rbx - mov QWORD[24+r14*8+rdi],rbp - sbb rax,QWORD[32+r14*8+rcx] - mov rbx,QWORD[48+r14*8+rsi] - mov 
rbp,QWORD[56+r14*8+rsi] - sbb rdx,QWORD[40+r14*8+rcx] - lea r14,[4+r14] - dec r15 - jnz NEAR $L$sub4x - - mov QWORD[r14*8+rdi],rax - mov rax,QWORD[32+r14*8+rsi] - sbb rbx,QWORD[16+r14*8+rcx] - mov QWORD[8+r14*8+rdi],rdx - sbb rbp,QWORD[24+r14*8+rcx] - mov QWORD[16+r14*8+rdi],rbx - - sbb rax,0 - mov QWORD[24+r14*8+rdi],rbp - pxor xmm0,xmm0 -DB 102,72,15,110,224 - pcmpeqd xmm5,xmm5 - pshufd xmm4,xmm4,0 - mov r15,r9 - pxor xmm5,xmm4 - shr r15,2 - xor eax,eax - - jmp NEAR $L$copy4x -ALIGN 16 -$L$copy4x: - movdqa xmm1,XMMWORD[rax*1+rsp] - movdqu xmm2,XMMWORD[rax*1+rdi] - pand xmm1,xmm4 - pand xmm2,xmm5 - movdqa xmm3,XMMWORD[16+rax*1+rsp] - movdqa XMMWORD[rax*1+rsp],xmm0 - por xmm1,xmm2 - movdqu xmm2,XMMWORD[16+rax*1+rdi] - movdqu XMMWORD[rax*1+rdi],xmm1 - pand xmm3,xmm4 - pand xmm2,xmm5 - movdqa XMMWORD[16+rax*1+rsp],xmm0 - por xmm3,xmm2 - movdqu XMMWORD[16+rax*1+rdi],xmm3 - lea rax,[32+rax] - dec r15 - jnz NEAR $L$copy4x - mov rsi,QWORD[8+r9*8+rsp] - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mul4x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mul4x_mont: -EXTERN bn_sqrx8x_internal -EXTERN bn_sqr8x_internal - - -ALIGN 32 -bn_sqr8x_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_sqr8x_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov rax,rsp - -$L$sqr8x_enter: - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$sqr8x_prologue: - - mov r10d,r9d - shl r9d,3 - shl r10,3+2 - neg r9 - - - - - - - lea r11,[((-64))+r9*2+rsp] - mov rbp,rsp - mov r8,QWORD[r8] - sub r11,rsi - and r11,4095 - cmp r10,r11 - jb NEAR $L$sqr8x_sp_alt - sub rbp,r11 - lea rbp,[((-64))+r9*2+rbp] - jmp NEAR 
$L$sqr8x_sp_done - -ALIGN 32 -$L$sqr8x_sp_alt: - lea r10,[((4096-64))+r9*2] - lea rbp,[((-64))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$sqr8x_sp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$sqr8x_page_walk - jmp NEAR $L$sqr8x_page_walk_done - -ALIGN 16 -$L$sqr8x_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$sqr8x_page_walk -$L$sqr8x_page_walk_done: - - mov r10,r9 - neg r9 - - mov QWORD[32+rsp],r8 - mov QWORD[40+rsp],rax - -$L$sqr8x_body: - -DB 102,72,15,110,209 - pxor xmm0,xmm0 -DB 102,72,15,110,207 -DB 102,73,15,110,218 - lea rax,[OPENSSL_ia32cap_P] - mov eax,DWORD[8+rax] - and eax,0x80100 - cmp eax,0x80100 - jne NEAR $L$sqr8x_nox - - call bn_sqrx8x_internal - - - - - lea rbx,[rcx*1+r8] - mov r9,rcx - mov rdx,rcx -DB 102,72,15,126,207 - sar rcx,3+2 - jmp NEAR $L$sqr8x_sub - -ALIGN 32 -$L$sqr8x_nox: - call bn_sqr8x_internal - - - - - lea rbx,[r9*1+rdi] - mov rcx,r9 - mov rdx,r9 -DB 102,72,15,126,207 - sar rcx,3+2 - jmp NEAR $L$sqr8x_sub - -ALIGN 32 -$L$sqr8x_sub: - mov r12,QWORD[rbx] - mov r13,QWORD[8+rbx] - mov r14,QWORD[16+rbx] - mov r15,QWORD[24+rbx] - lea rbx,[32+rbx] - sbb r12,QWORD[rbp] - sbb r13,QWORD[8+rbp] - sbb r14,QWORD[16+rbp] - sbb r15,QWORD[24+rbp] - lea rbp,[32+rbp] - mov QWORD[rdi],r12 - mov QWORD[8+rdi],r13 - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - lea rdi,[32+rdi] - inc rcx - jnz NEAR $L$sqr8x_sub - - sbb rax,0 - lea rbx,[r9*1+rbx] - lea rdi,[r9*1+rdi] - -DB 102,72,15,110,200 - pxor xmm0,xmm0 - pshufd xmm1,xmm1,0 - mov rsi,QWORD[40+rsp] - - jmp NEAR $L$sqr8x_cond_copy - -ALIGN 32 -$L$sqr8x_cond_copy: - movdqa xmm2,XMMWORD[rbx] - movdqa xmm3,XMMWORD[16+rbx] - lea rbx,[32+rbx] - movdqu xmm4,XMMWORD[rdi] - movdqu xmm5,XMMWORD[16+rdi] - lea rdi,[32+rdi] - movdqa XMMWORD[(-32)+rbx],xmm0 - movdqa XMMWORD[(-16)+rbx],xmm0 - movdqa XMMWORD[(-32)+rdx*1+rbx],xmm0 - movdqa 
XMMWORD[(-16)+rdx*1+rbx],xmm0 - pcmpeqd xmm0,xmm1 - pand xmm2,xmm1 - pand xmm3,xmm1 - pand xmm4,xmm0 - pand xmm5,xmm0 - pxor xmm0,xmm0 - por xmm4,xmm2 - por xmm5,xmm3 - movdqu XMMWORD[(-32)+rdi],xmm4 - movdqu XMMWORD[(-16)+rdi],xmm5 - add r9,32 - jnz NEAR $L$sqr8x_cond_copy - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$sqr8x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_sqr8x_mont: - -ALIGN 32 -bn_mulx4x_mont: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mulx4x_mont: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov rax,rsp - -$L$mulx4x_enter: - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$mulx4x_prologue: - - shl r9d,3 - xor r10,r10 - sub r10,r9 - mov r8,QWORD[r8] - lea rbp,[((-72))+r10*1+rsp] - and rbp,-128 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mulx4x_page_walk - jmp NEAR $L$mulx4x_page_walk_done - -ALIGN 16 -$L$mulx4x_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mulx4x_page_walk -$L$mulx4x_page_walk_done: - - lea r10,[r9*1+rdx] - - - - - - - - - - - - - mov QWORD[rsp],r9 - shr r9,5 - mov QWORD[16+rsp],r10 - sub r9,1 - mov QWORD[24+rsp],r8 - mov QWORD[32+rsp],rdi - mov QWORD[40+rsp],rax - - mov QWORD[48+rsp],r9 - jmp NEAR $L$mulx4x_body - -ALIGN 32 -$L$mulx4x_body: - lea rdi,[8+rdx] - mov rdx,QWORD[rdx] - lea rbx,[((64+32))+rsp] - mov r9,rdx - - mulx rax,r8,QWORD[rsi] - mulx r14,r11,QWORD[8+rsi] - add r11,rax - mov QWORD[8+rsp],rdi - mulx r13,r12,QWORD[16+rsi] - adc r12,r14 - adc r13,0 - - mov rdi,r8 - imul r8,QWORD[24+rsp] - xor rbp,rbp - - mulx r14,rax,QWORD[24+rsi] 
- mov rdx,r8 - lea rsi,[32+rsi] - adcx r13,rax - adcx r14,rbp - - mulx r10,rax,QWORD[rcx] - adcx rdi,rax - adox r10,r11 - mulx r11,rax,QWORD[8+rcx] - adcx r10,rax - adox r11,r12 -DB 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 - mov rdi,QWORD[48+rsp] - mov QWORD[((-32))+rbx],r10 - adcx r11,rax - adox r12,r13 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-24))+rbx],r11 - adcx r12,rax - adox r15,rbp - lea rcx,[32+rcx] - mov QWORD[((-16))+rbx],r12 - - jmp NEAR $L$mulx4x_1st - -ALIGN 32 -$L$mulx4x_1st: - adcx r15,rbp - mulx rax,r10,QWORD[rsi] - adcx r10,r14 - mulx r14,r11,QWORD[8+rsi] - adcx r11,rax - mulx rax,r12,QWORD[16+rsi] - adcx r12,r14 - mulx r14,r13,QWORD[24+rsi] -DB 0x67,0x67 - mov rdx,r8 - adcx r13,rax - adcx r14,rbp - lea rsi,[32+rsi] - lea rbx,[32+rbx] - - adox r10,r15 - mulx r15,rax,QWORD[rcx] - adcx r10,rax - adox r11,r15 - mulx r15,rax,QWORD[8+rcx] - adcx r11,rax - adox r12,r15 - mulx r15,rax,QWORD[16+rcx] - mov QWORD[((-40))+rbx],r10 - adcx r12,rax - mov QWORD[((-32))+rbx],r11 - adox r13,r15 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-24))+rbx],r12 - adcx r13,rax - adox r15,rbp - lea rcx,[32+rcx] - mov QWORD[((-16))+rbx],r13 - - dec rdi - jnz NEAR $L$mulx4x_1st - - mov rax,QWORD[rsp] - mov rdi,QWORD[8+rsp] - adc r15,rbp - add r14,r15 - sbb r15,r15 - mov QWORD[((-8))+rbx],r14 - jmp NEAR $L$mulx4x_outer - -ALIGN 32 -$L$mulx4x_outer: - mov rdx,QWORD[rdi] - lea rdi,[8+rdi] - sub rsi,rax - mov QWORD[rbx],r15 - lea rbx,[((64+32))+rsp] - sub rcx,rax - - mulx r11,r8,QWORD[rsi] - xor ebp,ebp - mov r9,rdx - mulx r12,r14,QWORD[8+rsi] - adox r8,QWORD[((-32))+rbx] - adcx r11,r14 - mulx r13,r15,QWORD[16+rsi] - adox r11,QWORD[((-24))+rbx] - adcx r12,r15 - adox r12,QWORD[((-16))+rbx] - adcx r13,rbp - adox r13,rbp - - mov QWORD[8+rsp],rdi - mov r15,r8 - imul r8,QWORD[24+rsp] - xor ebp,ebp - - mulx r14,rax,QWORD[24+rsi] - mov rdx,r8 - adcx r13,rax - adox r13,QWORD[((-8))+rbx] - adcx r14,rbp - lea rsi,[32+rsi] - adox r14,rbp - - mulx 
r10,rax,QWORD[rcx] - adcx r15,rax - adox r10,r11 - mulx r11,rax,QWORD[8+rcx] - adcx r10,rax - adox r11,r12 - mulx r12,rax,QWORD[16+rcx] - mov QWORD[((-32))+rbx],r10 - adcx r11,rax - adox r12,r13 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-24))+rbx],r11 - lea rcx,[32+rcx] - adcx r12,rax - adox r15,rbp - mov rdi,QWORD[48+rsp] - mov QWORD[((-16))+rbx],r12 - - jmp NEAR $L$mulx4x_inner - -ALIGN 32 -$L$mulx4x_inner: - mulx rax,r10,QWORD[rsi] - adcx r15,rbp - adox r10,r14 - mulx r14,r11,QWORD[8+rsi] - adcx r10,QWORD[rbx] - adox r11,rax - mulx rax,r12,QWORD[16+rsi] - adcx r11,QWORD[8+rbx] - adox r12,r14 - mulx r14,r13,QWORD[24+rsi] - mov rdx,r8 - adcx r12,QWORD[16+rbx] - adox r13,rax - adcx r13,QWORD[24+rbx] - adox r14,rbp - lea rsi,[32+rsi] - lea rbx,[32+rbx] - adcx r14,rbp - - adox r10,r15 - mulx r15,rax,QWORD[rcx] - adcx r10,rax - adox r11,r15 - mulx r15,rax,QWORD[8+rcx] - adcx r11,rax - adox r12,r15 - mulx r15,rax,QWORD[16+rcx] - mov QWORD[((-40))+rbx],r10 - adcx r12,rax - adox r13,r15 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-32))+rbx],r11 - mov QWORD[((-24))+rbx],r12 - adcx r13,rax - adox r15,rbp - lea rcx,[32+rcx] - mov QWORD[((-16))+rbx],r13 - - dec rdi - jnz NEAR $L$mulx4x_inner - - mov rax,QWORD[rsp] - mov rdi,QWORD[8+rsp] - adc r15,rbp - sub rbp,QWORD[rbx] - adc r14,r15 - sbb r15,r15 - mov QWORD[((-8))+rbx],r14 - - cmp rdi,QWORD[16+rsp] - jne NEAR $L$mulx4x_outer - - lea rbx,[64+rsp] - sub rcx,rax - neg r15 - mov rdx,rax - shr rax,3+2 - mov rdi,QWORD[32+rsp] - jmp NEAR $L$mulx4x_sub - -ALIGN 32 -$L$mulx4x_sub: - mov r11,QWORD[rbx] - mov r12,QWORD[8+rbx] - mov r13,QWORD[16+rbx] - mov r14,QWORD[24+rbx] - lea rbx,[32+rbx] - sbb r11,QWORD[rcx] - sbb r12,QWORD[8+rcx] - sbb r13,QWORD[16+rcx] - sbb r14,QWORD[24+rcx] - lea rcx,[32+rcx] - mov QWORD[rdi],r11 - mov QWORD[8+rdi],r12 - mov QWORD[16+rdi],r13 - mov QWORD[24+rdi],r14 - lea rdi,[32+rdi] - dec rax - jnz NEAR $L$mulx4x_sub - - sbb r15,0 - lea rbx,[64+rsp] - sub rdi,rdx - -DB 
102,73,15,110,207 - pxor xmm0,xmm0 - pshufd xmm1,xmm1,0 - mov rsi,QWORD[40+rsp] - - jmp NEAR $L$mulx4x_cond_copy - -ALIGN 32 -$L$mulx4x_cond_copy: - movdqa xmm2,XMMWORD[rbx] - movdqa xmm3,XMMWORD[16+rbx] - lea rbx,[32+rbx] - movdqu xmm4,XMMWORD[rdi] - movdqu xmm5,XMMWORD[16+rdi] - lea rdi,[32+rdi] - movdqa XMMWORD[(-32)+rbx],xmm0 - movdqa XMMWORD[(-16)+rbx],xmm0 - pcmpeqd xmm0,xmm1 - pand xmm2,xmm1 - pand xmm3,xmm1 - pand xmm4,xmm0 - pand xmm5,xmm0 - pxor xmm0,xmm0 - por xmm4,xmm2 - por xmm5,xmm3 - movdqu XMMWORD[(-32)+rdi],xmm4 - movdqu XMMWORD[(-16)+rdi],xmm5 - sub rdx,32 - jnz NEAR $L$mulx4x_cond_copy - - mov QWORD[rbx],rdx - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mulx4x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mulx4x_mont: -DB 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 -DB 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 -DB 54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83 -DB 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 -DB 115,108,46,111,114,103,62,0 -ALIGN 16 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -mul_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov r10,QWORD[192+r8] - mov rax,QWORD[8+r10*8+rax] - - jmp NEAR $L$common_pop_regs - - - -ALIGN 16 -sqr_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - 
mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_pop_regs - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[8+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - mov rax,QWORD[40+rax] - -$L$common_pop_regs: - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_bn_mul_mont wrt ..imagebase - DD $L$SEH_end_bn_mul_mont wrt ..imagebase - DD $L$SEH_info_bn_mul_mont wrt ..imagebase - - DD $L$SEH_begin_bn_mul4x_mont wrt ..imagebase - DD $L$SEH_end_bn_mul4x_mont wrt ..imagebase - DD $L$SEH_info_bn_mul4x_mont wrt ..imagebase - - DD $L$SEH_begin_bn_sqr8x_mont wrt ..imagebase - DD $L$SEH_end_bn_sqr8x_mont wrt ..imagebase - DD $L$SEH_info_bn_sqr8x_mont wrt ..imagebase - DD $L$SEH_begin_bn_mulx4x_mont wrt ..imagebase - DD $L$SEH_end_bn_mulx4x_mont wrt ..imagebase - DD $L$SEH_info_bn_mulx4x_mont wrt ..imagebase -section .xdata 
rdata align=8 -ALIGN 8 -$L$SEH_info_bn_mul_mont: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$mul_body wrt ..imagebase,$L$mul_epilogue wrt ..imagebase -$L$SEH_info_bn_mul4x_mont: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$mul4x_body wrt ..imagebase,$L$mul4x_epilogue wrt ..imagebase -$L$SEH_info_bn_sqr8x_mont: -DB 9,0,0,0 - DD sqr_handler wrt ..imagebase - DD $L$sqr8x_prologue wrt ..imagebase,$L$sqr8x_body wrt ..imagebase,$L$sqr8x_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_mulx4x_mont: -DB 9,0,0,0 - DD sqr_handler wrt ..imagebase - DD $L$mulx4x_prologue wrt ..imagebase,$L$mulx4x_body wrt ..imagebase,$L$mulx4x_epilogue wrt ..imagebase -ALIGN 8 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm b/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm deleted file mode 100644 index 7a1d5dbd9c..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm +++ /dev/null @@ -1,4036 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - -EXTERN OPENSSL_ia32cap_P - -global bn_mul_mont_gather5 - -ALIGN 64 -bn_mul_mont_gather5: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mul_mont_gather5: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov r9d,r9d - mov rax,rsp - - test r9d,7 - jnz NEAR $L$mul_enter - lea r11,[OPENSSL_ia32cap_P] - mov r11d,DWORD[8+r11] - jmp NEAR $L$mul4x_enter - -ALIGN 16 -$L$mul_enter: - movd xmm5,DWORD[56+rsp] - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - - - neg r9 - mov r11,rsp - lea r10,[((-280))+r9*8+rsp] - neg r9 - and r10,-1024 - - - - - - - - - - sub r11,r10 - and r11,-4096 - lea rsp,[r11*1+r10] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul_page_walk - jmp NEAR $L$mul_page_walk_done - -$L$mul_page_walk: - lea rsp,[((-4096))+rsp] - mov r11,QWORD[rsp] - cmp rsp,r10 - ja NEAR $L$mul_page_walk -$L$mul_page_walk_done: - - lea r10,[$L$inc] - mov QWORD[8+r9*8+rsp],rax - -$L$mul_body: - - lea r12,[128+rdx] - movdqa xmm0,XMMWORD[r10] - movdqa xmm1,XMMWORD[16+r10] - lea r10,[((24-112))+r9*8+rsp] - and r10,-16 - - pshufd xmm5,xmm5,0 - movdqa xmm4,xmm1 - movdqa xmm2,xmm1 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 -DB 0x67 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[112+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[128+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[144+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[160+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[176+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[192+r10],xmm1 - movdqa xmm1,xmm4 - 
- paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[208+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[224+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[240+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[256+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[272+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[288+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[304+r10],xmm0 - - paddd xmm3,xmm2 -DB 0x67 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[320+r10],xmm1 - - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[336+r10],xmm2 - pand xmm0,XMMWORD[64+r12] - - pand xmm1,XMMWORD[80+r12] - pand xmm2,XMMWORD[96+r12] - movdqa XMMWORD[352+r10],xmm3 - pand xmm3,XMMWORD[112+r12] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[((-128))+r12] - movdqa xmm5,XMMWORD[((-112))+r12] - movdqa xmm2,XMMWORD[((-96))+r12] - pand xmm4,XMMWORD[112+r10] - movdqa xmm3,XMMWORD[((-80))+r12] - pand xmm5,XMMWORD[128+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[144+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[160+r10] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[((-64))+r12] - movdqa xmm5,XMMWORD[((-48))+r12] - movdqa xmm2,XMMWORD[((-32))+r12] - pand xmm4,XMMWORD[176+r10] - movdqa xmm3,XMMWORD[((-16))+r12] - pand xmm5,XMMWORD[192+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[208+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[224+r10] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[r12] - movdqa xmm5,XMMWORD[16+r12] - movdqa xmm2,XMMWORD[32+r12] - pand xmm4,XMMWORD[240+r10] - movdqa xmm3,XMMWORD[48+r12] - pand xmm5,XMMWORD[256+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[272+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[288+r10] - por xmm0,xmm2 - por xmm1,xmm3 - por xmm0,xmm1 - pshufd xmm1,xmm0,0x4e - por xmm0,xmm1 - lea r12,[256+r12] -DB 102,72,15,126,195 - - mov r8,QWORD[r8] - mov rax,QWORD[rsi] - - 
xor r14,r14 - xor r15,r15 - - mov rbp,r8 - mul rbx - mov r10,rax - mov rax,QWORD[rcx] - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov r13,rdx - - lea r15,[1+r15] - jmp NEAR $L$1st_enter - -ALIGN 16 -$L$1st: - add r13,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add r13,r11 - mov r11,r10 - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - -$L$1st_enter: - mul rbx - add r11,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 - lea r15,[1+r15] - mov r10,rdx - - mul rbp - cmp r15,r9 - jne NEAR $L$1st - - - add r13,rax - adc rdx,0 - add r13,r11 - adc rdx,0 - mov QWORD[((-16))+r9*8+rsp],r13 - mov r13,rdx - mov r11,r10 - - xor rdx,rdx - add r13,r11 - adc rdx,0 - mov QWORD[((-8))+r9*8+rsp],r13 - mov QWORD[r9*8+rsp],rdx - - lea r14,[1+r14] - jmp NEAR $L$outer -ALIGN 16 -$L$outer: - lea rdx,[((24+128))+r9*8+rsp] - and rdx,-16 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - movdqa xmm0,XMMWORD[((-128))+r12] - movdqa xmm1,XMMWORD[((-112))+r12] - movdqa xmm2,XMMWORD[((-96))+r12] - movdqa xmm3,XMMWORD[((-80))+r12] - pand xmm0,XMMWORD[((-128))+rdx] - pand xmm1,XMMWORD[((-112))+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-96))+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-80))+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[((-64))+r12] - movdqa xmm1,XMMWORD[((-48))+r12] - movdqa xmm2,XMMWORD[((-32))+r12] - movdqa xmm3,XMMWORD[((-16))+r12] - pand xmm0,XMMWORD[((-64))+rdx] - pand xmm1,XMMWORD[((-48))+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-32))+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-16))+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[r12] - movdqa xmm1,XMMWORD[16+r12] - movdqa xmm2,XMMWORD[32+r12] - movdqa xmm3,XMMWORD[48+r12] - pand xmm0,XMMWORD[rdx] - pand xmm1,XMMWORD[16+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[32+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[48+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[64+r12] - movdqa xmm1,XMMWORD[80+r12] - movdqa xmm2,XMMWORD[96+r12] - movdqa 
xmm3,XMMWORD[112+r12] - pand xmm0,XMMWORD[64+rdx] - pand xmm1,XMMWORD[80+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[96+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[112+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - por xmm4,xmm5 - pshufd xmm0,xmm4,0x4e - por xmm0,xmm4 - lea r12,[256+r12] - - mov rax,QWORD[rsi] -DB 102,72,15,126,195 - - xor r15,r15 - mov rbp,r8 - mov r10,QWORD[rsp] - - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - - imul rbp,r10 - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+rsi] - adc rdx,0 - mov r10,QWORD[8+rsp] - mov r13,rdx - - lea r15,[1+r15] - jmp NEAR $L$inner_enter - -ALIGN 16 -$L$inner: - add r13,rax - mov rax,QWORD[r15*8+rsi] - adc rdx,0 - add r13,r10 - mov r10,QWORD[r15*8+rsp] - adc rdx,0 - mov QWORD[((-16))+r15*8+rsp],r13 - mov r13,rdx - -$L$inner_enter: - mul rbx - add r11,rax - mov rax,QWORD[r15*8+rcx] - adc rdx,0 - add r10,r11 - mov r11,rdx - adc r11,0 - lea r15,[1+r15] - - mul rbp - cmp r15,r9 - jne NEAR $L$inner - - add r13,rax - adc rdx,0 - add r13,r10 - mov r10,QWORD[r9*8+rsp] - adc rdx,0 - mov QWORD[((-16))+r9*8+rsp],r13 - mov r13,rdx - - xor rdx,rdx - add r13,r11 - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-8))+r9*8+rsp],r13 - mov QWORD[r9*8+rsp],rdx - - lea r14,[1+r14] - cmp r14,r9 - jb NEAR $L$outer - - xor r14,r14 - mov rax,QWORD[rsp] - lea rsi,[rsp] - mov r15,r9 - jmp NEAR $L$sub -ALIGN 16 -$L$sub: sbb rax,QWORD[r14*8+rcx] - mov QWORD[r14*8+rdi],rax - mov rax,QWORD[8+r14*8+rsi] - lea r14,[1+r14] - dec r15 - jnz NEAR $L$sub - - sbb rax,0 - mov rbx,-1 - xor rbx,rax - xor r14,r14 - mov r15,r9 - -$L$copy: - mov rcx,QWORD[r14*8+rdi] - mov rdx,QWORD[r14*8+rsp] - and rcx,rbx - and rdx,rax - mov QWORD[r14*8+rsp],r14 - or rdx,rcx - mov QWORD[r14*8+rdi],rdx - lea r14,[1+r14] - sub r15,1 - jnz NEAR $L$copy - - mov rsi,QWORD[8+r9*8+rsp] - - mov rax,1 - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov 
rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mul_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mul_mont_gather5: - -ALIGN 32 -bn_mul4x_mont_gather5: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mul4x_mont_gather5: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - -DB 0x67 - mov rax,rsp - -$L$mul4x_enter: - and r11d,0x80108 - cmp r11d,0x80108 - je NEAR $L$mulx4x_enter - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$mul4x_prologue: - -DB 0x67 - shl r9d,3 - lea r10,[r9*2+r9] - neg r9 - - - - - - - - - - - lea r11,[((-320))+r9*2+rsp] - mov rbp,rsp - sub r11,rdi - and r11,4095 - cmp r10,r11 - jb NEAR $L$mul4xsp_alt - sub rbp,r11 - lea rbp,[((-320))+r9*2+rbp] - jmp NEAR $L$mul4xsp_done - -ALIGN 32 -$L$mul4xsp_alt: - lea r10,[((4096-320))+r9*2] - lea rbp,[((-320))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$mul4xsp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mul4x_page_walk - jmp NEAR $L$mul4x_page_walk_done - -$L$mul4x_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mul4x_page_walk -$L$mul4x_page_walk_done: - - neg r9 - - mov QWORD[40+rsp],rax - -$L$mul4x_body: - - call mul4x_internal - - mov rsi,QWORD[40+rsp] - - mov rax,1 - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mul4x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mul4x_mont_gather5: - - -ALIGN 32 -mul4x_internal: - - shl r9,5 - movd xmm5,DWORD[56+rax] - lea rax,[$L$inc] - lea r13,[128+r9*1+rdx] - shr r9,5 - movdqa xmm0,XMMWORD[rax] - 
movdqa xmm1,XMMWORD[16+rax] - lea r10,[((88-112))+r9*1+rsp] - lea r12,[128+rdx] - - pshufd xmm5,xmm5,0 - movdqa xmm4,xmm1 -DB 0x67,0x67 - movdqa xmm2,xmm1 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 -DB 0x67 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[112+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[128+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[144+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[160+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[176+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[192+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[208+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[224+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[240+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[256+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[272+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[288+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[304+r10],xmm0 - - paddd xmm3,xmm2 -DB 0x67 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[320+r10],xmm1 - - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[336+r10],xmm2 - pand xmm0,XMMWORD[64+r12] - - pand xmm1,XMMWORD[80+r12] - pand xmm2,XMMWORD[96+r12] - movdqa XMMWORD[352+r10],xmm3 - pand xmm3,XMMWORD[112+r12] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[((-128))+r12] - movdqa xmm5,XMMWORD[((-112))+r12] - movdqa xmm2,XMMWORD[((-96))+r12] - pand xmm4,XMMWORD[112+r10] - movdqa xmm3,XMMWORD[((-80))+r12] - pand xmm5,XMMWORD[128+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[144+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[160+r10] - por xmm0,xmm2 - por xmm1,xmm3 - 
movdqa xmm4,XMMWORD[((-64))+r12] - movdqa xmm5,XMMWORD[((-48))+r12] - movdqa xmm2,XMMWORD[((-32))+r12] - pand xmm4,XMMWORD[176+r10] - movdqa xmm3,XMMWORD[((-16))+r12] - pand xmm5,XMMWORD[192+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[208+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[224+r10] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[r12] - movdqa xmm5,XMMWORD[16+r12] - movdqa xmm2,XMMWORD[32+r12] - pand xmm4,XMMWORD[240+r10] - movdqa xmm3,XMMWORD[48+r12] - pand xmm5,XMMWORD[256+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[272+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[288+r10] - por xmm0,xmm2 - por xmm1,xmm3 - por xmm0,xmm1 - pshufd xmm1,xmm0,0x4e - por xmm0,xmm1 - lea r12,[256+r12] -DB 102,72,15,126,195 - - mov QWORD[((16+8))+rsp],r13 - mov QWORD[((56+8))+rsp],rdi - - mov r8,QWORD[r8] - mov rax,QWORD[rsi] - lea rsi,[r9*1+rsi] - neg r9 - - mov rbp,r8 - mul rbx - mov r10,rax - mov rax,QWORD[rcx] - - imul rbp,r10 - lea r14,[((64+8))+rsp] - mov r11,rdx - - mul rbp - add r10,rax - mov rax,QWORD[8+r9*1+rsi] - adc rdx,0 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+r9*1+rsi] - adc rdx,0 - add rdi,r11 - lea r15,[32+r9] - lea rcx,[32+rcx] - adc rdx,0 - mov QWORD[r14],rdi - mov r13,rdx - jmp NEAR $L$1st4x - -ALIGN 32 -$L$1st4x: - mul rbx - add r10,rax - mov rax,QWORD[((-16))+rcx] - lea r14,[32+r14] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*1+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r14],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[r15*1+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r14],rdi - mov r13,rdx - - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[8+r15*1+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-8))+r14],r13 - mov rdi,rdx - - mul rbx 
- add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+r15*1+rsi] - adc rdx,0 - add rdi,r11 - lea rcx,[32+rcx] - adc rdx,0 - mov QWORD[r14],rdi - mov r13,rdx - - add r15,32 - jnz NEAR $L$1st4x - - mul rbx - add r10,rax - mov rax,QWORD[((-16))+rcx] - lea r14,[32+r14] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-24))+r14],r13 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+rcx] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[r9*1+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-16))+r14],rdi - mov r13,rdx - - lea rcx,[r9*1+rcx] - - xor rdi,rdi - add r13,r10 - adc rdi,0 - mov QWORD[((-8))+r14],r13 - - jmp NEAR $L$outer4x - -ALIGN 32 -$L$outer4x: - lea rdx,[((16+128))+r14] - pxor xmm4,xmm4 - pxor xmm5,xmm5 - movdqa xmm0,XMMWORD[((-128))+r12] - movdqa xmm1,XMMWORD[((-112))+r12] - movdqa xmm2,XMMWORD[((-96))+r12] - movdqa xmm3,XMMWORD[((-80))+r12] - pand xmm0,XMMWORD[((-128))+rdx] - pand xmm1,XMMWORD[((-112))+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-96))+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-80))+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[((-64))+r12] - movdqa xmm1,XMMWORD[((-48))+r12] - movdqa xmm2,XMMWORD[((-32))+r12] - movdqa xmm3,XMMWORD[((-16))+r12] - pand xmm0,XMMWORD[((-64))+rdx] - pand xmm1,XMMWORD[((-48))+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-32))+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-16))+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[r12] - movdqa xmm1,XMMWORD[16+r12] - movdqa xmm2,XMMWORD[32+r12] - movdqa xmm3,XMMWORD[48+r12] - pand xmm0,XMMWORD[rdx] - pand xmm1,XMMWORD[16+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[32+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[48+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[64+r12] - movdqa xmm1,XMMWORD[80+r12] - movdqa xmm2,XMMWORD[96+r12] - movdqa xmm3,XMMWORD[112+r12] - pand 
xmm0,XMMWORD[64+rdx] - pand xmm1,XMMWORD[80+rdx] - por xmm4,xmm0 - pand xmm2,XMMWORD[96+rdx] - por xmm5,xmm1 - pand xmm3,XMMWORD[112+rdx] - por xmm4,xmm2 - por xmm5,xmm3 - por xmm4,xmm5 - pshufd xmm0,xmm4,0x4e - por xmm0,xmm4 - lea r12,[256+r12] -DB 102,72,15,126,195 - - mov r10,QWORD[r9*1+r14] - mov rbp,r8 - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - - imul rbp,r10 - mov r11,rdx - mov QWORD[r14],rdi - - lea r14,[r9*1+r14] - - mul rbp - add r10,rax - mov rax,QWORD[8+r9*1+rsi] - adc rdx,0 - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - add r11,QWORD[8+r14] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+r9*1+rsi] - adc rdx,0 - add rdi,r11 - lea r15,[32+r9] - lea rcx,[32+rcx] - adc rdx,0 - mov r13,rdx - jmp NEAR $L$inner4x - -ALIGN 32 -$L$inner4x: - mul rbx - add r10,rax - mov rax,QWORD[((-16))+rcx] - adc rdx,0 - add r10,QWORD[16+r14] - lea r14,[32+r14] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+r15*1+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-32))+r14],rdi - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[((-8))+rcx] - adc rdx,0 - add r11,QWORD[((-8))+r14] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[r15*1+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-24))+r14],r13 - mov r13,rdx - - mul rbx - add r10,rax - mov rax,QWORD[rcx] - adc rdx,0 - add r10,QWORD[r14] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[8+r15*1+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-16))+r14],rdi - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,QWORD[8+rcx] - adc rdx,0 - add r11,QWORD[8+r14] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[16+r15*1+rsi] - adc rdx,0 - add rdi,r11 - lea rcx,[32+rcx] - adc rdx,0 - mov QWORD[((-8))+r14],r13 - mov r13,rdx - - add r15,32 - jnz NEAR $L$inner4x - - mul rbx - add r10,rax - mov rax,QWORD[((-16))+rcx] - adc rdx,0 - add r10,QWORD[16+r14] - lea 
r14,[32+r14] - adc rdx,0 - mov r11,rdx - - mul rbp - add r13,rax - mov rax,QWORD[((-8))+rsi] - adc rdx,0 - add r13,r10 - adc rdx,0 - mov QWORD[((-32))+r14],rdi - mov rdi,rdx - - mul rbx - add r11,rax - mov rax,rbp - mov rbp,QWORD[((-8))+rcx] - adc rdx,0 - add r11,QWORD[((-8))+r14] - adc rdx,0 - mov r10,rdx - - mul rbp - add rdi,rax - mov rax,QWORD[r9*1+rsi] - adc rdx,0 - add rdi,r11 - adc rdx,0 - mov QWORD[((-24))+r14],r13 - mov r13,rdx - - mov QWORD[((-16))+r14],rdi - lea rcx,[r9*1+rcx] - - xor rdi,rdi - add r13,r10 - adc rdi,0 - add r13,QWORD[r14] - adc rdi,0 - mov QWORD[((-8))+r14],r13 - - cmp r12,QWORD[((16+8))+rsp] - jb NEAR $L$outer4x - xor rax,rax - sub rbp,r13 - adc r15,r15 - or rdi,r15 - sub rax,rdi - lea rbx,[r9*1+r14] - mov r12,QWORD[rcx] - lea rbp,[rcx] - mov rcx,r9 - sar rcx,3+2 - mov rdi,QWORD[((56+8))+rsp] - dec r12 - xor r10,r10 - mov r13,QWORD[8+rbp] - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] - jmp NEAR $L$sqr4x_sub_entry - - -global bn_power5 - -ALIGN 32 -bn_power5: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_power5: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov rax,rsp - - lea r11,[OPENSSL_ia32cap_P] - mov r11d,DWORD[8+r11] - and r11d,0x80108 - cmp r11d,0x80108 - je NEAR $L$powerx5_enter - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$power5_prologue: - - shl r9d,3 - lea r10d,[r9*2+r9] - neg r9 - mov r8,QWORD[r8] - - - - - - - - - lea r11,[((-320))+r9*2+rsp] - mov rbp,rsp - sub r11,rdi - and r11,4095 - cmp r10,r11 - jb NEAR $L$pwr_sp_alt - sub rbp,r11 - lea rbp,[((-320))+r9*2+rbp] - jmp NEAR $L$pwr_sp_done - -ALIGN 32 -$L$pwr_sp_alt: - lea r10,[((4096-320))+r9*2] - lea rbp,[((-320))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$pwr_sp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR 
$L$pwr_page_walk - jmp NEAR $L$pwr_page_walk_done - -$L$pwr_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$pwr_page_walk -$L$pwr_page_walk_done: - - mov r10,r9 - neg r9 - - - - - - - - - - - mov QWORD[32+rsp],r8 - mov QWORD[40+rsp],rax - -$L$power5_body: -DB 102,72,15,110,207 -DB 102,72,15,110,209 -DB 102,73,15,110,218 -DB 102,72,15,110,226 - - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - call __bn_sqr8x_internal - call __bn_post4x_internal - -DB 102,72,15,126,209 -DB 102,72,15,126,226 - mov rdi,rsi - mov rax,QWORD[40+rsp] - lea r8,[32+rsp] - - call mul4x_internal - - mov rsi,QWORD[40+rsp] - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$power5_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_power5: - -global bn_sqr8x_internal - - -ALIGN 32 -bn_sqr8x_internal: -__bn_sqr8x_internal: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - lea rbp,[32+r10] - lea rsi,[r9*1+rsi] - - mov rcx,r9 - - - mov r14,QWORD[((-32))+rbp*1+rsi] - lea rdi,[((48+8))+r9*2+rsp] - mov rax,QWORD[((-24))+rbp*1+rsi] - lea rdi,[((-32))+rbp*1+rdi] - mov rbx,QWORD[((-16))+rbp*1+rsi] - mov r15,rax - - mul r14 - mov r10,rax - mov rax,rbx - mov r11,rdx - mov QWORD[((-24))+rbp*1+rdi],r10 - - mul r14 - add r11,rax - mov rax,rbx - adc rdx,0 - mov QWORD[((-16))+rbp*1+rdi],r11 - mov r10,rdx - - - mov rbx,QWORD[((-8))+rbp*1+rsi] - mul r15 - mov r12,rax - mov rax,rbx - mov r13,rdx - - lea rcx,[rbp] - mul r14 - add r10,rax - mov rax,rbx - mov r11,rdx - adc r11,0 - add 
r10,r12 - adc r11,0 - mov QWORD[((-8))+rcx*1+rdi],r10 - jmp NEAR $L$sqr4x_1st - -ALIGN 32 -$L$sqr4x_1st: - mov rbx,QWORD[rcx*1+rsi] - mul r15 - add r13,rax - mov rax,rbx - mov r12,rdx - adc r12,0 - - mul r14 - add r11,rax - mov rax,rbx - mov rbx,QWORD[8+rcx*1+rsi] - mov r10,rdx - adc r10,0 - add r11,r13 - adc r10,0 - - - mul r15 - add r12,rax - mov rax,rbx - mov QWORD[rcx*1+rdi],r11 - mov r13,rdx - adc r13,0 - - mul r14 - add r10,rax - mov rax,rbx - mov rbx,QWORD[16+rcx*1+rsi] - mov r11,rdx - adc r11,0 - add r10,r12 - adc r11,0 - - mul r15 - add r13,rax - mov rax,rbx - mov QWORD[8+rcx*1+rdi],r10 - mov r12,rdx - adc r12,0 - - mul r14 - add r11,rax - mov rax,rbx - mov rbx,QWORD[24+rcx*1+rsi] - mov r10,rdx - adc r10,0 - add r11,r13 - adc r10,0 - - - mul r15 - add r12,rax - mov rax,rbx - mov QWORD[16+rcx*1+rdi],r11 - mov r13,rdx - adc r13,0 - lea rcx,[32+rcx] - - mul r14 - add r10,rax - mov rax,rbx - mov r11,rdx - adc r11,0 - add r10,r12 - adc r11,0 - mov QWORD[((-8))+rcx*1+rdi],r10 - - cmp rcx,0 - jne NEAR $L$sqr4x_1st - - mul r15 - add r13,rax - lea rbp,[16+rbp] - adc rdx,0 - add r13,r11 - adc rdx,0 - - mov QWORD[rdi],r13 - mov r12,rdx - mov QWORD[8+rdi],rdx - jmp NEAR $L$sqr4x_outer - -ALIGN 32 -$L$sqr4x_outer: - mov r14,QWORD[((-32))+rbp*1+rsi] - lea rdi,[((48+8))+r9*2+rsp] - mov rax,QWORD[((-24))+rbp*1+rsi] - lea rdi,[((-32))+rbp*1+rdi] - mov rbx,QWORD[((-16))+rbp*1+rsi] - mov r15,rax - - mul r14 - mov r10,QWORD[((-24))+rbp*1+rdi] - add r10,rax - mov rax,rbx - adc rdx,0 - mov QWORD[((-24))+rbp*1+rdi],r10 - mov r11,rdx - - mul r14 - add r11,rax - mov rax,rbx - adc rdx,0 - add r11,QWORD[((-16))+rbp*1+rdi] - mov r10,rdx - adc r10,0 - mov QWORD[((-16))+rbp*1+rdi],r11 - - xor r12,r12 - - mov rbx,QWORD[((-8))+rbp*1+rsi] - mul r15 - add r12,rax - mov rax,rbx - adc rdx,0 - add r12,QWORD[((-8))+rbp*1+rdi] - mov r13,rdx - adc r13,0 - - mul r14 - add r10,rax - mov rax,rbx - adc rdx,0 - add r10,r12 - mov r11,rdx - adc r11,0 - mov QWORD[((-8))+rbp*1+rdi],r10 - - lea rcx,[rbp] 
- jmp NEAR $L$sqr4x_inner - -ALIGN 32 -$L$sqr4x_inner: - mov rbx,QWORD[rcx*1+rsi] - mul r15 - add r13,rax - mov rax,rbx - mov r12,rdx - adc r12,0 - add r13,QWORD[rcx*1+rdi] - adc r12,0 - -DB 0x67 - mul r14 - add r11,rax - mov rax,rbx - mov rbx,QWORD[8+rcx*1+rsi] - mov r10,rdx - adc r10,0 - add r11,r13 - adc r10,0 - - mul r15 - add r12,rax - mov QWORD[rcx*1+rdi],r11 - mov rax,rbx - mov r13,rdx - adc r13,0 - add r12,QWORD[8+rcx*1+rdi] - lea rcx,[16+rcx] - adc r13,0 - - mul r14 - add r10,rax - mov rax,rbx - adc rdx,0 - add r10,r12 - mov r11,rdx - adc r11,0 - mov QWORD[((-8))+rcx*1+rdi],r10 - - cmp rcx,0 - jne NEAR $L$sqr4x_inner - -DB 0x67 - mul r15 - add r13,rax - adc rdx,0 - add r13,r11 - adc rdx,0 - - mov QWORD[rdi],r13 - mov r12,rdx - mov QWORD[8+rdi],rdx - - add rbp,16 - jnz NEAR $L$sqr4x_outer - - - mov r14,QWORD[((-32))+rsi] - lea rdi,[((48+8))+r9*2+rsp] - mov rax,QWORD[((-24))+rsi] - lea rdi,[((-32))+rbp*1+rdi] - mov rbx,QWORD[((-16))+rsi] - mov r15,rax - - mul r14 - add r10,rax - mov rax,rbx - mov r11,rdx - adc r11,0 - - mul r14 - add r11,rax - mov rax,rbx - mov QWORD[((-24))+rdi],r10 - mov r10,rdx - adc r10,0 - add r11,r13 - mov rbx,QWORD[((-8))+rsi] - adc r10,0 - - mul r15 - add r12,rax - mov rax,rbx - mov QWORD[((-16))+rdi],r11 - mov r13,rdx - adc r13,0 - - mul r14 - add r10,rax - mov rax,rbx - mov r11,rdx - adc r11,0 - add r10,r12 - adc r11,0 - mov QWORD[((-8))+rdi],r10 - - mul r15 - add r13,rax - mov rax,QWORD[((-16))+rsi] - adc rdx,0 - add r13,r11 - adc rdx,0 - - mov QWORD[rdi],r13 - mov r12,rdx - mov QWORD[8+rdi],rdx - - mul rbx - add rbp,16 - xor r14,r14 - sub rbp,r9 - xor r15,r15 - - add rax,r12 - adc rdx,0 - mov QWORD[8+rdi],rax - mov QWORD[16+rdi],rdx - mov QWORD[24+rdi],r15 - - mov rax,QWORD[((-16))+rbp*1+rsi] - lea rdi,[((48+8))+rsp] - xor r10,r10 - mov r11,QWORD[8+rdi] - - lea r12,[r10*2+r14] - shr r10,63 - lea r13,[r11*2+rcx] - shr r11,63 - or r13,r10 - mov r10,QWORD[16+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[24+rdi] - adc 
r12,rax - mov rax,QWORD[((-8))+rbp*1+rsi] - mov QWORD[rdi],r12 - adc r13,rdx - - lea rbx,[r10*2+r14] - mov QWORD[8+rdi],r13 - sbb r15,r15 - shr r10,63 - lea r8,[r11*2+rcx] - shr r11,63 - or r8,r10 - mov r10,QWORD[32+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[40+rdi] - adc rbx,rax - mov rax,QWORD[rbp*1+rsi] - mov QWORD[16+rdi],rbx - adc r8,rdx - lea rbp,[16+rbp] - mov QWORD[24+rdi],r8 - sbb r15,r15 - lea rdi,[64+rdi] - jmp NEAR $L$sqr4x_shift_n_add - -ALIGN 32 -$L$sqr4x_shift_n_add: - lea r12,[r10*2+r14] - shr r10,63 - lea r13,[r11*2+rcx] - shr r11,63 - or r13,r10 - mov r10,QWORD[((-16))+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[((-8))+rdi] - adc r12,rax - mov rax,QWORD[((-8))+rbp*1+rsi] - mov QWORD[((-32))+rdi],r12 - adc r13,rdx - - lea rbx,[r10*2+r14] - mov QWORD[((-24))+rdi],r13 - sbb r15,r15 - shr r10,63 - lea r8,[r11*2+rcx] - shr r11,63 - or r8,r10 - mov r10,QWORD[rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[8+rdi] - adc rbx,rax - mov rax,QWORD[rbp*1+rsi] - mov QWORD[((-16))+rdi],rbx - adc r8,rdx - - lea r12,[r10*2+r14] - mov QWORD[((-8))+rdi],r8 - sbb r15,r15 - shr r10,63 - lea r13,[r11*2+rcx] - shr r11,63 - or r13,r10 - mov r10,QWORD[16+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[24+rdi] - adc r12,rax - mov rax,QWORD[8+rbp*1+rsi] - mov QWORD[rdi],r12 - adc r13,rdx - - lea rbx,[r10*2+r14] - mov QWORD[8+rdi],r13 - sbb r15,r15 - shr r10,63 - lea r8,[r11*2+rcx] - shr r11,63 - or r8,r10 - mov r10,QWORD[32+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[40+rdi] - adc rbx,rax - mov rax,QWORD[16+rbp*1+rsi] - mov QWORD[16+rdi],rbx - adc r8,rdx - mov QWORD[24+rdi],r8 - sbb r15,r15 - lea rdi,[64+rdi] - add rbp,32 - jnz NEAR $L$sqr4x_shift_n_add - - lea r12,[r10*2+r14] -DB 0x67 - shr r10,63 - lea r13,[r11*2+rcx] - shr r11,63 - or r13,r10 - mov r10,QWORD[((-16))+rdi] - mov r14,r11 - mul rax - neg r15 - mov r11,QWORD[((-8))+rdi] - adc r12,rax - mov rax,QWORD[((-8))+rsi] - mov QWORD[((-32))+rdi],r12 - adc r13,rdx - - lea 
rbx,[r10*2+r14] - mov QWORD[((-24))+rdi],r13 - sbb r15,r15 - shr r10,63 - lea r8,[r11*2+rcx] - shr r11,63 - or r8,r10 - mul rax - neg r15 - adc rbx,rax - adc r8,rdx - mov QWORD[((-16))+rdi],rbx - mov QWORD[((-8))+rdi],r8 -DB 102,72,15,126,213 -__bn_sqr8x_reduction: - xor rax,rax - lea rcx,[rbp*1+r9] - lea rdx,[((48+8))+r9*2+rsp] - mov QWORD[((0+8))+rsp],rcx - lea rdi,[((48+8))+r9*1+rsp] - mov QWORD[((8+8))+rsp],rdx - neg r9 - jmp NEAR $L$8x_reduction_loop - -ALIGN 32 -$L$8x_reduction_loop: - lea rdi,[r9*1+rdi] -DB 0x66 - mov rbx,QWORD[rdi] - mov r9,QWORD[8+rdi] - mov r10,QWORD[16+rdi] - mov r11,QWORD[24+rdi] - mov r12,QWORD[32+rdi] - mov r13,QWORD[40+rdi] - mov r14,QWORD[48+rdi] - mov r15,QWORD[56+rdi] - mov QWORD[rdx],rax - lea rdi,[64+rdi] - -DB 0x67 - mov r8,rbx - imul rbx,QWORD[((32+8))+rsp] - mov rax,QWORD[rbp] - mov ecx,8 - jmp NEAR $L$8x_reduce - -ALIGN 32 -$L$8x_reduce: - mul rbx - mov rax,QWORD[8+rbp] - neg r8 - mov r8,rdx - adc r8,0 - - mul rbx - add r9,rax - mov rax,QWORD[16+rbp] - adc rdx,0 - add r8,r9 - mov QWORD[((48-8+8))+rcx*8+rsp],rbx - mov r9,rdx - adc r9,0 - - mul rbx - add r10,rax - mov rax,QWORD[24+rbp] - adc rdx,0 - add r9,r10 - mov rsi,QWORD[((32+8))+rsp] - mov r10,rdx - adc r10,0 - - mul rbx - add r11,rax - mov rax,QWORD[32+rbp] - adc rdx,0 - imul rsi,r8 - add r10,r11 - mov r11,rdx - adc r11,0 - - mul rbx - add r12,rax - mov rax,QWORD[40+rbp] - adc rdx,0 - add r11,r12 - mov r12,rdx - adc r12,0 - - mul rbx - add r13,rax - mov rax,QWORD[48+rbp] - adc rdx,0 - add r12,r13 - mov r13,rdx - adc r13,0 - - mul rbx - add r14,rax - mov rax,QWORD[56+rbp] - adc rdx,0 - add r13,r14 - mov r14,rdx - adc r14,0 - - mul rbx - mov rbx,rsi - add r15,rax - mov rax,QWORD[rbp] - adc rdx,0 - add r14,r15 - mov r15,rdx - adc r15,0 - - dec ecx - jnz NEAR $L$8x_reduce - - lea rbp,[64+rbp] - xor rax,rax - mov rdx,QWORD[((8+8))+rsp] - cmp rbp,QWORD[((0+8))+rsp] - jae NEAR $L$8x_no_tail - -DB 0x66 - add r8,QWORD[rdi] - adc r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc 
r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - sbb rsi,rsi - - mov rbx,QWORD[((48+56+8))+rsp] - mov ecx,8 - mov rax,QWORD[rbp] - jmp NEAR $L$8x_tail - -ALIGN 32 -$L$8x_tail: - mul rbx - add r8,rax - mov rax,QWORD[8+rbp] - mov QWORD[rdi],r8 - mov r8,rdx - adc r8,0 - - mul rbx - add r9,rax - mov rax,QWORD[16+rbp] - adc rdx,0 - add r8,r9 - lea rdi,[8+rdi] - mov r9,rdx - adc r9,0 - - mul rbx - add r10,rax - mov rax,QWORD[24+rbp] - adc rdx,0 - add r9,r10 - mov r10,rdx - adc r10,0 - - mul rbx - add r11,rax - mov rax,QWORD[32+rbp] - adc rdx,0 - add r10,r11 - mov r11,rdx - adc r11,0 - - mul rbx - add r12,rax - mov rax,QWORD[40+rbp] - adc rdx,0 - add r11,r12 - mov r12,rdx - adc r12,0 - - mul rbx - add r13,rax - mov rax,QWORD[48+rbp] - adc rdx,0 - add r12,r13 - mov r13,rdx - adc r13,0 - - mul rbx - add r14,rax - mov rax,QWORD[56+rbp] - adc rdx,0 - add r13,r14 - mov r14,rdx - adc r14,0 - - mul rbx - mov rbx,QWORD[((48-16+8))+rcx*8+rsp] - add r15,rax - adc rdx,0 - add r14,r15 - mov rax,QWORD[rbp] - mov r15,rdx - adc r15,0 - - dec ecx - jnz NEAR $L$8x_tail - - lea rbp,[64+rbp] - mov rdx,QWORD[((8+8))+rsp] - cmp rbp,QWORD[((0+8))+rsp] - jae NEAR $L$8x_tail_done - - mov rbx,QWORD[((48+56+8))+rsp] - neg rsi - mov rax,QWORD[rbp] - adc r8,QWORD[rdi] - adc r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - sbb rsi,rsi - - mov ecx,8 - jmp NEAR $L$8x_tail - -ALIGN 32 -$L$8x_tail_done: - xor rax,rax - add r8,QWORD[rdx] - adc r9,0 - adc r10,0 - adc r11,0 - adc r12,0 - adc r13,0 - adc r14,0 - adc r15,0 - adc rax,0 - - neg rsi -$L$8x_no_tail: - adc r8,QWORD[rdi] - adc r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - adc rax,0 - mov rcx,QWORD[((-8))+rbp] - xor rsi,rsi - -DB 102,72,15,126,213 - - mov 
QWORD[rdi],r8 - mov QWORD[8+rdi],r9 -DB 102,73,15,126,217 - mov QWORD[16+rdi],r10 - mov QWORD[24+rdi],r11 - mov QWORD[32+rdi],r12 - mov QWORD[40+rdi],r13 - mov QWORD[48+rdi],r14 - mov QWORD[56+rdi],r15 - lea rdi,[64+rdi] - - cmp rdi,rdx - jb NEAR $L$8x_reduction_loop - DB 0F3h,0C3h ;repret - - - -ALIGN 32 -__bn_post4x_internal: - - mov r12,QWORD[rbp] - lea rbx,[r9*1+rdi] - mov rcx,r9 -DB 102,72,15,126,207 - neg rax -DB 102,72,15,126,206 - sar rcx,3+2 - dec r12 - xor r10,r10 - mov r13,QWORD[8+rbp] - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] - jmp NEAR $L$sqr4x_sub_entry - -ALIGN 16 -$L$sqr4x_sub: - mov r12,QWORD[rbp] - mov r13,QWORD[8+rbp] - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] -$L$sqr4x_sub_entry: - lea rbp,[32+rbp] - not r12 - not r13 - not r14 - not r15 - and r12,rax - and r13,rax - and r14,rax - and r15,rax - - neg r10 - adc r12,QWORD[rbx] - adc r13,QWORD[8+rbx] - adc r14,QWORD[16+rbx] - adc r15,QWORD[24+rbx] - mov QWORD[rdi],r12 - lea rbx,[32+rbx] - mov QWORD[8+rdi],r13 - sbb r10,r10 - mov QWORD[16+rdi],r14 - mov QWORD[24+rdi],r15 - lea rdi,[32+rdi] - - inc rcx - jnz NEAR $L$sqr4x_sub - - mov r10,r9 - neg r9 - DB 0F3h,0C3h ;repret - - -global bn_from_montgomery - -ALIGN 32 -bn_from_montgomery: - - test DWORD[48+rsp],7 - jz NEAR bn_from_mont8x - xor eax,eax - DB 0F3h,0C3h ;repret - - - - -ALIGN 32 -bn_from_mont8x: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_from_mont8x: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - -DB 0x67 - mov rax,rsp - - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$from_prologue: - - shl r9d,3 - lea r10,[r9*2+r9] - neg r9 - mov r8,QWORD[r8] - - - - - - - - - lea r11,[((-320))+r9*2+rsp] - mov rbp,rsp - sub r11,rdi - and r11,4095 - cmp r10,r11 - jb NEAR $L$from_sp_alt - sub rbp,r11 - lea rbp,[((-320))+r9*2+rbp] - jmp NEAR $L$from_sp_done - -ALIGN 32 -$L$from_sp_alt: - lea 
r10,[((4096-320))+r9*2] - lea rbp,[((-320))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$from_sp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$from_page_walk - jmp NEAR $L$from_page_walk_done - -$L$from_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$from_page_walk -$L$from_page_walk_done: - - mov r10,r9 - neg r9 - - - - - - - - - - - mov QWORD[32+rsp],r8 - mov QWORD[40+rsp],rax - -$L$from_body: - mov r11,r9 - lea rax,[48+rsp] - pxor xmm0,xmm0 - jmp NEAR $L$mul_by_1 - -ALIGN 32 -$L$mul_by_1: - movdqu xmm1,XMMWORD[rsi] - movdqu xmm2,XMMWORD[16+rsi] - movdqu xmm3,XMMWORD[32+rsi] - movdqa XMMWORD[r9*1+rax],xmm0 - movdqu xmm4,XMMWORD[48+rsi] - movdqa XMMWORD[16+r9*1+rax],xmm0 -DB 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 - movdqa XMMWORD[rax],xmm1 - movdqa XMMWORD[32+r9*1+rax],xmm0 - movdqa XMMWORD[16+rax],xmm2 - movdqa XMMWORD[48+r9*1+rax],xmm0 - movdqa XMMWORD[32+rax],xmm3 - movdqa XMMWORD[48+rax],xmm4 - lea rax,[64+rax] - sub r11,64 - jnz NEAR $L$mul_by_1 - -DB 102,72,15,110,207 -DB 102,72,15,110,209 -DB 0x67 - mov rbp,rcx -DB 102,73,15,110,218 - lea r11,[OPENSSL_ia32cap_P] - mov r11d,DWORD[8+r11] - and r11d,0x80108 - cmp r11d,0x80108 - jne NEAR $L$from_mont_nox - - lea rdi,[r9*1+rax] - call __bn_sqrx8x_reduction - call __bn_postx4x_internal - - pxor xmm0,xmm0 - lea rax,[48+rsp] - jmp NEAR $L$from_mont_zero - -ALIGN 32 -$L$from_mont_nox: - call __bn_sqr8x_reduction - call __bn_post4x_internal - - pxor xmm0,xmm0 - lea rax,[48+rsp] - jmp NEAR $L$from_mont_zero - -ALIGN 32 -$L$from_mont_zero: - mov rsi,QWORD[40+rsp] - - movdqa XMMWORD[rax],xmm0 - movdqa XMMWORD[16+rax],xmm0 - movdqa XMMWORD[32+rax],xmm0 - movdqa XMMWORD[48+rax],xmm0 - lea rax,[64+rax] - sub r9,32 - jnz NEAR $L$from_mont_zero - - mov rax,1 - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - 
mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$from_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_from_mont8x: - -ALIGN 32 -bn_mulx4x_mont_gather5: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_mulx4x_mont_gather5: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov rax,rsp - -$L$mulx4x_enter: - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$mulx4x_prologue: - - shl r9d,3 - lea r10,[r9*2+r9] - neg r9 - mov r8,QWORD[r8] - - - - - - - - - - - lea r11,[((-320))+r9*2+rsp] - mov rbp,rsp - sub r11,rdi - and r11,4095 - cmp r10,r11 - jb NEAR $L$mulx4xsp_alt - sub rbp,r11 - lea rbp,[((-320))+r9*2+rbp] - jmp NEAR $L$mulx4xsp_done - -$L$mulx4xsp_alt: - lea r10,[((4096-320))+r9*2] - lea rbp,[((-320))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$mulx4xsp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mulx4x_page_walk - jmp NEAR $L$mulx4x_page_walk_done - -$L$mulx4x_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$mulx4x_page_walk -$L$mulx4x_page_walk_done: - - - - - - - - - - - - - - mov QWORD[32+rsp],r8 - mov QWORD[40+rsp],rax - -$L$mulx4x_body: - call mulx4x_internal - - mov rsi,QWORD[40+rsp] - - mov rax,1 - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$mulx4x_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_mulx4x_mont_gather5: - - -ALIGN 32 -mulx4x_internal: - - mov QWORD[8+rsp],r9 - mov r10,r9 - neg r9 - shl r9,5 - neg r10 - lea r13,[128+r9*1+rdx] - shr r9,5+5 - movd 
xmm5,DWORD[56+rax] - sub r9,1 - lea rax,[$L$inc] - mov QWORD[((16+8))+rsp],r13 - mov QWORD[((24+8))+rsp],r9 - mov QWORD[((56+8))+rsp],rdi - movdqa xmm0,XMMWORD[rax] - movdqa xmm1,XMMWORD[16+rax] - lea r10,[((88-112))+r10*1+rsp] - lea rdi,[128+rdx] - - pshufd xmm5,xmm5,0 - movdqa xmm4,xmm1 -DB 0x67 - movdqa xmm2,xmm1 -DB 0x67 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[112+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[128+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[144+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[160+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[176+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[192+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[208+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[224+r10],xmm3 - movdqa xmm3,xmm4 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[240+r10],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[256+r10],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[272+r10],xmm2 - movdqa xmm2,xmm4 - - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[288+r10],xmm3 - movdqa xmm3,xmm4 -DB 0x67 - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[304+r10],xmm0 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[320+r10],xmm1 - - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[336+r10],xmm2 - - pand xmm0,XMMWORD[64+rdi] - pand xmm1,XMMWORD[80+rdi] - pand xmm2,XMMWORD[96+rdi] - movdqa XMMWORD[352+r10],xmm3 - pand xmm3,XMMWORD[112+rdi] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[((-128))+rdi] - movdqa xmm5,XMMWORD[((-112))+rdi] - movdqa xmm2,XMMWORD[((-96))+rdi] - pand xmm4,XMMWORD[112+r10] - movdqa 
xmm3,XMMWORD[((-80))+rdi] - pand xmm5,XMMWORD[128+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[144+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[160+r10] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[((-64))+rdi] - movdqa xmm5,XMMWORD[((-48))+rdi] - movdqa xmm2,XMMWORD[((-32))+rdi] - pand xmm4,XMMWORD[176+r10] - movdqa xmm3,XMMWORD[((-16))+rdi] - pand xmm5,XMMWORD[192+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[208+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[224+r10] - por xmm0,xmm2 - por xmm1,xmm3 - movdqa xmm4,XMMWORD[rdi] - movdqa xmm5,XMMWORD[16+rdi] - movdqa xmm2,XMMWORD[32+rdi] - pand xmm4,XMMWORD[240+r10] - movdqa xmm3,XMMWORD[48+rdi] - pand xmm5,XMMWORD[256+r10] - por xmm0,xmm4 - pand xmm2,XMMWORD[272+r10] - por xmm1,xmm5 - pand xmm3,XMMWORD[288+r10] - por xmm0,xmm2 - por xmm1,xmm3 - pxor xmm0,xmm1 - pshufd xmm1,xmm0,0x4e - por xmm0,xmm1 - lea rdi,[256+rdi] -DB 102,72,15,126,194 - lea rbx,[((64+32+8))+rsp] - - mov r9,rdx - mulx rax,r8,QWORD[rsi] - mulx r12,r11,QWORD[8+rsi] - add r11,rax - mulx r13,rax,QWORD[16+rsi] - adc r12,rax - adc r13,0 - mulx r14,rax,QWORD[24+rsi] - - mov r15,r8 - imul r8,QWORD[((32+8))+rsp] - xor rbp,rbp - mov rdx,r8 - - mov QWORD[((8+8))+rsp],rdi - - lea rsi,[32+rsi] - adcx r13,rax - adcx r14,rbp - - mulx r10,rax,QWORD[rcx] - adcx r15,rax - adox r10,r11 - mulx r11,rax,QWORD[8+rcx] - adcx r10,rax - adox r11,r12 - mulx r12,rax,QWORD[16+rcx] - mov rdi,QWORD[((24+8))+rsp] - mov QWORD[((-32))+rbx],r10 - adcx r11,rax - adox r12,r13 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-24))+rbx],r11 - adcx r12,rax - adox r15,rbp - lea rcx,[32+rcx] - mov QWORD[((-16))+rbx],r12 - jmp NEAR $L$mulx4x_1st - -ALIGN 32 -$L$mulx4x_1st: - adcx r15,rbp - mulx rax,r10,QWORD[rsi] - adcx r10,r14 - mulx r14,r11,QWORD[8+rsi] - adcx r11,rax - mulx rax,r12,QWORD[16+rsi] - adcx r12,r14 - mulx r14,r13,QWORD[24+rsi] -DB 0x67,0x67 - mov rdx,r8 - adcx r13,rax - adcx r14,rbp - lea rsi,[32+rsi] - lea rbx,[32+rbx] - - adox r10,r15 - mulx r15,rax,QWORD[rcx] - adcx 
r10,rax - adox r11,r15 - mulx r15,rax,QWORD[8+rcx] - adcx r11,rax - adox r12,r15 - mulx r15,rax,QWORD[16+rcx] - mov QWORD[((-40))+rbx],r10 - adcx r12,rax - mov QWORD[((-32))+rbx],r11 - adox r13,r15 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov QWORD[((-24))+rbx],r12 - adcx r13,rax - adox r15,rbp - lea rcx,[32+rcx] - mov QWORD[((-16))+rbx],r13 - - dec rdi - jnz NEAR $L$mulx4x_1st - - mov rax,QWORD[8+rsp] - adc r15,rbp - lea rsi,[rax*1+rsi] - add r14,r15 - mov rdi,QWORD[((8+8))+rsp] - adc rbp,rbp - mov QWORD[((-8))+rbx],r14 - jmp NEAR $L$mulx4x_outer - -ALIGN 32 -$L$mulx4x_outer: - lea r10,[((16-256))+rbx] - pxor xmm4,xmm4 -DB 0x67,0x67 - pxor xmm5,xmm5 - movdqa xmm0,XMMWORD[((-128))+rdi] - movdqa xmm1,XMMWORD[((-112))+rdi] - movdqa xmm2,XMMWORD[((-96))+rdi] - pand xmm0,XMMWORD[256+r10] - movdqa xmm3,XMMWORD[((-80))+rdi] - pand xmm1,XMMWORD[272+r10] - por xmm4,xmm0 - pand xmm2,XMMWORD[288+r10] - por xmm5,xmm1 - pand xmm3,XMMWORD[304+r10] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[((-64))+rdi] - movdqa xmm1,XMMWORD[((-48))+rdi] - movdqa xmm2,XMMWORD[((-32))+rdi] - pand xmm0,XMMWORD[320+r10] - movdqa xmm3,XMMWORD[((-16))+rdi] - pand xmm1,XMMWORD[336+r10] - por xmm4,xmm0 - pand xmm2,XMMWORD[352+r10] - por xmm5,xmm1 - pand xmm3,XMMWORD[368+r10] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[rdi] - movdqa xmm1,XMMWORD[16+rdi] - movdqa xmm2,XMMWORD[32+rdi] - pand xmm0,XMMWORD[384+r10] - movdqa xmm3,XMMWORD[48+rdi] - pand xmm1,XMMWORD[400+r10] - por xmm4,xmm0 - pand xmm2,XMMWORD[416+r10] - por xmm5,xmm1 - pand xmm3,XMMWORD[432+r10] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[64+rdi] - movdqa xmm1,XMMWORD[80+rdi] - movdqa xmm2,XMMWORD[96+rdi] - pand xmm0,XMMWORD[448+r10] - movdqa xmm3,XMMWORD[112+rdi] - pand xmm1,XMMWORD[464+r10] - por xmm4,xmm0 - pand xmm2,XMMWORD[480+r10] - por xmm5,xmm1 - pand xmm3,XMMWORD[496+r10] - por xmm4,xmm2 - por xmm5,xmm3 - por xmm4,xmm5 - pshufd xmm0,xmm4,0x4e - por xmm0,xmm4 - lea rdi,[256+rdi] -DB 102,72,15,126,194 
- - mov QWORD[rbx],rbp - lea rbx,[32+rax*1+rbx] - mulx r11,r8,QWORD[rsi] - xor rbp,rbp - mov r9,rdx - mulx r12,r14,QWORD[8+rsi] - adox r8,QWORD[((-32))+rbx] - adcx r11,r14 - mulx r13,r15,QWORD[16+rsi] - adox r11,QWORD[((-24))+rbx] - adcx r12,r15 - mulx r14,rdx,QWORD[24+rsi] - adox r12,QWORD[((-16))+rbx] - adcx r13,rdx - lea rcx,[rax*1+rcx] - lea rsi,[32+rsi] - adox r13,QWORD[((-8))+rbx] - adcx r14,rbp - adox r14,rbp - - mov r15,r8 - imul r8,QWORD[((32+8))+rsp] - - mov rdx,r8 - xor rbp,rbp - mov QWORD[((8+8))+rsp],rdi - - mulx r10,rax,QWORD[rcx] - adcx r15,rax - adox r10,r11 - mulx r11,rax,QWORD[8+rcx] - adcx r10,rax - adox r11,r12 - mulx r12,rax,QWORD[16+rcx] - adcx r11,rax - adox r12,r13 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - mov rdi,QWORD[((24+8))+rsp] - mov QWORD[((-32))+rbx],r10 - adcx r12,rax - mov QWORD[((-24))+rbx],r11 - adox r15,rbp - mov QWORD[((-16))+rbx],r12 - lea rcx,[32+rcx] - jmp NEAR $L$mulx4x_inner - -ALIGN 32 -$L$mulx4x_inner: - mulx rax,r10,QWORD[rsi] - adcx r15,rbp - adox r10,r14 - mulx r14,r11,QWORD[8+rsi] - adcx r10,QWORD[rbx] - adox r11,rax - mulx rax,r12,QWORD[16+rsi] - adcx r11,QWORD[8+rbx] - adox r12,r14 - mulx r14,r13,QWORD[24+rsi] - mov rdx,r8 - adcx r12,QWORD[16+rbx] - adox r13,rax - adcx r13,QWORD[24+rbx] - adox r14,rbp - lea rsi,[32+rsi] - lea rbx,[32+rbx] - adcx r14,rbp - - adox r10,r15 - mulx r15,rax,QWORD[rcx] - adcx r10,rax - adox r11,r15 - mulx r15,rax,QWORD[8+rcx] - adcx r11,rax - adox r12,r15 - mulx r15,rax,QWORD[16+rcx] - mov QWORD[((-40))+rbx],r10 - adcx r12,rax - adox r13,r15 - mov QWORD[((-32))+rbx],r11 - mulx r15,rax,QWORD[24+rcx] - mov rdx,r9 - lea rcx,[32+rcx] - mov QWORD[((-24))+rbx],r12 - adcx r13,rax - adox r15,rbp - mov QWORD[((-16))+rbx],r13 - - dec rdi - jnz NEAR $L$mulx4x_inner - - mov rax,QWORD[((0+8))+rsp] - adc r15,rbp - sub rdi,QWORD[rbx] - mov rdi,QWORD[((8+8))+rsp] - mov r10,QWORD[((16+8))+rsp] - adc r14,r15 - lea rsi,[rax*1+rsi] - adc rbp,rbp - mov QWORD[((-8))+rbx],r14 - - cmp rdi,r10 - jb NEAR 
$L$mulx4x_outer - - mov r10,QWORD[((-8))+rcx] - mov r8,rbp - mov r12,QWORD[rax*1+rcx] - lea rbp,[rax*1+rcx] - mov rcx,rax - lea rdi,[rax*1+rbx] - xor eax,eax - xor r15,r15 - sub r10,r14 - adc r15,r15 - or r8,r15 - sar rcx,3+2 - sub rax,r8 - mov rdx,QWORD[((56+8))+rsp] - dec r12 - mov r13,QWORD[8+rbp] - xor r8,r8 - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] - jmp NEAR $L$sqrx4x_sub_entry - - - -ALIGN 32 -bn_powerx5: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_bn_powerx5: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - mov rcx,r9 - mov r8,QWORD[40+rsp] - mov r9,QWORD[48+rsp] - - - - mov rax,rsp - -$L$powerx5_enter: - push rbx - - push rbp - - push r12 - - push r13 - - push r14 - - push r15 - -$L$powerx5_prologue: - - shl r9d,3 - lea r10,[r9*2+r9] - neg r9 - mov r8,QWORD[r8] - - - - - - - - - lea r11,[((-320))+r9*2+rsp] - mov rbp,rsp - sub r11,rdi - and r11,4095 - cmp r10,r11 - jb NEAR $L$pwrx_sp_alt - sub rbp,r11 - lea rbp,[((-320))+r9*2+rbp] - jmp NEAR $L$pwrx_sp_done - -ALIGN 32 -$L$pwrx_sp_alt: - lea r10,[((4096-320))+r9*2] - lea rbp,[((-320))+r9*2+rbp] - sub r11,r10 - mov r10,0 - cmovc r11,r10 - sub rbp,r11 -$L$pwrx_sp_done: - and rbp,-64 - mov r11,rsp - sub r11,rbp - and r11,-4096 - lea rsp,[rbp*1+r11] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$pwrx_page_walk - jmp NEAR $L$pwrx_page_walk_done - -$L$pwrx_page_walk: - lea rsp,[((-4096))+rsp] - mov r10,QWORD[rsp] - cmp rsp,rbp - ja NEAR $L$pwrx_page_walk -$L$pwrx_page_walk_done: - - mov r10,r9 - neg r9 - - - - - - - - - - - - - pxor xmm0,xmm0 -DB 102,72,15,110,207 -DB 102,72,15,110,209 -DB 102,73,15,110,218 -DB 102,72,15,110,226 - mov QWORD[32+rsp],r8 - mov QWORD[40+rsp],rax - -$L$powerx5_body: - - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call __bn_postx4x_internal - call __bn_sqrx8x_internal - call 
__bn_postx4x_internal - - mov r9,r10 - mov rdi,rsi -DB 102,72,15,126,209 -DB 102,72,15,126,226 - mov rax,QWORD[40+rsp] - - call mulx4x_internal - - mov rsi,QWORD[40+rsp] - - mov rax,1 - - mov r15,QWORD[((-48))+rsi] - - mov r14,QWORD[((-40))+rsi] - - mov r13,QWORD[((-32))+rsi] - - mov r12,QWORD[((-24))+rsi] - - mov rbp,QWORD[((-16))+rsi] - - mov rbx,QWORD[((-8))+rsi] - - lea rsp,[rsi] - -$L$powerx5_epilogue: - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$SEH_end_bn_powerx5: - -global bn_sqrx8x_internal - - -ALIGN 32 -bn_sqrx8x_internal: -__bn_sqrx8x_internal: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - lea rdi,[((48+8))+rsp] - lea rbp,[r9*1+rsi] - mov QWORD[((0+8))+rsp],r9 - mov QWORD[((8+8))+rsp],rbp - jmp NEAR $L$sqr8x_zero_start - -ALIGN 32 -DB 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 -$L$sqrx8x_zero: -DB 0x3e - movdqa XMMWORD[rdi],xmm0 - movdqa XMMWORD[16+rdi],xmm0 - movdqa XMMWORD[32+rdi],xmm0 - movdqa XMMWORD[48+rdi],xmm0 -$L$sqr8x_zero_start: - movdqa XMMWORD[64+rdi],xmm0 - movdqa XMMWORD[80+rdi],xmm0 - movdqa XMMWORD[96+rdi],xmm0 - movdqa XMMWORD[112+rdi],xmm0 - lea rdi,[128+rdi] - sub r9,64 - jnz NEAR $L$sqrx8x_zero - - mov rdx,QWORD[rsi] - - xor r10,r10 - xor r11,r11 - xor r12,r12 - xor r13,r13 - xor r14,r14 - xor r15,r15 - lea rdi,[((48+8))+rsp] - xor rbp,rbp - jmp NEAR $L$sqrx8x_outer_loop - -ALIGN 32 -$L$sqrx8x_outer_loop: - mulx rax,r8,QWORD[8+rsi] - adcx r8,r9 - adox r10,rax - mulx rax,r9,QWORD[16+rsi] - adcx r9,r10 - adox r11,rax -DB 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 - adcx r10,r11 - adox r12,rax -DB 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 - adcx r11,r12 - adox r13,rax - mulx rax,r12,QWORD[40+rsi] - adcx r12,r13 - adox r14,rax - mulx rax,r13,QWORD[48+rsi] - adcx r13,r14 - adox rax,r15 - mulx r15,r14,QWORD[56+rsi] - mov rdx,QWORD[8+rsi] - adcx r14,rax - adox r15,rbp - adc r15,QWORD[64+rdi] - mov QWORD[8+rdi],r8 - mov QWORD[16+rdi],r9 
- sbb rcx,rcx - xor rbp,rbp - - - mulx rbx,r8,QWORD[16+rsi] - mulx rax,r9,QWORD[24+rsi] - adcx r8,r10 - adox r9,rbx - mulx rbx,r10,QWORD[32+rsi] - adcx r9,r11 - adox r10,rax -DB 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 - adcx r10,r12 - adox r11,rbx -DB 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 - adcx r11,r13 - adox r12,r14 -DB 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 - mov rdx,QWORD[16+rsi] - adcx r12,rax - adox r13,rbx - adcx r13,r15 - adox r14,rbp - adcx r14,rbp - - mov QWORD[24+rdi],r8 - mov QWORD[32+rdi],r9 - - mulx rbx,r8,QWORD[24+rsi] - mulx rax,r9,QWORD[32+rsi] - adcx r8,r10 - adox r9,rbx - mulx rbx,r10,QWORD[40+rsi] - adcx r9,r11 - adox r10,rax -DB 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 - adcx r10,r12 - adox r11,r13 -DB 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 -DB 0x3e - mov rdx,QWORD[24+rsi] - adcx r11,rbx - adox r12,rax - adcx r12,r14 - mov QWORD[40+rdi],r8 - mov QWORD[48+rdi],r9 - mulx rax,r8,QWORD[32+rsi] - adox r13,rbp - adcx r13,rbp - - mulx rbx,r9,QWORD[40+rsi] - adcx r8,r10 - adox r9,rax - mulx rax,r10,QWORD[48+rsi] - adcx r9,r11 - adox r10,r12 - mulx r12,r11,QWORD[56+rsi] - mov rdx,QWORD[32+rsi] - mov r14,QWORD[40+rsi] - adcx r10,rbx - adox r11,rax - mov r15,QWORD[48+rsi] - adcx r11,r13 - adox r12,rbp - adcx r12,rbp - - mov QWORD[56+rdi],r8 - mov QWORD[64+rdi],r9 - - mulx rax,r9,r14 - mov r8,QWORD[56+rsi] - adcx r9,r10 - mulx rbx,r10,r15 - adox r10,rax - adcx r10,r11 - mulx rax,r11,r8 - mov rdx,r14 - adox r11,rbx - adcx r11,r12 - - adcx rax,rbp - - mulx rbx,r14,r15 - mulx r13,r12,r8 - mov rdx,r15 - lea rsi,[64+rsi] - adcx r11,r14 - adox r12,rbx - adcx r12,rax - adox r13,rbp - -DB 0x67,0x67 - mulx r14,r8,r8 - adcx r13,r8 - adcx r14,rbp - - cmp rsi,QWORD[((8+8))+rsp] - je NEAR $L$sqrx8x_outer_break - - neg rcx - mov rcx,-8 - mov r15,rbp - mov r8,QWORD[64+rdi] - adcx r9,QWORD[72+rdi] - adcx r10,QWORD[80+rdi] - adcx r11,QWORD[88+rdi] - adc r12,QWORD[96+rdi] - adc r13,QWORD[104+rdi] - adc r14,QWORD[112+rdi] - adc r15,QWORD[120+rdi] 
- lea rbp,[rsi] - lea rdi,[128+rdi] - sbb rax,rax - - mov rdx,QWORD[((-64))+rsi] - mov QWORD[((16+8))+rsp],rax - mov QWORD[((24+8))+rsp],rdi - - - xor eax,eax - jmp NEAR $L$sqrx8x_loop - -ALIGN 32 -$L$sqrx8x_loop: - mov rbx,r8 - mulx r8,rax,QWORD[rbp] - adcx rbx,rax - adox r8,r9 - - mulx r9,rax,QWORD[8+rbp] - adcx r8,rax - adox r9,r10 - - mulx r10,rax,QWORD[16+rbp] - adcx r9,rax - adox r10,r11 - - mulx r11,rax,QWORD[24+rbp] - adcx r10,rax - adox r11,r12 - -DB 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcx r11,rax - adox r12,r13 - - mulx r13,rax,QWORD[40+rbp] - adcx r12,rax - adox r13,r14 - - mulx r14,rax,QWORD[48+rbp] - mov QWORD[rcx*8+rdi],rbx - mov ebx,0 - adcx r13,rax - adox r14,r15 - -DB 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 - mov rdx,QWORD[8+rcx*8+rsi] - adcx r14,rax - adox r15,rbx - adcx r15,rbx - -DB 0x67 - inc rcx - jnz NEAR $L$sqrx8x_loop - - lea rbp,[64+rbp] - mov rcx,-8 - cmp rbp,QWORD[((8+8))+rsp] - je NEAR $L$sqrx8x_break - - sub rbx,QWORD[((16+8))+rsp] -DB 0x66 - mov rdx,QWORD[((-64))+rsi] - adcx r8,QWORD[rdi] - adcx r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - lea rdi,[64+rdi] -DB 0x67 - sbb rax,rax - xor ebx,ebx - mov QWORD[((16+8))+rsp],rax - jmp NEAR $L$sqrx8x_loop - -ALIGN 32 -$L$sqrx8x_break: - xor rbp,rbp - sub rbx,QWORD[((16+8))+rsp] - adcx r8,rbp - mov rcx,QWORD[((24+8))+rsp] - adcx r9,rbp - mov rdx,QWORD[rsi] - adc r10,0 - mov QWORD[rdi],r8 - adc r11,0 - adc r12,0 - adc r13,0 - adc r14,0 - adc r15,0 - cmp rdi,rcx - je NEAR $L$sqrx8x_outer_loop - - mov QWORD[8+rdi],r9 - mov r9,QWORD[8+rcx] - mov QWORD[16+rdi],r10 - mov r10,QWORD[16+rcx] - mov QWORD[24+rdi],r11 - mov r11,QWORD[24+rcx] - mov QWORD[32+rdi],r12 - mov r12,QWORD[32+rcx] - mov QWORD[40+rdi],r13 - mov r13,QWORD[40+rcx] - mov QWORD[48+rdi],r14 - mov r14,QWORD[48+rcx] - mov QWORD[56+rdi],r15 - mov r15,QWORD[56+rcx] - mov rdi,rcx - jmp NEAR 
$L$sqrx8x_outer_loop - -ALIGN 32 -$L$sqrx8x_outer_break: - mov QWORD[72+rdi],r9 -DB 102,72,15,126,217 - mov QWORD[80+rdi],r10 - mov QWORD[88+rdi],r11 - mov QWORD[96+rdi],r12 - mov QWORD[104+rdi],r13 - mov QWORD[112+rdi],r14 - lea rdi,[((48+8))+rsp] - mov rdx,QWORD[rcx*1+rsi] - - mov r11,QWORD[8+rdi] - xor r10,r10 - mov r9,QWORD[((0+8))+rsp] - adox r11,r11 - mov r12,QWORD[16+rdi] - mov r13,QWORD[24+rdi] - - -ALIGN 32 -$L$sqrx4x_shift_n_add: - mulx rbx,rax,rdx - adox r12,r12 - adcx rax,r10 -DB 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 -DB 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 - adox r13,r13 - adcx rbx,r11 - mov r11,QWORD[40+rdi] - mov QWORD[rdi],rax - mov QWORD[8+rdi],rbx - - mulx rbx,rax,rdx - adox r10,r10 - adcx rax,r12 - mov rdx,QWORD[16+rcx*1+rsi] - mov r12,QWORD[48+rdi] - adox r11,r11 - adcx rbx,r13 - mov r13,QWORD[56+rdi] - mov QWORD[16+rdi],rax - mov QWORD[24+rdi],rbx - - mulx rbx,rax,rdx - adox r12,r12 - adcx rax,r10 - mov rdx,QWORD[24+rcx*1+rsi] - lea rcx,[32+rcx] - mov r10,QWORD[64+rdi] - adox r13,r13 - adcx rbx,r11 - mov r11,QWORD[72+rdi] - mov QWORD[32+rdi],rax - mov QWORD[40+rdi],rbx - - mulx rbx,rax,rdx - adox r10,r10 - adcx rax,r12 - jrcxz $L$sqrx4x_shift_n_add_break -DB 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 - adox r11,r11 - adcx rbx,r13 - mov r12,QWORD[80+rdi] - mov r13,QWORD[88+rdi] - mov QWORD[48+rdi],rax - mov QWORD[56+rdi],rbx - lea rdi,[64+rdi] - nop - jmp NEAR $L$sqrx4x_shift_n_add - -ALIGN 32 -$L$sqrx4x_shift_n_add_break: - adcx rbx,r13 - mov QWORD[48+rdi],rax - mov QWORD[56+rdi],rbx - lea rdi,[64+rdi] -DB 102,72,15,126,213 -__bn_sqrx8x_reduction: - xor eax,eax - mov rbx,QWORD[((32+8))+rsp] - mov rdx,QWORD[((48+8))+rsp] - lea rcx,[((-64))+r9*1+rbp] - - mov QWORD[((0+8))+rsp],rcx - mov QWORD[((8+8))+rsp],rdi - - lea rdi,[((48+8))+rsp] - jmp NEAR $L$sqrx8x_reduction_loop - -ALIGN 32 -$L$sqrx8x_reduction_loop: - mov r9,QWORD[8+rdi] - mov r10,QWORD[16+rdi] - mov r11,QWORD[24+rdi] - mov r12,QWORD[32+rdi] - mov r8,rdx - imul rdx,rbx - mov 
r13,QWORD[40+rdi] - mov r14,QWORD[48+rdi] - mov r15,QWORD[56+rdi] - mov QWORD[((24+8))+rsp],rax - - lea rdi,[64+rdi] - xor rsi,rsi - mov rcx,-8 - jmp NEAR $L$sqrx8x_reduce - -ALIGN 32 -$L$sqrx8x_reduce: - mov rbx,r8 - mulx r8,rax,QWORD[rbp] - adcx rax,rbx - adox r8,r9 - - mulx r9,rbx,QWORD[8+rbp] - adcx r8,rbx - adox r9,r10 - - mulx r10,rbx,QWORD[16+rbp] - adcx r9,rbx - adox r10,r11 - - mulx r11,rbx,QWORD[24+rbp] - adcx r10,rbx - adox r11,r12 - -DB 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 - mov rax,rdx - mov rdx,r8 - adcx r11,rbx - adox r12,r13 - - mulx rdx,rbx,QWORD[((32+8))+rsp] - mov rdx,rax - mov QWORD[((64+48+8))+rcx*8+rsp],rax - - mulx r13,rax,QWORD[40+rbp] - adcx r12,rax - adox r13,r14 - - mulx r14,rax,QWORD[48+rbp] - adcx r13,rax - adox r14,r15 - - mulx r15,rax,QWORD[56+rbp] - mov rdx,rbx - adcx r14,rax - adox r15,rsi - adcx r15,rsi - -DB 0x67,0x67,0x67 - inc rcx - jnz NEAR $L$sqrx8x_reduce - - mov rax,rsi - cmp rbp,QWORD[((0+8))+rsp] - jae NEAR $L$sqrx8x_no_tail - - mov rdx,QWORD[((48+8))+rsp] - add r8,QWORD[rdi] - lea rbp,[64+rbp] - mov rcx,-8 - adcx r9,QWORD[8+rdi] - adcx r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - lea rdi,[64+rdi] - sbb rax,rax - - xor rsi,rsi - mov QWORD[((16+8))+rsp],rax - jmp NEAR $L$sqrx8x_tail - -ALIGN 32 -$L$sqrx8x_tail: - mov rbx,r8 - mulx r8,rax,QWORD[rbp] - adcx rbx,rax - adox r8,r9 - - mulx r9,rax,QWORD[8+rbp] - adcx r8,rax - adox r9,r10 - - mulx r10,rax,QWORD[16+rbp] - adcx r9,rax - adox r10,r11 - - mulx r11,rax,QWORD[24+rbp] - adcx r10,rax - adox r11,r12 - -DB 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 - adcx r11,rax - adox r12,r13 - - mulx r13,rax,QWORD[40+rbp] - adcx r12,rax - adox r13,r14 - - mulx r14,rax,QWORD[48+rbp] - adcx r13,rax - adox r14,r15 - - mulx r15,rax,QWORD[56+rbp] - mov rdx,QWORD[((72+48+8))+rcx*8+rsp] - adcx r14,rax - adox r15,rsi - mov QWORD[rcx*8+rdi],rbx - mov rbx,r8 - adcx r15,rsi - - inc rcx - jnz 
NEAR $L$sqrx8x_tail - - cmp rbp,QWORD[((0+8))+rsp] - jae NEAR $L$sqrx8x_tail_done - - sub rsi,QWORD[((16+8))+rsp] - mov rdx,QWORD[((48+8))+rsp] - lea rbp,[64+rbp] - adc r8,QWORD[rdi] - adc r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - lea rdi,[64+rdi] - sbb rax,rax - sub rcx,8 - - xor rsi,rsi - mov QWORD[((16+8))+rsp],rax - jmp NEAR $L$sqrx8x_tail - -ALIGN 32 -$L$sqrx8x_tail_done: - xor rax,rax - add r8,QWORD[((24+8))+rsp] - adc r9,0 - adc r10,0 - adc r11,0 - adc r12,0 - adc r13,0 - adc r14,0 - adc r15,0 - adc rax,0 - - sub rsi,QWORD[((16+8))+rsp] -$L$sqrx8x_no_tail: - adc r8,QWORD[rdi] -DB 102,72,15,126,217 - adc r9,QWORD[8+rdi] - mov rsi,QWORD[56+rbp] -DB 102,72,15,126,213 - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - adc r12,QWORD[32+rdi] - adc r13,QWORD[40+rdi] - adc r14,QWORD[48+rdi] - adc r15,QWORD[56+rdi] - adc rax,0 - - mov rbx,QWORD[((32+8))+rsp] - mov rdx,QWORD[64+rcx*1+rdi] - - mov QWORD[rdi],r8 - lea r8,[64+rdi] - mov QWORD[8+rdi],r9 - mov QWORD[16+rdi],r10 - mov QWORD[24+rdi],r11 - mov QWORD[32+rdi],r12 - mov QWORD[40+rdi],r13 - mov QWORD[48+rdi],r14 - mov QWORD[56+rdi],r15 - - lea rdi,[64+rcx*1+rdi] - cmp r8,QWORD[((8+8))+rsp] - jb NEAR $L$sqrx8x_reduction_loop - DB 0F3h,0C3h ;repret - - -ALIGN 32 - -__bn_postx4x_internal: - - mov r12,QWORD[rbp] - mov r10,rcx - mov r9,rcx - neg rax - sar rcx,3+2 - -DB 102,72,15,126,202 -DB 102,72,15,126,206 - dec r12 - mov r13,QWORD[8+rbp] - xor r8,r8 - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] - jmp NEAR $L$sqrx4x_sub_entry - -ALIGN 16 -$L$sqrx4x_sub: - mov r12,QWORD[rbp] - mov r13,QWORD[8+rbp] - mov r14,QWORD[16+rbp] - mov r15,QWORD[24+rbp] -$L$sqrx4x_sub_entry: - andn r12,r12,rax - lea rbp,[32+rbp] - andn r13,r13,rax - andn r14,r14,rax - andn r15,r15,rax - - neg r8 - adc r12,QWORD[rdi] - adc r13,QWORD[8+rdi] - adc r14,QWORD[16+rdi] - adc r15,QWORD[24+rdi] - mov QWORD[rdx],r12 - lea 
rdi,[32+rdi] - mov QWORD[8+rdx],r13 - sbb r8,r8 - mov QWORD[16+rdx],r14 - mov QWORD[24+rdx],r15 - lea rdx,[32+rdx] - - inc rcx - jnz NEAR $L$sqrx4x_sub - - neg r9 - - DB 0F3h,0C3h ;repret - - -global bn_scatter5 - -ALIGN 16 -bn_scatter5: - - cmp edx,0 - jz NEAR $L$scatter_epilogue - lea r8,[r9*8+r8] -$L$scatter: - mov rax,QWORD[rcx] - lea rcx,[8+rcx] - mov QWORD[r8],rax - lea r8,[256+r8] - sub edx,1 - jnz NEAR $L$scatter -$L$scatter_epilogue: - DB 0F3h,0C3h ;repret - - - -global bn_gather5 - -ALIGN 32 -bn_gather5: - -$L$SEH_begin_bn_gather5: - -DB 0x4c,0x8d,0x14,0x24 - -DB 0x48,0x81,0xec,0x08,0x01,0x00,0x00 - lea rax,[$L$inc] - and rsp,-16 - - movd xmm5,r9d - movdqa xmm0,XMMWORD[rax] - movdqa xmm1,XMMWORD[16+rax] - lea r11,[128+r8] - lea rax,[128+rsp] - - pshufd xmm5,xmm5,0 - movdqa xmm4,xmm1 - movdqa xmm2,xmm1 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa xmm3,xmm4 - - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[(-128)+rax],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[(-112)+rax],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[(-96)+rax],xmm2 - movdqa xmm2,xmm4 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[(-80)+rax],xmm3 - movdqa xmm3,xmm4 - - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[(-64)+rax],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[(-48)+rax],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[(-32)+rax],xmm2 - movdqa xmm2,xmm4 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[(-16)+rax],xmm3 - movdqa xmm3,xmm4 - - paddd xmm2,xmm1 - pcmpeqd xmm1,xmm5 - movdqa XMMWORD[rax],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[16+rax],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[32+rax],xmm2 - movdqa xmm2,xmm4 - paddd xmm1,xmm0 - pcmpeqd xmm0,xmm5 - movdqa XMMWORD[48+rax],xmm3 - movdqa xmm3,xmm4 - - paddd xmm2,xmm1 - pcmpeqd 
xmm1,xmm5 - movdqa XMMWORD[64+rax],xmm0 - movdqa xmm0,xmm4 - - paddd xmm3,xmm2 - pcmpeqd xmm2,xmm5 - movdqa XMMWORD[80+rax],xmm1 - movdqa xmm1,xmm4 - - paddd xmm0,xmm3 - pcmpeqd xmm3,xmm5 - movdqa XMMWORD[96+rax],xmm2 - movdqa xmm2,xmm4 - movdqa XMMWORD[112+rax],xmm3 - jmp NEAR $L$gather - -ALIGN 32 -$L$gather: - pxor xmm4,xmm4 - pxor xmm5,xmm5 - movdqa xmm0,XMMWORD[((-128))+r11] - movdqa xmm1,XMMWORD[((-112))+r11] - movdqa xmm2,XMMWORD[((-96))+r11] - pand xmm0,XMMWORD[((-128))+rax] - movdqa xmm3,XMMWORD[((-80))+r11] - pand xmm1,XMMWORD[((-112))+rax] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-96))+rax] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-80))+rax] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[((-64))+r11] - movdqa xmm1,XMMWORD[((-48))+r11] - movdqa xmm2,XMMWORD[((-32))+r11] - pand xmm0,XMMWORD[((-64))+rax] - movdqa xmm3,XMMWORD[((-16))+r11] - pand xmm1,XMMWORD[((-48))+rax] - por xmm4,xmm0 - pand xmm2,XMMWORD[((-32))+rax] - por xmm5,xmm1 - pand xmm3,XMMWORD[((-16))+rax] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[r11] - movdqa xmm1,XMMWORD[16+r11] - movdqa xmm2,XMMWORD[32+r11] - pand xmm0,XMMWORD[rax] - movdqa xmm3,XMMWORD[48+r11] - pand xmm1,XMMWORD[16+rax] - por xmm4,xmm0 - pand xmm2,XMMWORD[32+rax] - por xmm5,xmm1 - pand xmm3,XMMWORD[48+rax] - por xmm4,xmm2 - por xmm5,xmm3 - movdqa xmm0,XMMWORD[64+r11] - movdqa xmm1,XMMWORD[80+r11] - movdqa xmm2,XMMWORD[96+r11] - pand xmm0,XMMWORD[64+rax] - movdqa xmm3,XMMWORD[112+r11] - pand xmm1,XMMWORD[80+rax] - por xmm4,xmm0 - pand xmm2,XMMWORD[96+rax] - por xmm5,xmm1 - pand xmm3,XMMWORD[112+rax] - por xmm4,xmm2 - por xmm5,xmm3 - por xmm4,xmm5 - lea r11,[256+r11] - pshufd xmm0,xmm4,0x4e - por xmm0,xmm4 - movq QWORD[rcx],xmm0 - lea rcx,[8+rcx] - sub edx,1 - jnz NEAR $L$gather - - lea rsp,[r10] - - DB 0F3h,0C3h ;repret -$L$SEH_end_bn_gather5: - - -ALIGN 64 -$L$inc: - DD 0,0,1,1 - DD 2,2,2,2 -DB 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 -DB 
112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115 -DB 99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111 -DB 114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79 -DB 71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111 -DB 112,101,110,115,115,108,46,111,114,103,62,0 -EXTERN __imp_RtlVirtualUnwind - -ALIGN 16 -mul_handler: - push rsi - push rdi - push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD[120+r8] - mov rbx,QWORD[248+r8] - - mov rsi,QWORD[8+r9] - mov r11,QWORD[56+r9] - - mov r10d,DWORD[r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_seh_tail - - mov r10d,DWORD[4+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jb NEAR $L$common_pop_regs - - mov rax,QWORD[152+r8] - - mov r10d,DWORD[8+r11] - lea r10,[r10*1+rsi] - cmp rbx,r10 - jae NEAR $L$common_seh_tail - - lea r10,[$L$mul_epilogue] - cmp rbx,r10 - ja NEAR $L$body_40 - - mov r10,QWORD[192+r8] - mov rax,QWORD[8+r10*8+rax] - - jmp NEAR $L$common_pop_regs - -$L$body_40: - mov rax,QWORD[40+rax] -$L$common_pop_regs: - mov rbx,QWORD[((-8))+rax] - mov rbp,QWORD[((-16))+rax] - mov r12,QWORD[((-24))+rax] - mov r13,QWORD[((-32))+rax] - mov r14,QWORD[((-40))+rax] - mov r15,QWORD[((-48))+rax] - mov QWORD[144+r8],rbx - mov QWORD[160+r8],rbp - mov QWORD[216+r8],r12 - mov QWORD[224+r8],r13 - mov QWORD[232+r8],r14 - mov QWORD[240+r8],r15 - -$L$common_seh_tail: - mov rdi,QWORD[8+rax] - mov rsi,QWORD[16+rax] - mov QWORD[152+r8],rax - mov QWORD[168+r8],rsi - mov QWORD[176+r8],rdi - - mov rdi,QWORD[40+r9] - mov rsi,r8 - mov ecx,154 - DD 0xa548f3fc - - mov rsi,r9 - xor rcx,rcx - mov rdx,QWORD[8+rsi] - mov r8,QWORD[rsi] - mov r9,QWORD[16+rsi] - mov r10,QWORD[40+rsi] - lea r11,[56+rsi] - lea r12,[24+rsi] - mov QWORD[32+rsp],r10 - mov QWORD[40+rsp],r11 - mov QWORD[48+rsp],r12 - mov QWORD[56+rsp],rcx - call QWORD[__imp_RtlVirtualUnwind] - - mov eax,1 - add rsp,64 - popfq - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp - pop rbx - pop rdi - pop rsi - DB 
0F3h,0C3h ;repret - - -section .pdata rdata align=4 -ALIGN 4 - DD $L$SEH_begin_bn_mul_mont_gather5 wrt ..imagebase - DD $L$SEH_end_bn_mul_mont_gather5 wrt ..imagebase - DD $L$SEH_info_bn_mul_mont_gather5 wrt ..imagebase - - DD $L$SEH_begin_bn_mul4x_mont_gather5 wrt ..imagebase - DD $L$SEH_end_bn_mul4x_mont_gather5 wrt ..imagebase - DD $L$SEH_info_bn_mul4x_mont_gather5 wrt ..imagebase - - DD $L$SEH_begin_bn_power5 wrt ..imagebase - DD $L$SEH_end_bn_power5 wrt ..imagebase - DD $L$SEH_info_bn_power5 wrt ..imagebase - - DD $L$SEH_begin_bn_from_mont8x wrt ..imagebase - DD $L$SEH_end_bn_from_mont8x wrt ..imagebase - DD $L$SEH_info_bn_from_mont8x wrt ..imagebase - DD $L$SEH_begin_bn_mulx4x_mont_gather5 wrt ..imagebase - DD $L$SEH_end_bn_mulx4x_mont_gather5 wrt ..imagebase - DD $L$SEH_info_bn_mulx4x_mont_gather5 wrt ..imagebase - - DD $L$SEH_begin_bn_powerx5 wrt ..imagebase - DD $L$SEH_end_bn_powerx5 wrt ..imagebase - DD $L$SEH_info_bn_powerx5 wrt ..imagebase - DD $L$SEH_begin_bn_gather5 wrt ..imagebase - DD $L$SEH_end_bn_gather5 wrt ..imagebase - DD $L$SEH_info_bn_gather5 wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$SEH_info_bn_mul_mont_gather5: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$mul_body wrt ..imagebase,$L$mul_body wrt ..imagebase,$L$mul_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_mul4x_mont_gather5: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$mul4x_prologue wrt ..imagebase,$L$mul4x_body wrt ..imagebase,$L$mul4x_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_power5: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$power5_prologue wrt ..imagebase,$L$power5_body wrt ..imagebase,$L$power5_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_from_mont8x: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$from_prologue wrt ..imagebase,$L$from_body wrt ..imagebase,$L$from_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_mulx4x_mont_gather5: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$mulx4x_prologue wrt 
..imagebase,$L$mulx4x_body wrt ..imagebase,$L$mulx4x_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_powerx5: -DB 9,0,0,0 - DD mul_handler wrt ..imagebase - DD $L$powerx5_prologue wrt ..imagebase,$L$powerx5_body wrt ..imagebase,$L$powerx5_epilogue wrt ..imagebase -ALIGN 8 -$L$SEH_info_bn_gather5: -DB 0x01,0x0b,0x03,0x0a -DB 0x0b,0x01,0x21,0x00 -DB 0x04,0xa3,0x00,0x00 -ALIGN 8 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/test/trampoline-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/test/trampoline-x86_64.asm deleted file mode 100644 index 99006695ad..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/test/trampoline-x86_64.asm +++ /dev/null @@ -1,682 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. - -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - - - - - - - - -global abi_test_trampoline -ALIGN 16 -abi_test_trampoline: -$L$abi_test_trampoline_seh_begin: - - - - - - - - - - - sub rsp,344 - -$L$abi_test_trampoline_seh_prolog_alloc: - mov QWORD[112+rsp],rbx - -$L$abi_test_trampoline_seh_prolog_rbx: - mov QWORD[120+rsp],rbp - -$L$abi_test_trampoline_seh_prolog_rbp: - mov QWORD[128+rsp],rdi - -$L$abi_test_trampoline_seh_prolog_rdi: - mov QWORD[136+rsp],rsi - -$L$abi_test_trampoline_seh_prolog_rsi: - mov QWORD[144+rsp],r12 - -$L$abi_test_trampoline_seh_prolog_r12: - mov QWORD[152+rsp],r13 - -$L$abi_test_trampoline_seh_prolog_r13: - mov QWORD[160+rsp],r14 - -$L$abi_test_trampoline_seh_prolog_r14: - mov QWORD[168+rsp],r15 - -$L$abi_test_trampoline_seh_prolog_r15: - movdqa XMMWORD[176+rsp],xmm6 - -$L$abi_test_trampoline_seh_prolog_xmm6: - movdqa XMMWORD[192+rsp],xmm7 - -$L$abi_test_trampoline_seh_prolog_xmm7: - movdqa XMMWORD[208+rsp],xmm8 - -$L$abi_test_trampoline_seh_prolog_xmm8: - movdqa XMMWORD[224+rsp],xmm9 - 
-$L$abi_test_trampoline_seh_prolog_xmm9: - movdqa XMMWORD[240+rsp],xmm10 - -$L$abi_test_trampoline_seh_prolog_xmm10: - movdqa XMMWORD[256+rsp],xmm11 - -$L$abi_test_trampoline_seh_prolog_xmm11: - movdqa XMMWORD[272+rsp],xmm12 - -$L$abi_test_trampoline_seh_prolog_xmm12: - movdqa XMMWORD[288+rsp],xmm13 - -$L$abi_test_trampoline_seh_prolog_xmm13: - movdqa XMMWORD[304+rsp],xmm14 - -$L$abi_test_trampoline_seh_prolog_xmm14: - movdqa XMMWORD[320+rsp],xmm15 - -$L$abi_test_trampoline_seh_prolog_xmm15: -$L$abi_test_trampoline_seh_prolog_end: - mov rbx,QWORD[rdx] - mov rbp,QWORD[8+rdx] - mov rdi,QWORD[16+rdx] - mov rsi,QWORD[24+rdx] - mov r12,QWORD[32+rdx] - mov r13,QWORD[40+rdx] - mov r14,QWORD[48+rdx] - mov r15,QWORD[56+rdx] - movdqa xmm6,XMMWORD[64+rdx] - movdqa xmm7,XMMWORD[80+rdx] - movdqa xmm8,XMMWORD[96+rdx] - movdqa xmm9,XMMWORD[112+rdx] - movdqa xmm10,XMMWORD[128+rdx] - movdqa xmm11,XMMWORD[144+rdx] - movdqa xmm12,XMMWORD[160+rdx] - movdqa xmm13,XMMWORD[176+rdx] - movdqa xmm14,XMMWORD[192+rdx] - movdqa xmm15,XMMWORD[208+rdx] - - mov QWORD[88+rsp],rcx - mov QWORD[96+rsp],rdx - - - - - mov r10,r8 - mov r11,r9 - dec r11 - js NEAR $L$args_done - mov rcx,QWORD[r10] - add r10,8 - dec r11 - js NEAR $L$args_done - mov rdx,QWORD[r10] - add r10,8 - dec r11 - js NEAR $L$args_done - mov r8,QWORD[r10] - add r10,8 - dec r11 - js NEAR $L$args_done - mov r9,QWORD[r10] - add r10,8 - lea rax,[32+rsp] -$L$args_loop: - dec r11 - js NEAR $L$args_done - - - - - - - mov QWORD[104+rsp],r11 - mov r11,QWORD[r10] - mov QWORD[rax],r11 - mov r11,QWORD[104+rsp] - - add r10,8 - add rax,8 - jmp NEAR $L$args_loop - -$L$args_done: - mov rax,QWORD[88+rsp] - mov r10,QWORD[384+rsp] - test r10,r10 - jz NEAR $L$no_unwind - - - pushfq - or QWORD[rsp],0x100 - popfq - - - - nop -global abi_test_unwind_start -abi_test_unwind_start: - - call rax -global abi_test_unwind_return -abi_test_unwind_return: - - - - - pushfq - and QWORD[rsp],-0x101 - popfq -global abi_test_unwind_stop -abi_test_unwind_stop: - - jmp 
NEAR $L$call_done - -$L$no_unwind: - call rax - -$L$call_done: - - mov rdx,QWORD[96+rsp] - mov QWORD[rdx],rbx - mov QWORD[8+rdx],rbp - mov QWORD[16+rdx],rdi - mov QWORD[24+rdx],rsi - mov QWORD[32+rdx],r12 - mov QWORD[40+rdx],r13 - mov QWORD[48+rdx],r14 - mov QWORD[56+rdx],r15 - movdqa XMMWORD[64+rdx],xmm6 - movdqa XMMWORD[80+rdx],xmm7 - movdqa XMMWORD[96+rdx],xmm8 - movdqa XMMWORD[112+rdx],xmm9 - movdqa XMMWORD[128+rdx],xmm10 - movdqa XMMWORD[144+rdx],xmm11 - movdqa XMMWORD[160+rdx],xmm12 - movdqa XMMWORD[176+rdx],xmm13 - movdqa XMMWORD[192+rdx],xmm14 - movdqa XMMWORD[208+rdx],xmm15 - mov rbx,QWORD[112+rsp] - - mov rbp,QWORD[120+rsp] - - mov rdi,QWORD[128+rsp] - - mov rsi,QWORD[136+rsp] - - mov r12,QWORD[144+rsp] - - mov r13,QWORD[152+rsp] - - mov r14,QWORD[160+rsp] - - mov r15,QWORD[168+rsp] - - movdqa xmm6,XMMWORD[176+rsp] - - movdqa xmm7,XMMWORD[192+rsp] - - movdqa xmm8,XMMWORD[208+rsp] - - movdqa xmm9,XMMWORD[224+rsp] - - movdqa xmm10,XMMWORD[240+rsp] - - movdqa xmm11,XMMWORD[256+rsp] - - movdqa xmm12,XMMWORD[272+rsp] - - movdqa xmm13,XMMWORD[288+rsp] - - movdqa xmm14,XMMWORD[304+rsp] - - movdqa xmm15,XMMWORD[320+rsp] - - add rsp,344 - - - - DB 0F3h,0C3h ;repret - -$L$abi_test_trampoline_seh_end: - - -global abi_test_clobber_rax -ALIGN 16 -abi_test_clobber_rax: - xor rax,rax - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rbx -ALIGN 16 -abi_test_clobber_rbx: - xor rbx,rbx - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rcx -ALIGN 16 -abi_test_clobber_rcx: - xor rcx,rcx - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rdx -ALIGN 16 -abi_test_clobber_rdx: - xor rdx,rdx - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rdi -ALIGN 16 -abi_test_clobber_rdi: - xor rdi,rdi - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rsi -ALIGN 16 -abi_test_clobber_rsi: - xor rsi,rsi - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_rbp -ALIGN 16 -abi_test_clobber_rbp: - xor rbp,rbp - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r8 -ALIGN 16 
-abi_test_clobber_r8: - xor r8,r8 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r9 -ALIGN 16 -abi_test_clobber_r9: - xor r9,r9 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r10 -ALIGN 16 -abi_test_clobber_r10: - xor r10,r10 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r11 -ALIGN 16 -abi_test_clobber_r11: - xor r11,r11 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r12 -ALIGN 16 -abi_test_clobber_r12: - xor r12,r12 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r13 -ALIGN 16 -abi_test_clobber_r13: - xor r13,r13 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r14 -ALIGN 16 -abi_test_clobber_r14: - xor r14,r14 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_r15 -ALIGN 16 -abi_test_clobber_r15: - xor r15,r15 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm0 -ALIGN 16 -abi_test_clobber_xmm0: - pxor xmm0,xmm0 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm1 -ALIGN 16 -abi_test_clobber_xmm1: - pxor xmm1,xmm1 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm2 -ALIGN 16 -abi_test_clobber_xmm2: - pxor xmm2,xmm2 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm3 -ALIGN 16 -abi_test_clobber_xmm3: - pxor xmm3,xmm3 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm4 -ALIGN 16 -abi_test_clobber_xmm4: - pxor xmm4,xmm4 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm5 -ALIGN 16 -abi_test_clobber_xmm5: - pxor xmm5,xmm5 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm6 -ALIGN 16 -abi_test_clobber_xmm6: - pxor xmm6,xmm6 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm7 -ALIGN 16 -abi_test_clobber_xmm7: - pxor xmm7,xmm7 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm8 -ALIGN 16 -abi_test_clobber_xmm8: - pxor xmm8,xmm8 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm9 -ALIGN 16 -abi_test_clobber_xmm9: - pxor xmm9,xmm9 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm10 -ALIGN 16 -abi_test_clobber_xmm10: - pxor xmm10,xmm10 - DB 0F3h,0C3h ;repret - - -global 
abi_test_clobber_xmm11 -ALIGN 16 -abi_test_clobber_xmm11: - pxor xmm11,xmm11 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm12 -ALIGN 16 -abi_test_clobber_xmm12: - pxor xmm12,xmm12 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm13 -ALIGN 16 -abi_test_clobber_xmm13: - pxor xmm13,xmm13 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm14 -ALIGN 16 -abi_test_clobber_xmm14: - pxor xmm14,xmm14 - DB 0F3h,0C3h ;repret - - -global abi_test_clobber_xmm15 -ALIGN 16 -abi_test_clobber_xmm15: - pxor xmm15,xmm15 - DB 0F3h,0C3h ;repret - - - - - -global abi_test_bad_unwind_wrong_register -ALIGN 16 -abi_test_bad_unwind_wrong_register: - -$L$abi_test_bad_unwind_wrong_register_seh_begin: - push r12 - -$L$abi_test_bad_unwind_wrong_register_seh_push_r13: - - - - nop - pop r12 - - DB 0F3h,0C3h ;repret -$L$abi_test_bad_unwind_wrong_register_seh_end: - - - - - - - -global abi_test_bad_unwind_temporary -ALIGN 16 -abi_test_bad_unwind_temporary: - -$L$abi_test_bad_unwind_temporary_seh_begin: - push r12 - -$L$abi_test_bad_unwind_temporary_seh_push_r12: - - mov rax,r12 - inc rax - mov QWORD[rsp],rax - - - - mov QWORD[rsp],r12 - - - pop r12 - - DB 0F3h,0C3h ;repret -$L$abi_test_bad_unwind_temporary_seh_end: - - - - - - - -global abi_test_get_and_clear_direction_flag -abi_test_get_and_clear_direction_flag: - pushfq - pop rax - and rax,0x400 - shr rax,10 - cld - DB 0F3h,0C3h ;repret - - - - - -global abi_test_set_direction_flag -abi_test_set_direction_flag: - std - DB 0F3h,0C3h ;repret - - - - - - -global abi_test_bad_unwind_epilog -ALIGN 16 -abi_test_bad_unwind_epilog: -$L$abi_test_bad_unwind_epilog_seh_begin: - push r12 -$L$abi_test_bad_unwind_epilog_seh_push_r12: - - nop - - - pop r12 - nop - DB 0F3h,0C3h ;repret -$L$abi_test_bad_unwind_epilog_seh_end: - -section .pdata rdata align=4 -ALIGN 4 - - DD $L$abi_test_trampoline_seh_begin wrt ..imagebase - DD $L$abi_test_trampoline_seh_end wrt ..imagebase - DD $L$abi_test_trampoline_seh_info wrt ..imagebase - - DD 
$L$abi_test_bad_unwind_wrong_register_seh_begin wrt ..imagebase - DD $L$abi_test_bad_unwind_wrong_register_seh_end wrt ..imagebase - DD $L$abi_test_bad_unwind_wrong_register_seh_info wrt ..imagebase - - DD $L$abi_test_bad_unwind_temporary_seh_begin wrt ..imagebase - DD $L$abi_test_bad_unwind_temporary_seh_end wrt ..imagebase - DD $L$abi_test_bad_unwind_temporary_seh_info wrt ..imagebase - - DD $L$abi_test_bad_unwind_epilog_seh_begin wrt ..imagebase - DD $L$abi_test_bad_unwind_epilog_seh_end wrt ..imagebase - DD $L$abi_test_bad_unwind_epilog_seh_info wrt ..imagebase - -section .xdata rdata align=8 -ALIGN 8 -$L$abi_test_trampoline_seh_info: - -DB 1 -DB $L$abi_test_trampoline_seh_prolog_end-$L$abi_test_trampoline_seh_begin -DB 38 -DB 0 -DB $L$abi_test_trampoline_seh_prolog_xmm15-$L$abi_test_trampoline_seh_begin -DB 248 - DW 20 -DB $L$abi_test_trampoline_seh_prolog_xmm14-$L$abi_test_trampoline_seh_begin -DB 232 - DW 19 -DB $L$abi_test_trampoline_seh_prolog_xmm13-$L$abi_test_trampoline_seh_begin -DB 216 - DW 18 -DB $L$abi_test_trampoline_seh_prolog_xmm12-$L$abi_test_trampoline_seh_begin -DB 200 - DW 17 -DB $L$abi_test_trampoline_seh_prolog_xmm11-$L$abi_test_trampoline_seh_begin -DB 184 - DW 16 -DB $L$abi_test_trampoline_seh_prolog_xmm10-$L$abi_test_trampoline_seh_begin -DB 168 - DW 15 -DB $L$abi_test_trampoline_seh_prolog_xmm9-$L$abi_test_trampoline_seh_begin -DB 152 - DW 14 -DB $L$abi_test_trampoline_seh_prolog_xmm8-$L$abi_test_trampoline_seh_begin -DB 136 - DW 13 -DB $L$abi_test_trampoline_seh_prolog_xmm7-$L$abi_test_trampoline_seh_begin -DB 120 - DW 12 -DB $L$abi_test_trampoline_seh_prolog_xmm6-$L$abi_test_trampoline_seh_begin -DB 104 - DW 11 -DB $L$abi_test_trampoline_seh_prolog_r15-$L$abi_test_trampoline_seh_begin -DB 244 - DW 21 -DB $L$abi_test_trampoline_seh_prolog_r14-$L$abi_test_trampoline_seh_begin -DB 228 - DW 20 -DB $L$abi_test_trampoline_seh_prolog_r13-$L$abi_test_trampoline_seh_begin -DB 212 - DW 19 -DB 
$L$abi_test_trampoline_seh_prolog_r12-$L$abi_test_trampoline_seh_begin -DB 196 - DW 18 -DB $L$abi_test_trampoline_seh_prolog_rsi-$L$abi_test_trampoline_seh_begin -DB 100 - DW 17 -DB $L$abi_test_trampoline_seh_prolog_rdi-$L$abi_test_trampoline_seh_begin -DB 116 - DW 16 -DB $L$abi_test_trampoline_seh_prolog_rbp-$L$abi_test_trampoline_seh_begin -DB 84 - DW 15 -DB $L$abi_test_trampoline_seh_prolog_rbx-$L$abi_test_trampoline_seh_begin -DB 52 - DW 14 -DB $L$abi_test_trampoline_seh_prolog_alloc-$L$abi_test_trampoline_seh_begin -DB 1 - DW 43 - - -ALIGN 8 -$L$abi_test_bad_unwind_wrong_register_seh_info: -DB 1 -DB $L$abi_test_bad_unwind_wrong_register_seh_push_r13-$L$abi_test_bad_unwind_wrong_register_seh_begin -DB 1 -DB 0 - -DB $L$abi_test_bad_unwind_wrong_register_seh_push_r13-$L$abi_test_bad_unwind_wrong_register_seh_begin -DB 208 - -ALIGN 8 -$L$abi_test_bad_unwind_temporary_seh_info: -DB 1 -DB $L$abi_test_bad_unwind_temporary_seh_push_r12-$L$abi_test_bad_unwind_temporary_seh_begin -DB 1 -DB 0 - -DB $L$abi_test_bad_unwind_temporary_seh_push_r12-$L$abi_test_bad_unwind_temporary_seh_begin -DB 192 - -ALIGN 8 -$L$abi_test_bad_unwind_epilog_seh_info: -DB 1 -DB $L$abi_test_bad_unwind_epilog_seh_push_r12-$L$abi_test_bad_unwind_epilog_seh_begin -DB 1 -DB 0 - -DB $L$abi_test_bad_unwind_epilog_seh_push_r12-$L$abi_test_bad_unwind_epilog_seh_begin -DB 192 diff --git a/packager/third_party/boringssl/win-x86_64/crypto/third_party/sike/asm/fp-x86_64.asm b/packager/third_party/boringssl/win-x86_64/crypto/third_party/sike/asm/fp-x86_64.asm deleted file mode 100644 index fbfef1be13..0000000000 --- a/packager/third_party/boringssl/win-x86_64/crypto/third_party/sike/asm/fp-x86_64.asm +++ /dev/null @@ -1,1951 +0,0 @@ -; This file is generated from a similarly-named Perl script in the BoringSSL -; source tree. Do not edit by hand. 
- -default rel -%define XMMWORD -%define YMMWORD -%define ZMMWORD - -%ifdef BORINGSSL_PREFIX -%include "boringssl_prefix_symbols_nasm.inc" -%endif -section .text code align=64 - - - -$L$p434x2: - DQ 0xFFFFFFFFFFFFFFFE - DQ 0xFFFFFFFFFFFFFFFF - DQ 0xFB82ECF5C5FFFFFF - DQ 0xF78CB8F062B15D47 - DQ 0xD9F8BFAD038A40AC - DQ 0x0004683E4E2EE688 - - -$L$p434p1: - DQ 0xFDC1767AE3000000 - DQ 0x7BC65C783158AEA3 - DQ 0x6CFC5FD681C52056 - DQ 0x0002341F27177344 - -EXTERN OPENSSL_ia32cap_P - -global sike_fpadd - -sike_fpadd: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_fpadd: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push r12 - - - push r13 - - - push r14 - - - - xor rax,rax - - mov r8,QWORD[rdi] - add r8,QWORD[rsi] - mov r9,QWORD[8+rdi] - adc r9,QWORD[8+rsi] - mov r10,QWORD[16+rdi] - adc r10,QWORD[16+rsi] - mov r11,QWORD[24+rdi] - adc r11,QWORD[24+rsi] - mov r12,QWORD[32+rdi] - adc r12,QWORD[32+rsi] - mov r13,QWORD[40+rdi] - adc r13,QWORD[40+rsi] - mov r14,QWORD[48+rdi] - adc r14,QWORD[48+rsi] - - mov rcx,QWORD[$L$p434x2] - sub r8,rcx - mov rcx,QWORD[((8+$L$p434x2))] - sbb r9,rcx - sbb r10,rcx - mov rcx,QWORD[((16+$L$p434x2))] - sbb r11,rcx - mov rcx,QWORD[((24+$L$p434x2))] - sbb r12,rcx - mov rcx,QWORD[((32+$L$p434x2))] - sbb r13,rcx - mov rcx,QWORD[((40+$L$p434x2))] - sbb r14,rcx - - sbb rax,0 - - mov rdi,QWORD[$L$p434x2] - and rdi,rax - mov rsi,QWORD[((8+$L$p434x2))] - and rsi,rax - mov rcx,QWORD[((16+$L$p434x2))] - and rcx,rax - - add r8,rdi - mov QWORD[rdx],r8 - adc r9,rsi - mov QWORD[8+rdx],r9 - adc r10,rsi - mov QWORD[16+rdx],r10 - adc r11,rcx - mov QWORD[24+rdx],r11 - - setc cl - mov r8,QWORD[((24+$L$p434x2))] - and r8,rax - mov r9,QWORD[((32+$L$p434x2))] - and r9,rax - mov r10,QWORD[((40+$L$p434x2))] - and r10,rax - bt rcx,0 - - adc r12,r8 - mov QWORD[32+rdx],r12 - adc r13,r9 - mov QWORD[40+rdx],r13 - adc r14,r10 - mov QWORD[48+rdx],r14 - - pop r14 - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - 
mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global sike_cswap_asm - -sike_cswap_asm: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_cswap_asm: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - - movq xmm3,rdx - - - - - - pshufd xmm3,xmm3,68 - - movdqu xmm0,XMMWORD[rdi] - movdqu xmm1,XMMWORD[rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[rdi],xmm0 - movdqu XMMWORD[rsi],xmm1 - - movdqu xmm0,XMMWORD[16+rdi] - movdqu xmm1,XMMWORD[16+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[16+rdi],xmm0 - movdqu XMMWORD[16+rsi],xmm1 - - movdqu xmm0,XMMWORD[32+rdi] - movdqu xmm1,XMMWORD[32+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[32+rdi],xmm0 - movdqu XMMWORD[32+rsi],xmm1 - - movdqu xmm0,XMMWORD[48+rdi] - movdqu xmm1,XMMWORD[48+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[48+rdi],xmm0 - movdqu XMMWORD[48+rsi],xmm1 - - movdqu xmm0,XMMWORD[64+rdi] - movdqu xmm1,XMMWORD[64+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[64+rdi],xmm0 - movdqu XMMWORD[64+rsi],xmm1 - - movdqu xmm0,XMMWORD[80+rdi] - movdqu xmm1,XMMWORD[80+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[80+rdi],xmm0 - movdqu XMMWORD[80+rsi],xmm1 - - movdqu xmm0,XMMWORD[96+rdi] - movdqu xmm1,XMMWORD[96+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[96+rdi],xmm0 - movdqu XMMWORD[96+rsi],xmm1 - - movdqu xmm0,XMMWORD[112+rdi] - movdqu xmm1,XMMWORD[112+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[112+rdi],xmm0 - movdqu XMMWORD[112+rsi],xmm1 - - movdqu xmm0,XMMWORD[128+rdi] - movdqu 
xmm1,XMMWORD[128+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[128+rdi],xmm0 - movdqu XMMWORD[128+rsi],xmm1 - - movdqu xmm0,XMMWORD[144+rdi] - movdqu xmm1,XMMWORD[144+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[144+rdi],xmm0 - movdqu XMMWORD[144+rsi],xmm1 - - movdqu xmm0,XMMWORD[160+rdi] - movdqu xmm1,XMMWORD[160+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[160+rdi],xmm0 - movdqu XMMWORD[160+rsi],xmm1 - - movdqu xmm0,XMMWORD[176+rdi] - movdqu xmm1,XMMWORD[176+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[176+rdi],xmm0 - movdqu XMMWORD[176+rsi],xmm1 - - movdqu xmm0,XMMWORD[192+rdi] - movdqu xmm1,XMMWORD[192+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[192+rdi],xmm0 - movdqu XMMWORD[192+rsi],xmm1 - - movdqu xmm0,XMMWORD[208+rdi] - movdqu xmm1,XMMWORD[208+rsi] - movdqa xmm2,xmm1 - pxor xmm2,xmm0 - pand xmm2,xmm3 - pxor xmm0,xmm2 - pxor xmm1,xmm2 - movdqu XMMWORD[208+rdi],xmm0 - movdqu XMMWORD[208+rsi],xmm1 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret -global sike_fpsub - -sike_fpsub: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_fpsub: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push r12 - - - push r13 - - - push r14 - - - - xor rax,rax - - mov r8,QWORD[rdi] - sub r8,QWORD[rsi] - mov r9,QWORD[8+rdi] - sbb r9,QWORD[8+rsi] - mov r10,QWORD[16+rdi] - sbb r10,QWORD[16+rsi] - mov r11,QWORD[24+rdi] - sbb r11,QWORD[24+rsi] - mov r12,QWORD[32+rdi] - sbb r12,QWORD[32+rsi] - mov r13,QWORD[40+rdi] - sbb r13,QWORD[40+rsi] - mov r14,QWORD[48+rdi] - sbb r14,QWORD[48+rsi] - - sbb rax,0x0 - - mov rdi,QWORD[$L$p434x2] - and rdi,rax - mov rsi,QWORD[((8+$L$p434x2))] - and 
rsi,rax - mov rcx,QWORD[((16+$L$p434x2))] - and rcx,rax - - add r8,rdi - mov QWORD[rdx],r8 - adc r9,rsi - mov QWORD[8+rdx],r9 - adc r10,rsi - mov QWORD[16+rdx],r10 - adc r11,rcx - mov QWORD[24+rdx],r11 - - setc cl - mov r8,QWORD[((24+$L$p434x2))] - and r8,rax - mov r9,QWORD[((32+$L$p434x2))] - and r9,rax - mov r10,QWORD[((40+$L$p434x2))] - and r10,rax - bt rcx,0x0 - - adc r12,r8 - adc r13,r9 - adc r14,r10 - mov QWORD[32+rdx],r12 - mov QWORD[40+rdx],r13 - mov QWORD[48+rdx],r14 - - pop r14 - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global sike_mpadd_asm - -sike_mpadd_asm: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_mpadd_asm: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - mov r8,QWORD[rdi]; - mov r9,QWORD[8+rdi] - mov r10,QWORD[16+rdi] - mov r11,QWORD[24+rdi] - mov rcx,QWORD[32+rdi] - add r8,QWORD[rsi] - adc r9,QWORD[8+rsi] - adc r10,QWORD[16+rsi] - adc r11,QWORD[24+rsi] - adc rcx,QWORD[32+rsi] - mov QWORD[rdx],r8 - mov QWORD[8+rdx],r9 - mov QWORD[16+rdx],r10 - mov QWORD[24+rdx],r11 - mov QWORD[32+rdx],rcx - - mov r8,QWORD[40+rdi] - mov r9,QWORD[48+rdi] - adc r8,QWORD[40+rsi] - adc r9,QWORD[48+rsi] - mov QWORD[40+rdx],r8 - mov QWORD[48+rdx],r9 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global sike_mpsubx2_asm - -sike_mpsubx2_asm: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_mpsubx2_asm: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - xor rax,rax - - mov r8,QWORD[rdi] - mov r9,QWORD[8+rdi] - mov r10,QWORD[16+rdi] - mov r11,QWORD[24+rdi] - mov rcx,QWORD[32+rdi] - sub r8,QWORD[rsi] - sbb r9,QWORD[8+rsi] - sbb r10,QWORD[16+rsi] - sbb r11,QWORD[24+rsi] - sbb rcx,QWORD[32+rsi] - mov QWORD[rdx],r8 - mov QWORD[8+rdx],r9 - mov QWORD[16+rdx],r10 - mov QWORD[24+rdx],r11 - mov QWORD[32+rdx],rcx - - mov r8,QWORD[40+rdi] - mov r9,QWORD[48+rdi] - mov 
r10,QWORD[56+rdi] - mov r11,QWORD[64+rdi] - mov rcx,QWORD[72+rdi] - sbb r8,QWORD[40+rsi] - sbb r9,QWORD[48+rsi] - sbb r10,QWORD[56+rsi] - sbb r11,QWORD[64+rsi] - sbb rcx,QWORD[72+rsi] - mov QWORD[40+rdx],r8 - mov QWORD[48+rdx],r9 - mov QWORD[56+rdx],r10 - mov QWORD[64+rdx],r11 - mov QWORD[72+rdx],rcx - - mov r8,QWORD[80+rdi] - mov r9,QWORD[88+rdi] - mov r10,QWORD[96+rdi] - mov r11,QWORD[104+rdi] - sbb r8,QWORD[80+rsi] - sbb r9,QWORD[88+rsi] - sbb r10,QWORD[96+rsi] - sbb r11,QWORD[104+rsi] - sbb rax,0x0 - mov QWORD[80+rdx],r8 - mov QWORD[88+rdx],r9 - mov QWORD[96+rdx],r10 - mov QWORD[104+rdx],r11 - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global sike_mpdblsubx2_asm - -sike_mpdblsubx2_asm: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_mpdblsubx2_asm: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push r12 - - - push r13 - - - - xor rax,rax - - - mov r8,QWORD[rdx] - mov r9,QWORD[8+rdx] - mov r10,QWORD[16+rdx] - mov r11,QWORD[24+rdx] - mov r12,QWORD[32+rdx] - mov r13,QWORD[40+rdx] - mov rcx,QWORD[48+rdx] - sub r8,QWORD[rdi] - sbb r9,QWORD[8+rdi] - sbb r10,QWORD[16+rdi] - sbb r11,QWORD[24+rdi] - sbb r12,QWORD[32+rdi] - sbb r13,QWORD[40+rdi] - sbb rcx,QWORD[48+rdi] - adc rax,0x0 - - - sub r8,QWORD[rsi] - sbb r9,QWORD[8+rsi] - sbb r10,QWORD[16+rsi] - sbb r11,QWORD[24+rsi] - sbb r12,QWORD[32+rsi] - sbb r13,QWORD[40+rsi] - sbb rcx,QWORD[48+rsi] - adc rax,0x0 - - - mov QWORD[rdx],r8 - mov QWORD[8+rdx],r9 - mov QWORD[16+rdx],r10 - mov QWORD[24+rdx],r11 - mov QWORD[32+rdx],r12 - mov QWORD[40+rdx],r13 - mov QWORD[48+rdx],rcx - - - mov r8,QWORD[56+rdx] - mov r9,QWORD[64+rdx] - mov r10,QWORD[72+rdx] - mov r11,QWORD[80+rdx] - mov r12,QWORD[88+rdx] - mov r13,QWORD[96+rdx] - mov rcx,QWORD[104+rdx] - - sub r8,rax - sbb r8,QWORD[56+rdi] - sbb r9,QWORD[64+rdi] - sbb r10,QWORD[72+rdi] - sbb r11,QWORD[80+rdi] - sbb r12,QWORD[88+rdi] - sbb r13,QWORD[96+rdi] - sbb rcx,QWORD[104+rdi] - - - sub 
r8,QWORD[56+rsi] - sbb r9,QWORD[64+rsi] - sbb r10,QWORD[72+rsi] - sbb r11,QWORD[80+rsi] - sbb r12,QWORD[88+rsi] - sbb r13,QWORD[96+rsi] - sbb rcx,QWORD[104+rsi] - - - mov QWORD[56+rdx],r8 - mov QWORD[64+rdx],r9 - mov QWORD[72+rdx],r10 - mov QWORD[80+rdx],r11 - mov QWORD[88+rdx],r12 - mov QWORD[96+rdx],r13 - mov QWORD[104+rdx],rcx - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - - -$L$rdc_bdw: - - - - - - - - - xor rax,rax - mov rdx,QWORD[((0+0))+rdi] - mulx r9,r8,QWORD[((0+$L$p434p1))] - mulx r10,r12,QWORD[((8+$L$p434p1))] - mulx r11,r13,QWORD[((16+$L$p434p1))] - - adox r9,r12 - adox r10,r13 - - mulx r12,r13,QWORD[((24+$L$p434p1))] - adox r11,r13 - adox r12,rax - - xor rax,rax - mov rdx,QWORD[((0+8))+rdi] - mulx rcx,r13,QWORD[((0+$L$p434p1))] - adcx r9,r13 - adcx r10,rcx - - mulx r13,rcx,QWORD[((8+$L$p434p1))] - adcx r11,r13 - adox r10,rcx - - mulx r13,rcx,QWORD[((16+$L$p434p1))] - adcx r12,r13 - adox r11,rcx - - mulx r13,rcx,QWORD[((24+$L$p434p1))] - adcx r13,rax - adox r12,rcx - adox r13,rax - - xor rcx,rcx - add r8,QWORD[24+rdi] - adc r9,QWORD[32+rdi] - adc r10,QWORD[40+rdi] - adc r11,QWORD[48+rdi] - adc r12,QWORD[56+rdi] - adc r13,QWORD[64+rdi] - adc rcx,QWORD[72+rdi] - mov QWORD[24+rdi],r8 - mov QWORD[32+rdi],r9 - mov QWORD[40+rdi],r10 - mov QWORD[48+rdi],r11 - mov QWORD[56+rdi],r12 - mov QWORD[64+rdi],r13 - mov QWORD[72+rdi],rcx - mov r8,QWORD[80+rdi] - mov r9,QWORD[88+rdi] - mov r10,QWORD[96+rdi] - mov r11,QWORD[104+rdi] - adc r8,0x0 - adc r9,0x0 - adc r10,0x0 - adc r11,0x0 - mov QWORD[80+rdi],r8 - mov QWORD[88+rdi],r9 - mov QWORD[96+rdi],r10 - mov QWORD[104+rdi],r11 - - xor rax,rax - mov rdx,QWORD[((16+0))+rdi] - mulx r9,r8,QWORD[((0+$L$p434p1))] - mulx r10,r12,QWORD[((8+$L$p434p1))] - mulx r11,r13,QWORD[((16+$L$p434p1))] - - adox r9,r12 - adox r10,r13 - - mulx r12,r13,QWORD[((24+$L$p434p1))] - adox r11,r13 - adox r12,rax - - xor rax,rax - mov rdx,QWORD[((16+8))+rdi] - mulx 
rcx,r13,QWORD[((0+$L$p434p1))] - adcx r9,r13 - adcx r10,rcx - - mulx r13,rcx,QWORD[((8+$L$p434p1))] - adcx r11,r13 - adox r10,rcx - - mulx r13,rcx,QWORD[((16+$L$p434p1))] - adcx r12,r13 - adox r11,rcx - - mulx r13,rcx,QWORD[((24+$L$p434p1))] - adcx r13,rax - adox r12,rcx - adox r13,rax - - xor rcx,rcx - add r8,QWORD[40+rdi] - adc r9,QWORD[48+rdi] - adc r10,QWORD[56+rdi] - adc r11,QWORD[64+rdi] - adc r12,QWORD[72+rdi] - adc r13,QWORD[80+rdi] - adc rcx,QWORD[88+rdi] - mov QWORD[40+rdi],r8 - mov QWORD[48+rdi],r9 - mov QWORD[56+rdi],r10 - mov QWORD[64+rdi],r11 - mov QWORD[72+rdi],r12 - mov QWORD[80+rdi],r13 - mov QWORD[88+rdi],rcx - mov r8,QWORD[96+rdi] - mov r9,QWORD[104+rdi] - adc r8,0x0 - adc r9,0x0 - mov QWORD[96+rdi],r8 - mov QWORD[104+rdi],r9 - - xor rax,rax - mov rdx,QWORD[((32+0))+rdi] - mulx r9,r8,QWORD[((0+$L$p434p1))] - mulx r10,r12,QWORD[((8+$L$p434p1))] - mulx r11,r13,QWORD[((16+$L$p434p1))] - - adox r9,r12 - adox r10,r13 - - mulx r12,r13,QWORD[((24+$L$p434p1))] - adox r11,r13 - adox r12,rax - - xor rax,rax - mov rdx,QWORD[((32+8))+rdi] - mulx rcx,r13,QWORD[((0+$L$p434p1))] - adcx r9,r13 - adcx r10,rcx - - mulx r13,rcx,QWORD[((8+$L$p434p1))] - adcx r11,r13 - adox r10,rcx - - mulx r13,rcx,QWORD[((16+$L$p434p1))] - adcx r12,r13 - adox r11,rcx - - mulx r13,rcx,QWORD[((24+$L$p434p1))] - adcx r13,rax - adox r12,rcx - adox r13,rax - - xor rcx,rcx - add r8,QWORD[56+rdi] - adc r9,QWORD[64+rdi] - adc r10,QWORD[72+rdi] - adc r11,QWORD[80+rdi] - adc r12,QWORD[88+rdi] - adc r13,QWORD[96+rdi] - adc rcx,QWORD[104+rdi] - mov QWORD[rsi],r8 - mov QWORD[8+rsi],r9 - mov QWORD[72+rdi],r10 - mov QWORD[80+rdi],r11 - mov QWORD[88+rdi],r12 - mov QWORD[96+rdi],r13 - mov QWORD[104+rdi],rcx - - xor rax,rax - mov rdx,QWORD[48+rdi] - mulx r9,r8,QWORD[((0+$L$p434p1))] - mulx r10,r12,QWORD[((8+$L$p434p1))] - mulx r11,r13,QWORD[((16+$L$p434p1))] - - adox r9,r12 - adox r10,r13 - - mulx r12,r13,QWORD[((24+$L$p434p1))] - adox r11,r13 - adox r12,rax - - add r8,QWORD[72+rdi] - adc 
r9,QWORD[80+rdi] - adc r10,QWORD[88+rdi] - adc r11,QWORD[96+rdi] - adc r12,QWORD[104+rdi] - mov QWORD[16+rsi],r8 - mov QWORD[24+rsi],r9 - mov QWORD[32+rsi],r10 - mov QWORD[40+rsi],r11 - mov QWORD[48+rsi],r12 - - - pop r15 - - - pop r14 - - - pop r13 - - - pop r12 - - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -global sike_fprdc - -sike_fprdc: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_fprdc: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push r12 - - - push r13 - - - push r14 - - - push r15 - - - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$rdc_bdw - - - - - mov r14,QWORD[((0+0))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul r14 - xor r10,r10 - mov r8,rax - mov r9,rdx - - - mov rax,QWORD[((8+$L$p434p1))] - mul r14 - xor r11,r11 - add r9,rax - adc r10,rdx - - - mov rcx,QWORD[((0+8))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul rcx - add r9,rax - adc r10,rdx - adc r11,0x0 - - - xor r12,r12 - mov rax,QWORD[((16+$L$p434p1))] - mul r14 - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((8+$L$p434p1))] - mul rcx - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul r14 - xor r13,r13 - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov rax,QWORD[((16+$L$p434p1))] - mul rcx - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul rcx - add r12,rax - adc r13,rdx - - - xor rcx,rcx - add r8,QWORD[24+rdi] - adc r9,QWORD[32+rdi] - adc r10,QWORD[40+rdi] - adc r11,QWORD[48+rdi] - adc r12,QWORD[56+rdi] - adc r13,QWORD[64+rdi] - adc rcx,QWORD[72+rdi] - mov QWORD[24+rdi],r8 - mov QWORD[32+rdi],r9 - mov QWORD[40+rdi],r10 - mov QWORD[48+rdi],r11 - mov QWORD[56+rdi],r12 - mov QWORD[64+rdi],r13 - mov QWORD[72+rdi],rcx - mov r8,QWORD[80+rdi] - mov r9,QWORD[88+rdi] - mov r10,QWORD[96+rdi] - mov r11,QWORD[104+rdi] - adc r8,0x0 - adc r9,0x0 - adc r10,0x0 - adc 
r11,0x0 - mov QWORD[80+rdi],r8 - mov QWORD[88+rdi],r9 - mov QWORD[96+rdi],r10 - mov QWORD[104+rdi],r11 - - - mov r14,QWORD[((16+0))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul r14 - xor r10,r10 - mov r8,rax - mov r9,rdx - - - mov rax,QWORD[((8+$L$p434p1))] - mul r14 - xor r11,r11 - add r9,rax - adc r10,rdx - - - mov rcx,QWORD[((16+8))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul rcx - add r9,rax - adc r10,rdx - adc r11,0x0 - - - xor r12,r12 - mov rax,QWORD[((16+$L$p434p1))] - mul r14 - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((8+$L$p434p1))] - mul rcx - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul r14 - xor r13,r13 - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov rax,QWORD[((16+$L$p434p1))] - mul rcx - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul rcx - add r12,rax - adc r13,rdx - - - xor rcx,rcx - add r8,QWORD[40+rdi] - adc r9,QWORD[48+rdi] - adc r10,QWORD[56+rdi] - adc r11,QWORD[64+rdi] - adc r12,QWORD[72+rdi] - adc r13,QWORD[80+rdi] - adc rcx,QWORD[88+rdi] - mov QWORD[40+rdi],r8 - mov QWORD[48+rdi],r9 - mov QWORD[56+rdi],r10 - mov QWORD[64+rdi],r11 - mov QWORD[72+rdi],r12 - mov QWORD[80+rdi],r13 - mov QWORD[88+rdi],rcx - mov r8,QWORD[96+rdi] - mov r9,QWORD[104+rdi] - adc r8,0x0 - adc r9,0x0 - mov QWORD[96+rdi],r8 - mov QWORD[104+rdi],r9 - - - mov r14,QWORD[((32+0))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul r14 - xor r10,r10 - mov r8,rax - mov r9,rdx - - - mov rax,QWORD[((8+$L$p434p1))] - mul r14 - xor r11,r11 - add r9,rax - adc r10,rdx - - - mov rcx,QWORD[((32+8))+rdi] - mov rax,QWORD[((0+$L$p434p1))] - mul rcx - add r9,rax - adc r10,rdx - adc r11,0x0 - - - xor r12,r12 - mov rax,QWORD[((16+$L$p434p1))] - mul r14 - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((8+$L$p434p1))] - mul rcx - add r10,rax - adc r11,rdx - adc r12,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul r14 - xor r13,r13 - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov 
rax,QWORD[((16+$L$p434p1))] - mul rcx - add r11,rax - adc r12,rdx - adc r13,0x0 - - - mov rax,QWORD[((24+$L$p434p1))] - mul rcx - add r12,rax - adc r13,rdx - - - xor rcx,rcx - add r8,QWORD[56+rdi] - adc r9,QWORD[64+rdi] - adc r10,QWORD[72+rdi] - adc r11,QWORD[80+rdi] - adc r12,QWORD[88+rdi] - adc r13,QWORD[96+rdi] - adc rcx,QWORD[104+rdi] - mov QWORD[rsi],r8 - mov QWORD[8+rsi],r9 - mov QWORD[72+rdi],r10 - mov QWORD[80+rdi],r11 - mov QWORD[88+rdi],r12 - mov QWORD[96+rdi],r13 - mov QWORD[104+rdi],rcx - - mov r13,QWORD[48+rdi] - - xor r10,r10 - mov rax,QWORD[((0+$L$p434p1))] - mul r13 - mov r8,rax - mov r9,rdx - - xor r11,r11 - mov rax,QWORD[((8+$L$p434p1))] - mul r13 - add r9,rax - adc r10,rdx - - xor r12,r12 - mov rax,QWORD[((16+$L$p434p1))] - mul r13 - add r10,rax - adc r11,rdx - - mov rax,QWORD[((24+$L$p434p1))] - mul r13 - add r11,rax - adc r12,rdx - - add r8,QWORD[72+rdi] - adc r9,QWORD[80+rdi] - adc r10,QWORD[88+rdi] - adc r11,QWORD[96+rdi] - adc r12,QWORD[104+rdi] - mov QWORD[16+rsi],r8 - mov QWORD[24+rsi],r9 - mov QWORD[32+rsi],r10 - mov QWORD[40+rsi],r11 - mov QWORD[48+rsi],r12 - - - pop r15 - - pop r14 - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - -$L$mul_bdw: - - - - - - - - - - mov rcx,rdx - xor rax,rax - - - mov r8,QWORD[rdi] - mov r9,QWORD[8+rdi] - mov r10,QWORD[16+rdi] - mov r11,QWORD[24+rdi] - - push rbx - - - push rbp - - - sub rsp,96 - - - add r8,QWORD[32+rdi] - adc r9,QWORD[40+rdi] - adc r10,QWORD[48+rdi] - adc r11,0x0 - sbb rax,0x0 - mov QWORD[rsp],r8 - mov QWORD[8+rsp],r9 - mov QWORD[16+rsp],r10 - mov QWORD[24+rsp],r11 - - - xor rbx,rbx - mov r12,QWORD[rsi] - mov r13,QWORD[8+rsi] - mov r14,QWORD[16+rsi] - mov r15,QWORD[24+rsi] - add r12,QWORD[32+rsi] - adc r13,QWORD[40+rsi] - adc r14,QWORD[48+rsi] - adc r15,0x0 - sbb rbx,0x0 - mov QWORD[32+rsp],r12 - mov QWORD[40+rsp],r13 - mov QWORD[48+rsp],r14 - mov QWORD[56+rsp],r15 - - - and r12,rax - and r13,rax - and r14,rax - and r15,rax - 
- - and r8,rbx - and r9,rbx - and r10,rbx - and r11,rbx - - - add r8,r12 - adc r9,r13 - adc r10,r14 - adc r11,r15 - mov QWORD[64+rsp],r8 - mov QWORD[72+rsp],r9 - mov QWORD[80+rsp],r10 - mov QWORD[88+rsp],r11 - - - mov rdx,QWORD[((0+0))+rsp] - mulx r8,r9,QWORD[((32+0))+rsp] - mov QWORD[((0+0))+rsp],r9 - mulx r9,r10,QWORD[((32+8))+rsp] - xor rax,rax - adox r8,r10 - mulx r10,r11,QWORD[((32+16))+rsp] - adox r9,r11 - mulx r11,r12,QWORD[((32+24))+rsp] - adox r10,r12 - - mov rdx,QWORD[((0+8))+rsp] - mulx r13,r12,QWORD[((32+0))+rsp] - adox r11,rax - xor rax,rax - mulx r14,r15,QWORD[((32+8))+rsp] - adox r12,r8 - mov QWORD[((0+8))+rsp],r12 - adcx r13,r15 - mulx r15,rbx,QWORD[((32+16))+rsp] - adcx r14,rbx - adox r13,r9 - mulx rbx,rbp,QWORD[((32+24))+rsp] - adcx r15,rbp - adcx rbx,rax - adox r14,r10 - - mov rdx,QWORD[((0+16))+rsp] - mulx r9,r8,QWORD[((32+0))+rsp] - adox r15,r11 - adox rbx,rax - xor rax,rax - mulx r10,r11,QWORD[((32+8))+rsp] - adox r8,r13 - mov QWORD[((0+16))+rsp],r8 - adcx r9,r11 - mulx r11,r12,QWORD[((32+16))+rsp] - adcx r10,r12 - adox r9,r14 - mulx r12,rbp,QWORD[((32+24))+rsp] - adcx r11,rbp - adcx r12,rax - - adox r10,r15 - adox r11,rbx - adox r12,rax - - mov rdx,QWORD[((0+24))+rsp] - mulx r13,r8,QWORD[((32+0))+rsp] - xor rax,rax - mulx r14,r15,QWORD[((32+8))+rsp] - adcx r13,r15 - adox r9,r8 - mulx r15,rbx,QWORD[((32+16))+rsp] - adcx r14,rbx - adox r10,r13 - mulx rbx,rbp,QWORD[((32+24))+rsp] - adcx r15,rbp - adcx rbx,rax - adox r11,r14 - adox r12,r15 - adox rbx,rax - mov QWORD[((0+24))+rsp],r9 - mov QWORD[((0+32))+rsp],r10 - mov QWORD[((0+40))+rsp],r11 - mov QWORD[((0+48))+rsp],r12 - mov QWORD[((0+56))+rsp],rbx - - - - mov rdx,QWORD[((0+0))+rdi] - mulx r8,r9,QWORD[((0+0))+rsi] - mov QWORD[((0+0))+rcx],r9 - mulx r9,r10,QWORD[((0+8))+rsi] - xor rax,rax - adox r8,r10 - mulx r10,r11,QWORD[((0+16))+rsi] - adox r9,r11 - mulx r11,r12,QWORD[((0+24))+rsi] - adox r10,r12 - - mov rdx,QWORD[((0+8))+rdi] - mulx r13,r12,QWORD[((0+0))+rsi] - adox r11,rax - xor rax,rax - 
mulx r14,r15,QWORD[((0+8))+rsi] - adox r12,r8 - mov QWORD[((0+8))+rcx],r12 - adcx r13,r15 - mulx r15,rbx,QWORD[((0+16))+rsi] - adcx r14,rbx - adox r13,r9 - mulx rbx,rbp,QWORD[((0+24))+rsi] - adcx r15,rbp - adcx rbx,rax - adox r14,r10 - - mov rdx,QWORD[((0+16))+rdi] - mulx r9,r8,QWORD[((0+0))+rsi] - adox r15,r11 - adox rbx,rax - xor rax,rax - mulx r10,r11,QWORD[((0+8))+rsi] - adox r8,r13 - mov QWORD[((0+16))+rcx],r8 - adcx r9,r11 - mulx r11,r12,QWORD[((0+16))+rsi] - adcx r10,r12 - adox r9,r14 - mulx r12,rbp,QWORD[((0+24))+rsi] - adcx r11,rbp - adcx r12,rax - - adox r10,r15 - adox r11,rbx - adox r12,rax - - mov rdx,QWORD[((0+24))+rdi] - mulx r13,r8,QWORD[((0+0))+rsi] - xor rax,rax - mulx r14,r15,QWORD[((0+8))+rsi] - adcx r13,r15 - adox r9,r8 - mulx r15,rbx,QWORD[((0+16))+rsi] - adcx r14,rbx - adox r10,r13 - mulx rbx,rbp,QWORD[((0+24))+rsi] - adcx r15,rbp - adcx rbx,rax - adox r11,r14 - adox r12,r15 - adox rbx,rax - mov QWORD[((0+24))+rcx],r9 - mov QWORD[((0+32))+rcx],r10 - mov QWORD[((0+40))+rcx],r11 - mov QWORD[((0+48))+rcx],r12 - mov QWORD[((0+56))+rcx],rbx - - - - mov rdx,QWORD[((32+0))+rdi] - mulx r8,r9,QWORD[((32+0))+rsi] - mov QWORD[((64+0))+rcx],r9 - mulx r9,r10,QWORD[((32+8))+rsi] - xor rax,rax - adox r8,r10 - mulx r10,r11,QWORD[((32+16))+rsi] - adox r9,r11 - - mov rdx,QWORD[((32+8))+rdi] - mulx r11,r12,QWORD[((32+0))+rsi] - adox r10,rax - xor rax,rax - - mulx r13,r14,QWORD[((32+8))+rsi] - adox r12,r8 - mov QWORD[((64+8))+rcx],r12 - adcx r11,r14 - - mulx r14,r8,QWORD[((32+16))+rsi] - adox r11,r9 - adcx r13,r8 - adcx r14,rax - adox r13,r10 - - mov rdx,QWORD[((32+16))+rdi] - mulx r9,r8,QWORD[((32+0))+rsi] - adox r14,rax - xor rax,rax - - mulx r12,r10,QWORD[((32+8))+rsi] - adox r8,r11 - mov QWORD[((64+16))+rcx],r8 - adcx r9,r13 - - mulx r8,r11,QWORD[((32+16))+rsi] - adcx r12,r14 - adcx r8,rax - adox r9,r10 - adox r11,r12 - adox r8,rax - mov QWORD[((64+24))+rcx],r9 - mov QWORD[((64+32))+rcx],r11 - mov QWORD[((64+40))+rcx],r8 - - - - - mov r8,QWORD[64+rsp] - mov 
r9,QWORD[72+rsp] - mov r10,QWORD[80+rsp] - mov r11,QWORD[88+rsp] - - mov rax,QWORD[32+rsp] - add r8,rax - mov rax,QWORD[40+rsp] - adc r9,rax - mov rax,QWORD[48+rsp] - adc r10,rax - mov rax,QWORD[56+rsp] - adc r11,rax - - - mov r12,QWORD[rsp] - mov r13,QWORD[8+rsp] - mov r14,QWORD[16+rsp] - mov r15,QWORD[24+rsp] - sub r12,QWORD[rcx] - sbb r13,QWORD[8+rcx] - sbb r14,QWORD[16+rcx] - sbb r15,QWORD[24+rcx] - sbb r8,QWORD[32+rcx] - sbb r9,QWORD[40+rcx] - sbb r10,QWORD[48+rcx] - sbb r11,QWORD[56+rcx] - - - sub r12,QWORD[64+rcx] - sbb r13,QWORD[72+rcx] - sbb r14,QWORD[80+rcx] - sbb r15,QWORD[88+rcx] - sbb r8,QWORD[96+rcx] - sbb r9,QWORD[104+rcx] - sbb r10,0x0 - sbb r11,0x0 - - add r12,QWORD[32+rcx] - mov QWORD[32+rcx],r12 - adc r13,QWORD[40+rcx] - mov QWORD[40+rcx],r13 - adc r14,QWORD[48+rcx] - mov QWORD[48+rcx],r14 - adc r15,QWORD[56+rcx] - mov QWORD[56+rcx],r15 - adc r8,QWORD[64+rcx] - mov QWORD[64+rcx],r8 - adc r9,QWORD[72+rcx] - mov QWORD[72+rcx],r9 - adc r10,QWORD[80+rcx] - mov QWORD[80+rcx],r10 - adc r11,QWORD[88+rcx] - mov QWORD[88+rcx],r11 - mov r12,QWORD[96+rcx] - adc r12,0x0 - mov QWORD[96+rcx],r12 - mov r13,QWORD[104+rcx] - adc r13,0x0 - mov QWORD[104+rcx],r13 - - add rsp,96 - - pop rbp - - - pop rbx - - - - - pop r15 - - - pop r14 - - - pop r13 - - - pop r12 - - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 0F3h,0C3h ;repret - - -global sike_mpmul - -sike_mpmul: - mov QWORD[8+rsp],rdi ;WIN64 prologue - mov QWORD[16+rsp],rsi - mov rax,rsp -$L$SEH_begin_sike_mpmul: - mov rdi,rcx - mov rsi,rdx - mov rdx,r8 - - - - push r12 - - - push r13 - - - push r14 - - - push r15 - - - - - - lea rcx,[OPENSSL_ia32cap_P] - mov rcx,QWORD[8+rcx] - and ecx,0x80100 - cmp ecx,0x80100 - je NEAR $L$mul_bdw - - - - mov rcx,rdx - - sub rsp,112 - - - - xor rax,rax - mov r8,QWORD[32+rdi] - mov r9,QWORD[40+rdi] - mov r10,QWORD[48+rdi] - xor r11,r11 - add r8,QWORD[rdi] - adc r9,QWORD[8+rdi] - adc r10,QWORD[16+rdi] - adc r11,QWORD[24+rdi] - - sbb rax,0 - mov 
QWORD[64+rsp],rax - - mov QWORD[rcx],r8 - mov QWORD[8+rcx],r9 - mov QWORD[16+rcx],r10 - mov QWORD[24+rcx],r11 - - - xor rdx,rdx - mov r12,QWORD[32+rsi] - mov r13,QWORD[40+rsi] - mov r14,QWORD[48+rsi] - xor r15,r15 - add r12,QWORD[rsi] - adc r13,QWORD[8+rsi] - adc r14,QWORD[16+rsi] - adc r15,QWORD[24+rsi] - sbb rdx,0x0 - - mov QWORD[72+rsp],rdx - - - mov rax,QWORD[rcx] - mul r12 - mov QWORD[rsp],rax - mov r8,rdx - - xor r9,r9 - mov rax,QWORD[rcx] - mul r13 - add r8,rax - adc r9,rdx - - xor r10,r10 - mov rax,QWORD[8+rcx] - mul r12 - add r8,rax - mov QWORD[8+rsp],r8 - adc r9,rdx - adc r10,0x0 - - xor r8,r8 - mov rax,QWORD[rcx] - mul r14 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[16+rcx] - mul r12 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[8+rcx] - mul r13 - add r9,rax - mov QWORD[16+rsp],r9 - adc r10,rdx - adc r8,0x0 - - xor r9,r9 - mov rax,QWORD[rcx] - mul r15 - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,QWORD[24+rcx] - mul r12 - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,QWORD[8+rcx] - mul r14 - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,QWORD[16+rcx] - mul r13 - add r10,rax - mov QWORD[24+rsp],r10 - adc r8,rdx - adc r9,0x0 - - xor r10,r10 - mov rax,QWORD[8+rcx] - mul r15 - add r8,rax - adc r9,rdx - adc r10,0x0 - - mov rax,QWORD[24+rcx] - mul r13 - add r8,rax - adc r9,rdx - adc r10,0x0 - - mov rax,QWORD[16+rcx] - mul r14 - add r8,rax - mov QWORD[32+rsp],r8 - adc r9,rdx - adc r10,0x0 - - xor r11,r11 - mov rax,QWORD[16+rcx] - mul r15 - add r9,rax - adc r10,rdx - adc r11,0x0 - - mov rax,QWORD[24+rcx] - mul r14 - add r9,rax - mov QWORD[40+rsp],r9 - adc r10,rdx - adc r11,0x0 - - mov rax,QWORD[24+rcx] - mul r15 - add r10,rax - mov QWORD[48+rsp],r10 - adc r11,rdx - mov QWORD[56+rsp],r11 - - - mov rax,QWORD[64+rsp] - and r12,rax - and r13,rax - and r14,rax - and r15,rax - - - mov rax,QWORD[72+rsp] - mov r8,QWORD[rcx] - and r8,rax - mov r9,QWORD[8+rcx] - and r9,rax - mov r10,QWORD[16+rcx] - and r10,rax - mov 
r11,QWORD[24+rcx] - and r11,rax - - - add r12,r8 - adc r13,r9 - adc r14,r10 - adc r15,r11 - - - mov rax,QWORD[32+rsp] - add r12,rax - mov rax,QWORD[40+rsp] - adc r13,rax - mov rax,QWORD[48+rsp] - adc r14,rax - mov rax,QWORD[56+rsp] - adc r15,rax - mov QWORD[80+rsp],r12 - mov QWORD[88+rsp],r13 - mov QWORD[96+rsp],r14 - mov QWORD[104+rsp],r15 - - - mov r11,QWORD[rdi] - mov rax,QWORD[rsi] - mul r11 - xor r9,r9 - mov QWORD[rcx],rax - mov r8,rdx - - mov r14,QWORD[16+rdi] - mov rax,QWORD[8+rsi] - mul r11 - xor r10,r10 - add r8,rax - adc r9,rdx - - mov r12,QWORD[8+rdi] - mov rax,QWORD[rsi] - mul r12 - add r8,rax - mov QWORD[8+rcx],r8 - adc r9,rdx - adc r10,0x0 - - xor r8,r8 - mov rax,QWORD[16+rsi] - mul r11 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov r13,QWORD[rsi] - mov rax,r14 - mul r13 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[8+rsi] - mul r12 - add r9,rax - mov QWORD[16+rcx],r9 - adc r10,rdx - adc r8,0x0 - - xor r9,r9 - mov rax,QWORD[24+rsi] - mul r11 - mov r15,QWORD[24+rdi] - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,r15 - mul r13 - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,QWORD[16+rsi] - mul r12 - add r10,rax - adc r8,rdx - adc r9,0x0 - - mov rax,QWORD[8+rsi] - mul r14 - add r10,rax - mov QWORD[24+rcx],r10 - adc r8,rdx - adc r9,0x0 - - xor r10,r10 - mov rax,QWORD[24+rsi] - mul r12 - add r8,rax - adc r9,rdx - adc r10,0x0 - - mov rax,QWORD[8+rsi] - mul r15 - add r8,rax - adc r9,rdx - adc r10,0x0 - - mov rax,QWORD[16+rsi] - mul r14 - add r8,rax - mov QWORD[32+rcx],r8 - adc r9,rdx - adc r10,0x0 - - xor r8,r8 - mov rax,QWORD[24+rsi] - mul r14 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[16+rsi] - mul r15 - add r9,rax - mov QWORD[40+rcx],r9 - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[24+rsi] - mul r15 - add r10,rax - mov QWORD[48+rcx],r10 - adc r8,rdx - mov QWORD[56+rcx],r8 - - - - mov r11,QWORD[32+rdi] - mov rax,QWORD[32+rsi] - mul r11 - xor r9,r9 - mov QWORD[64+rcx],rax - mov r8,rdx - - mov r14,QWORD[48+rdi] - mov 
rax,QWORD[40+rsi] - mul r11 - xor r10,r10 - add r8,rax - adc r9,rdx - - mov r12,QWORD[40+rdi] - mov rax,QWORD[32+rsi] - mul r12 - add r8,rax - mov QWORD[72+rcx],r8 - adc r9,rdx - adc r10,0x0 - - xor r8,r8 - mov rax,QWORD[48+rsi] - mul r11 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov r13,QWORD[32+rsi] - mov rax,r14 - mul r13 - add r9,rax - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[40+rsi] - mul r12 - add r9,rax - mov QWORD[80+rcx],r9 - adc r10,rdx - adc r8,0x0 - - mov rax,QWORD[48+rsi] - mul r12 - xor r12,r12 - add r10,rax - adc r8,rdx - adc r12,0x0 - - mov rax,QWORD[40+rsi] - mul r14 - add r10,rax - adc r8,rdx - adc r12,0x0 - mov QWORD[88+rcx],r10 - - mov rax,QWORD[48+rsi] - mul r14 - add r8,rax - adc r12,0x0 - mov QWORD[96+rcx],r8 - - add rdx,r12 - - - mov r8,QWORD[rsp] - sub r8,QWORD[rcx] - mov r9,QWORD[8+rsp] - sbb r9,QWORD[8+rcx] - mov r10,QWORD[16+rsp] - sbb r10,QWORD[16+rcx] - mov r11,QWORD[24+rsp] - sbb r11,QWORD[24+rcx] - mov r12,QWORD[80+rsp] - sbb r12,QWORD[32+rcx] - mov r13,QWORD[88+rsp] - sbb r13,QWORD[40+rcx] - mov r14,QWORD[96+rsp] - sbb r14,QWORD[48+rcx] - mov r15,QWORD[104+rsp] - sbb r15,QWORD[56+rcx] - - - mov rax,QWORD[64+rcx] - sub r8,rax - mov rax,QWORD[72+rcx] - sbb r9,rax - mov rax,QWORD[80+rcx] - sbb r10,rax - mov rax,QWORD[88+rcx] - sbb r11,rax - mov rax,QWORD[96+rcx] - sbb r12,rax - sbb r13,rdx - sbb r14,0x0 - sbb r15,0x0 - - - add r8,QWORD[32+rcx] - mov QWORD[32+rcx],r8 - adc r9,QWORD[40+rcx] - mov QWORD[40+rcx],r9 - adc r10,QWORD[48+rcx] - mov QWORD[48+rcx],r10 - adc r11,QWORD[56+rcx] - mov QWORD[56+rcx],r11 - adc r12,QWORD[64+rcx] - mov QWORD[64+rcx],r12 - adc r13,QWORD[72+rcx] - mov QWORD[72+rcx],r13 - adc r14,QWORD[80+rcx] - mov QWORD[80+rcx],r14 - adc r15,QWORD[88+rcx] - mov QWORD[88+rcx],r15 - mov r12,QWORD[96+rcx] - adc r12,0x0 - mov QWORD[96+rcx],r12 - adc rdx,0x0 - mov QWORD[104+rcx],rdx - - add rsp,112 - - - - pop r15 - - pop r14 - - pop r13 - - pop r12 - - mov rdi,QWORD[8+rsp] ;WIN64 epilogue - mov rsi,QWORD[16+rsp] - DB 
0F3h,0C3h ;repret - diff --git a/packager/third_party/curl/CMakeLists.txt b/packager/third_party/curl/CMakeLists.txt new file mode 100644 index 0000000000..26e418dc74 --- /dev/null +++ b/packager/third_party/curl/CMakeLists.txt @@ -0,0 +1,38 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# CMake build file to host CURL configuration. + +# Don't build a shared libcurl. +set(BUILD_SHARED_LIBS OFF) + +# Don't build with libdn, librtmp, libssh2, or zlib. +set(USE_LIBIDN2 OFF) +set(USE_LIBRTMP OFF) +set(CURL_USE_LIBSSH2 OFF) +set(CURL_ZLIB OFF) + +if(UNIX AND NOT APPLE) + # Use libcares to fix static linking on Linux. + set(ENABLE_ARES ON) +endif() + +if(WIN32) + # Use native TLS on Windows. We could use mbedtls, but only if we also + # provide a path to CA certs. + set(CURL_USE_SCHANNEL ON) +else() + # Force libcurl to use our local mbedtls instead of searching for OpenSSL on + # the system. + set(CURL_USE_MBEDTLS ON) + set(MBEDTLS_INCLUDE_DIRS ../mbedtls/source/include) + set(MBEDTLS_LIBRARY mbedtls) + set(MBEDX509_LIBRARY mbedx509) + set(MBEDCRYPTO_LIBRARY mbedcrypto) +endif() + +# With these set in scope of this folder, load the library's own CMakeLists.txt. +add_subdirectory(source) diff --git a/packager/third_party/curl/README.packager b/packager/third_party/curl/README.packager deleted file mode 100644 index 27185764f0..0000000000 --- a/packager/third_party/curl/README.packager +++ /dev/null @@ -1,52 +0,0 @@ -Name: curl -URL: http://curl.haxx.se/dev/ -License: MIT/X -License File: source/COPYING -Local Modifications: None - -Description: -libcurl is a free and easy-to-use client-side URL transfer library. libcurl -supports SSL certificates, HTTP GET, HTTP POST, HTTP PUT, and various other -transfer protocols. 
- -************************************************************************** -Description of source tree. - -1) config/ - Directory containing configuration files, which are required to build - libcurl and curl correctly. On linux platform, an *auto-generated* - configuration file "config/linux/curl_config.h" is used; the library uses - the configuration files coming with libcurl distribution - "source/lib/config-*.h" on other platforms. - - config/curl/curlbuild.h - Curl build file renamed from source/include/curl/curlbuild.h.dist - - config/dummy_tool_hugehelp.c - A dummy manual required to build curl command line tool. - - config/linux/curl_config.h - An *auto-generated* configuration file by running bash commands: - - cd source - ./buildconf - cd .. - source/configure --with-ssl --without-ca-bundle --without-ca-path \ - --without-zlib --without-libidn --without-librtmp - cp lib/curl_config.h config/linux/curl_config.h - - on linux platform, with a few features disabled to build correctly on a - fresh linux box. - - config/mac/curl_config.h - Similar as above, but run in Mac instead. - - config/linux/find_curl_ca_bundle.sh - A script used to find the path to curl_ca_bundle in the target system. - -2) curl.gyp - A gyp build file for the library. Manually maintained. - -3) source/ - Directory containing curl source codes from github without modification. - diff --git a/packager/third_party/curl/config/curl/curlbuild.h b/packager/third_party/curl/config/curl/curlbuild.h deleted file mode 100644 index f09419a843..0000000000 --- a/packager/third_party/curl/config/curl/curlbuild.h +++ /dev/null @@ -1,585 +0,0 @@ -#ifndef __CURL_CURLBUILD_H -#define __CURL_CURLBUILD_H -/*************************************************************************** - * _ _ ____ _ - * Project ___| | | | _ \| | - * / __| | | | |_) | | - * | (__| |_| | _ <| |___ - * \___|\___/|_| \_\_____| - * - * Copyright (C) 1998 - 2013, Daniel Stenberg, , et al. 
- * - * This software is licensed as described in the file COPYING, which - * you should have received as part of this distribution. The terms - * are also available at http://curl.haxx.se/docs/copyright.html. - * - * You may opt to use, copy, modify, merge, publish, distribute and/or sell - * copies of the Software, and permit persons to whom the Software is - * furnished to do so, under the terms of the COPYING file. - * - * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY - * KIND, either express or implied. - * - ***************************************************************************/ - -/* ================================================================ */ -/* NOTES FOR CONFIGURE CAPABLE SYSTEMS */ -/* ================================================================ */ - -/* - * NOTE 1: - * ------- - * - * See file include/curl/curlbuild.h.in, run configure, and forget - * that this file exists it is only used for non-configure systems. - * But you can keep reading if you want ;-) - * - */ - -/* ================================================================ */ -/* NOTES FOR NON-CONFIGURE SYSTEMS */ -/* ================================================================ */ - -/* - * NOTE 1: - * ------- - * - * Nothing in this file is intended to be modified or adjusted by the - * curl library user nor by the curl library builder. - * - * If you think that something actually needs to be changed, adjusted - * or fixed in this file, then, report it on the libcurl development - * mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/ - * - * Try to keep one section per platform, compiler and architecture, - * otherwise, if an existing section is reused for a different one and - * later on the original is adjusted, probably the piggybacking one can - * be adversely changed. - * - * In order to differentiate between platforms/compilers/architectures - * use only compiler built in predefined preprocessor symbols. 
- * - * This header file shall only export symbols which are 'curl' or 'CURL' - * prefixed, otherwise public name space would be polluted. - * - * NOTE 2: - * ------- - * - * For any given platform/compiler curl_off_t must be typedef'ed to a - * 64-bit wide signed integral data type. The width of this data type - * must remain constant and independent of any possible large file - * support settings. - * - * As an exception to the above, curl_off_t shall be typedef'ed to a - * 32-bit wide signed integral data type if there is no 64-bit type. - * - * As a general rule, curl_off_t shall not be mapped to off_t. This - * rule shall only be violated if off_t is the only 64-bit data type - * available and the size of off_t is independent of large file support - * settings. Keep your build on the safe side avoiding an off_t gating. - * If you have a 64-bit off_t then take for sure that another 64-bit - * data type exists, dig deeper and you will find it. - * - * NOTE 3: - * ------- - * - * Right now you might be staring at file include/curl/curlbuild.h.dist or - * at file include/curl/curlbuild.h, this is due to the following reason: - * file include/curl/curlbuild.h.dist is renamed to include/curl/curlbuild.h - * when the libcurl source code distribution archive file is created. - * - * File include/curl/curlbuild.h.dist is not included in the distribution - * archive. File include/curl/curlbuild.h is not present in the git tree. - * - * The distributed include/curl/curlbuild.h file is only intended to be used - * on systems which can not run the also distributed configure script. - * - * On systems capable of running the configure script, the configure process - * will overwrite the distributed include/curl/curlbuild.h file with one that - * is suitable and specific to the library being configured and built, which - * is generated from the include/curl/curlbuild.h.in template file. 
- * - * If you check out from git on a non-configure platform, you must run the - * appropriate buildconf* script to set up curlbuild.h and other local files. - * - */ - -/* ================================================================ */ -/* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */ -/* ================================================================ */ - -#ifdef CURL_SIZEOF_LONG -# error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined -#endif - -#ifdef CURL_TYPEOF_CURL_SOCKLEN_T -# error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined -#endif - -#ifdef CURL_SIZEOF_CURL_SOCKLEN_T -# error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined -#endif - -#ifdef CURL_TYPEOF_CURL_OFF_T -# error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_FORMAT_CURL_OFF_T -# error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_FORMAT_CURL_OFF_TU -# error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined -#endif - -#ifdef CURL_FORMAT_OFF_T -# error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined -#endif - -#ifdef CURL_SIZEOF_CURL_OFF_T -# error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_SUFFIX_CURL_OFF_T -# error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h" - Error 
Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_SUFFIX_CURL_OFF_TU -# error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined -#endif - -/* ================================================================ */ -/* EXTERNAL INTERFACE SETTINGS FOR NON-CONFIGURE SYSTEMS ONLY */ -/* ================================================================ */ - -#if defined(__DJGPP__) || defined(__GO32__) -# if defined(__DJGPP__) && (__DJGPP__ > 1) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# else -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__SALFORDC__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__BORLANDC__) -# if (__BORLANDC__ < 0x520) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# else -# define 
CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T i64 -# define CURL_SUFFIX_CURL_OFF_TU ui64 -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__TURBOC__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__WATCOMC__) -# if defined(__386__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T i64 -# define CURL_SUFFIX_CURL_OFF_TU ui64 -# else -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__POCC__) -# if (__POCC__ < 280) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# elif defined(_MSC_VER) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define 
CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T i64 -# define CURL_SUFFIX_CURL_OFF_TU ui64 -# else -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__LCC__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__SYMBIAN32__) -# if defined(__EABI__) /* Treat all ARM compilers equally */ -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# elif defined(__CW32__) -# pragma longlong on -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# elif defined(__VC32__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define 
CURL_SUFFIX_CURL_OFF_TU ULL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T unsigned int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__MWERKS__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(_WIN32_WCE) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T i64 -# define CURL_SUFFIX_CURL_OFF_TU ui64 -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__MINGW32__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__VMS) -# if defined(__VAX) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# else -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# endif -# define 
CURL_TYPEOF_CURL_SOCKLEN_T unsigned int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -#elif defined(__OS400__) -# if defined(__ILEC400__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 -# define CURL_PULL_SYS_TYPES_H 1 -# define CURL_PULL_SYS_SOCKET_H 1 -# endif - -#elif defined(__MVS__) -# if defined(__IBMC__) || defined(__IBMCPP__) -# if defined(_ILP32) -# define CURL_SIZEOF_LONG 4 -# elif defined(_LP64) -# define CURL_SIZEOF_LONG 8 -# endif -# if defined(_LONG_LONG) -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# elif defined(_LP64) -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# else -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 -# define CURL_PULL_SYS_TYPES_H 1 -# define CURL_PULL_SYS_SOCKET_H 1 -# endif - -#elif defined(__370__) -# if defined(__IBMC__) || defined(__IBMCPP__) -# if defined(_ILP32) -# define CURL_SIZEOF_LONG 4 -# elif defined(_LP64) -# define CURL_SIZEOF_LONG 8 -# endif -# if defined(_LONG_LONG) -# 
define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# elif defined(_LP64) -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# else -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 -# define CURL_PULL_SYS_TYPES_H 1 -# define CURL_PULL_SYS_SOCKET_H 1 -# endif - -#elif defined(TPF) -# define CURL_SIZEOF_LONG 8 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -/* ===================================== */ -/* KEEP MSVC THE PENULTIMATE ENTRY */ -/* ===================================== */ - -#elif defined(_MSC_VER) -# if (_MSC_VER >= 900) && (_INTEGRAL_MAX_BITS >= 64) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T __int64 -# define CURL_FORMAT_CURL_OFF_T "I64d" -# define CURL_FORMAT_CURL_OFF_TU "I64u" -# define CURL_FORMAT_OFF_T "%I64d" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T i64 -# define CURL_SUFFIX_CURL_OFF_TU ui64 -# else -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define 
CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 4 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T int -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -/* ===================================== */ -/* KEEP GENERIC GCC THE LAST ENTRY */ -/* ===================================== */ - -#elif defined(__GNUC__) -# if defined(__ILP32__) || \ - defined(__i386__) || defined(__ppc__) || defined(__arm__) || defined(__sparc__) -# define CURL_SIZEOF_LONG 4 -# define CURL_TYPEOF_CURL_OFF_T long long -# define CURL_FORMAT_CURL_OFF_T "lld" -# define CURL_FORMAT_CURL_OFF_TU "llu" -# define CURL_FORMAT_OFF_T "%lld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T LL -# define CURL_SUFFIX_CURL_OFF_TU ULL -# elif defined(__LP64__) || \ - defined(__x86_64__) || defined(__ppc64__) || defined(__sparc64__) -# define CURL_SIZEOF_LONG 8 -# define CURL_TYPEOF_CURL_OFF_T long -# define CURL_FORMAT_CURL_OFF_T "ld" -# define CURL_FORMAT_CURL_OFF_TU "lu" -# define CURL_FORMAT_OFF_T "%ld" -# define CURL_SIZEOF_CURL_OFF_T 8 -# define CURL_SUFFIX_CURL_OFF_T L -# define CURL_SUFFIX_CURL_OFF_TU UL -# endif -# define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t -# define CURL_SIZEOF_CURL_SOCKLEN_T 4 -# define CURL_PULL_SYS_TYPES_H 1 -# define CURL_PULL_SYS_SOCKET_H 1 - -#else -# error "Unknown non-configure build target!" - Error Compilation_aborted_Unknown_non_configure_build_target -#endif - -/* CURL_PULL_SYS_TYPES_H is defined above when inclusion of header file */ -/* sys/types.h is required here to properly make type definitions below. */ -#ifdef CURL_PULL_SYS_TYPES_H -# include -#endif - -/* CURL_PULL_SYS_SOCKET_H is defined above when inclusion of header file */ -/* sys/socket.h is required here to properly make type definitions below. */ -#ifdef CURL_PULL_SYS_SOCKET_H -# include -#endif - -/* Data type definition of curl_socklen_t. 
*/ - -#ifdef CURL_TYPEOF_CURL_SOCKLEN_T - typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t; -#endif - -/* Data type definition of curl_off_t. */ - -#ifdef CURL_TYPEOF_CURL_OFF_T - typedef CURL_TYPEOF_CURL_OFF_T curl_off_t; -#endif - -#endif /* __CURL_CURLBUILD_H */ diff --git a/packager/third_party/curl/config/dummy_tool_hugehelp.c b/packager/third_party/curl/config/dummy_tool_hugehelp.c deleted file mode 100644 index 28e6fc736e..0000000000 --- a/packager/third_party/curl/config/dummy_tool_hugehelp.c +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd -// -// The dummy curl manual. - -#include "tool_hugehelp.h" - -void hugehelp(void) { - printf("This is a dummy huge help."); -} diff --git a/packager/third_party/curl/config/linux/curl_config.h b/packager/third_party/curl/config/linux/curl_config.h deleted file mode 100644 index 3b49fe8fa6..0000000000 --- a/packager/third_party/curl/config/linux/curl_config.h +++ /dev/null @@ -1,1045 +0,0 @@ -/* lib/curl_config.h. Generated from curl_config.h.in by configure. */ -/* lib/curl_config.h.in. Generated from configure.ac by autoheader. 
*/ - -/* Location of default ca bundle */ -/* #undef CURL_CA_BUNDLE */ - -/* define "1" to use built in CA store of SSL library */ -/* #undef CURL_CA_FALLBACK */ - -/* Location of default ca path */ -/* #undef CURL_CA_PATH */ - -/* Default SSL backend */ -/* #undef CURL_DEFAULT_SSL_BACKEND */ - -/* to disable cookies support */ -/* #undef CURL_DISABLE_COOKIES */ - -/* to disable cryptographic authentication */ -/* #undef CURL_DISABLE_CRYPTO_AUTH */ - -/* to disable DICT */ -/* #undef CURL_DISABLE_DICT */ - -/* to disable FILE */ -/* #undef CURL_DISABLE_FILE */ - -/* to disable FTP */ -/* #undef CURL_DISABLE_FTP */ - -/* to disable Gopher */ -/* #undef CURL_DISABLE_GOPHER */ - -/* to disable HTTP */ -/* #undef CURL_DISABLE_HTTP */ - -/* to disable IMAP */ -/* #undef CURL_DISABLE_IMAP */ - -/* to disable LDAP */ -/* #undef CURL_DISABLE_LDAP */ - -/* to disable LDAPS */ -/* #undef CURL_DISABLE_LDAPS */ - -/* to disable --libcurl C code generation option */ -/* #undef CURL_DISABLE_LIBCURL_OPTION */ - -/* to disable POP3 */ -/* #undef CURL_DISABLE_POP3 */ - -/* to disable proxies */ -/* #undef CURL_DISABLE_PROXY */ - -/* to disable RTSP */ -/* #undef CURL_DISABLE_RTSP */ - -/* to disable SMB/CIFS */ -/* #undef CURL_DISABLE_SMB */ - -/* to disable SMTP */ -/* #undef CURL_DISABLE_SMTP */ - -/* to disable TELNET */ -/* #undef CURL_DISABLE_TELNET */ - -/* to disable TFTP */ -/* #undef CURL_DISABLE_TFTP */ - -/* to disable TLS-SRP authentication */ -/* #undef CURL_DISABLE_TLS_SRP */ - -/* to disable verbose strings */ -/* #undef CURL_DISABLE_VERBOSE_STRINGS */ - -/* Definition to make a library symbol externally visible. */ -#define CURL_EXTERN_SYMBOL __attribute__ ((__visibility__ ("default"))) - -/* built with multiple SSL backends */ -/* #undef CURL_WITH_MULTI_SSL */ - -/* your Entropy Gathering Daemon socket pathname */ -/* #undef EGD_SOCKET */ - -/* Define if you want to enable IPv6 support */ -#define ENABLE_IPV6 1 - -/* Define to the type of arg 2 for gethostname. 
*/ -#define GETHOSTNAME_TYPE_ARG2 size_t - -/* Define to the type qualifier of arg 1 for getnameinfo. */ -#define GETNAMEINFO_QUAL_ARG1 const - -/* Define to the type of arg 1 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG1 struct sockaddr * - -/* Define to the type of arg 2 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG2 socklen_t - -/* Define to the type of args 4 and 6 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG46 socklen_t - -/* Define to the type of arg 7 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG7 int - -/* Specifies the number of arguments to getservbyport_r */ -#define GETSERVBYPORT_R_ARGS 6 - -/* Specifies the size of the buffer to pass to getservbyport_r */ -#define GETSERVBYPORT_R_BUFSIZE 4096 - -/* Define to 1 if you have the alarm function. */ -#define HAVE_ALARM 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ALLOCA_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_INET_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_TFTP_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ASSERT_H 1 - -/* Define to 1 if you have the basename function. */ -#define HAVE_BASENAME 1 - -/* Define to 1 if bool is an available type. */ -#define HAVE_BOOL_T 1 - -/* Define to 1 if using BoringSSL. */ -/* packager uses BORINGSSL. */ -#define HAVE_BORINGSSL 1 -/* Uses RSA_flags which does not exist in boringssl */ -#define OPENSSL_NO_RSA 1 - -/* Define to 1 if you have the __builtin_available function. */ -/* #undef HAVE_BUILTIN_AVAILABLE */ - -/* Define to 1 if you have the clock_gettime function and monotonic timer. */ -/* Disabled for packager. */ -/* #undef HAVE_CLOCK_GETTIME_MONOTONIC */ - -/* Define to 1 if you have the closesocket function. */ -/* #undef HAVE_CLOSESOCKET */ - -/* Define to 1 if you have the CloseSocket camel case function. */ -/* #undef HAVE_CLOSESOCKET_CAMEL */ - -/* Define to 1 if you have the connect function. 
*/ -#define HAVE_CONNECT 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CRYPTO_H */ - -/* Define to 1 if you have the `CyaSSL_CTX_UseSupportedCurve' function. */ -/* #undef HAVE_CYASSL_CTX_USESUPPORTEDCURVE */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CYASSL_ERROR_SSL_H */ - -/* Define to 1 if you have the `CyaSSL_get_peer_certificate' function. */ -/* #undef HAVE_CYASSL_GET_PEER_CERTIFICATE */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CYASSL_OPTIONS_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you have the `ENGINE_cleanup' function. */ -#ifndef HAVE_BORINGSSL -#define HAVE_ENGINE_CLEANUP 1 -#endif - -/* Define to 1 if you have the `ENGINE_load_builtin_engines' function. */ -#ifndef HAVE_BORINGSSL -#define HAVE_ENGINE_LOAD_BUILTIN_ENGINES 1 -#endif - -/* Define to 1 if you have the header file. */ -#define HAVE_ERRNO_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_ERR_H */ - -/* Define to 1 if you have the fcntl function. */ -#define HAVE_FCNTL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have a working fcntl O_NONBLOCK function. */ -#define HAVE_FCNTL_O_NONBLOCK 1 - -/* Define to 1 if you have the fdopen function. */ -#define HAVE_FDOPEN 1 - -/* Define to 1 if you have the freeaddrinfo function. */ -#define HAVE_FREEADDRINFO 1 - -/* Define to 1 if you have the freeifaddrs function. */ -#define HAVE_FREEIFADDRS 1 - -/* Define to 1 if you have the fsetxattr function. */ -#define HAVE_FSETXATTR 1 - -/* fsetxattr() takes 5 args */ -#define HAVE_FSETXATTR_5 1 - -/* fsetxattr() takes 6 args */ -/* #undef HAVE_FSETXATTR_6 */ - -/* Define to 1 if you have the ftruncate function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the gai_strerror function. */ -#define HAVE_GAI_STRERROR 1 - -/* Define to 1 if you have a working getaddrinfo function. 
*/ -#define HAVE_GETADDRINFO 1 - -/* Define to 1 if the getaddrinfo function is threadsafe. */ -#define HAVE_GETADDRINFO_THREADSAFE 1 - -/* Define to 1 if you have the `geteuid' function. */ -#define HAVE_GETEUID 1 - -/* Define to 1 if you have the gethostbyaddr function. */ -#define HAVE_GETHOSTBYADDR 1 - -/* Define to 1 if you have the gethostbyaddr_r function. */ -#define HAVE_GETHOSTBYADDR_R 1 - -/* gethostbyaddr_r() takes 5 args */ -/* #undef HAVE_GETHOSTBYADDR_R_5 */ - -/* gethostbyaddr_r() takes 7 args */ -/* #undef HAVE_GETHOSTBYADDR_R_7 */ - -/* gethostbyaddr_r() takes 8 args */ -#define HAVE_GETHOSTBYADDR_R_8 1 - -/* Define to 1 if you have the gethostbyname function. */ -#define HAVE_GETHOSTBYNAME 1 - -/* Define to 1 if you have the gethostbyname_r function. */ -#define HAVE_GETHOSTBYNAME_R 1 - -/* gethostbyname_r() takes 3 args */ -/* #undef HAVE_GETHOSTBYNAME_R_3 */ - -/* gethostbyname_r() takes 5 args */ -/* #undef HAVE_GETHOSTBYNAME_R_5 */ - -/* gethostbyname_r() takes 6 args */ -#define HAVE_GETHOSTBYNAME_R_6 1 - -/* Define to 1 if you have the gethostname function. */ -#define HAVE_GETHOSTNAME 1 - -/* Define to 1 if you have a working getifaddrs function. */ -#define HAVE_GETIFADDRS 1 - -/* Define to 1 if you have the getnameinfo function. */ -#define HAVE_GETNAMEINFO 1 - -/* Define to 1 if you have the `getpass_r' function. */ -/* #undef HAVE_GETPASS_R */ - -/* Define to 1 if you have the `getppid' function. */ -#define HAVE_GETPPID 1 - -/* Define to 1 if you have the `getpwuid' function. */ -#define HAVE_GETPWUID 1 - -/* Define to 1 if you have the `getpwuid_r' function. */ -/* Disabled for packager. Not verified yet. */ -/* #undef HAVE_GETPWUID_R */ - -/* Define to 1 if you have the `getrlimit' function. */ -#define HAVE_GETRLIMIT 1 - -/* Define to 1 if you have the getservbyport_r function. */ -#define HAVE_GETSERVBYPORT_R 1 - -/* Define to 1 if you have the `gettimeofday' function. 
*/ -#define HAVE_GETTIMEOFDAY 1 - -/* Define to 1 if you have a working glibc-style strerror_r function. */ -/* #undef HAVE_GLIBC_STRERROR_R */ - -/* Define to 1 if you have a working gmtime_r function. */ -#define HAVE_GMTIME_R 1 - -/* Define to 1 if you have the `gnutls_alpn_set_protocols' function. */ -/* #undef HAVE_GNUTLS_ALPN_SET_PROTOCOLS */ - -/* Define to 1 if you have the `gnutls_certificate_set_x509_key_file2' - function. */ -/* #undef HAVE_GNUTLS_CERTIFICATE_SET_X509_KEY_FILE2 */ - -/* Define to 1 if you have the `gnutls_ocsp_req_init' function. */ -/* #undef HAVE_GNUTLS_OCSP_REQ_INIT */ - -/* if you have the function gnutls_srp_verifier */ -/* #undef HAVE_GNUTLS_SRP */ - -/* if you have GSS-API libraries */ -/* #undef HAVE_GSSAPI */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_GSSAPI_GSSAPI_GENERIC_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_GSSAPI_GSSAPI_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_GSSAPI_GSSAPI_KRB5_H */ - -/* if you have GNU GSS */ -/* #undef HAVE_GSSGNU */ - -/* if you have Heimdal */ -/* #undef HAVE_GSSHEIMDAL */ - -/* if you have MIT Kerberos */ -/* #undef HAVE_GSSMIT */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_IDN2_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_IFADDRS_H 1 - -/* Define to 1 if you have the `if_nametoindex' function. */ -/* Disabled for packager. Not verified yet. */ -/* #undef HAVE_IF_NAMETOINDEX */ - -/* Define to 1 if you have the inet_ntoa_r function. */ -/* #undef HAVE_INET_NTOA_R */ - -/* inet_ntoa_r() takes 2 args */ -/* #undef HAVE_INET_NTOA_R_2 */ - -/* inet_ntoa_r() takes 3 args */ -/* #undef HAVE_INET_NTOA_R_3 */ - -/* Define to 1 if you have a IPv6 capable working inet_ntop function. */ -#define HAVE_INET_NTOP 1 - -/* Define to 1 if you have a IPv6 capable working inet_pton function. */ -#define HAVE_INET_PTON 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if you have the ioctl function. */ -#define HAVE_IOCTL 1 - -/* Define to 1 if you have the ioctlsocket function. */ -/* #undef HAVE_IOCTLSOCKET */ - -/* Define to 1 if you have the IoctlSocket camel case function. */ -/* #undef HAVE_IOCTLSOCKET_CAMEL */ - -/* Define to 1 if you have a working IoctlSocket camel case FIONBIO function. - */ -/* #undef HAVE_IOCTLSOCKET_CAMEL_FIONBIO */ - -/* Define to 1 if you have a working ioctlsocket FIONBIO function. */ -/* #undef HAVE_IOCTLSOCKET_FIONBIO */ - -/* Define to 1 if you have a working ioctl FIONBIO function. */ -#define HAVE_IOCTL_FIONBIO 1 - -/* Define to 1 if you have a working ioctl SIOCGIFADDR function. */ -#define HAVE_IOCTL_SIOCGIFADDR 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_IO_H */ - -/* Define to 1 if you have the lber.h header file. */ -#define HAVE_LBER_H 1 - -/* Define to 1 if you have the ldapssl.h header file. */ -/* #undef HAVE_LDAPSSL_H */ - -/* Define to 1 if you have the ldap.h header file. */ -#define HAVE_LDAP_H 1 - -/* Define to 1 if you have the `ldap_init_fd' function. */ -#define HAVE_LDAP_INIT_FD 1 - -/* Use LDAPS implementation */ -#define HAVE_LDAP_SSL 1 - -/* Define to 1 if you have the ldap_ssl.h header file. */ -/* #undef HAVE_LDAP_SSL_H */ - -/* Define to 1 if you have the `ldap_url_parse' function. */ -#define HAVE_LDAP_URL_PARSE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the `idn2' library (-lidn2). */ -/* #undef HAVE_LIBIDN2 */ - -/* Define to 1 if using libressl. */ -/* #undef HAVE_LIBRESSL */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LIBRTMP_RTMP_H */ - -/* Define to 1 if you have the `ssh2' library (-lssh2). */ -/* #undef HAVE_LIBSSH2 */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LIBSSH2_H */ - -/* Define to 1 if you have the `ssl' library (-lssl). 
*/ -#define HAVE_LIBSSL 1 - -/* if zlib is available */ -/* #undef HAVE_LIBZ */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIMITS_H 1 - -/* if your compiler supports LL */ -#define HAVE_LL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LOCALE_H 1 - -/* Define to 1 if you have a working localtime_r function. */ -#define HAVE_LOCALTIME_R 1 - -/* Define to 1 if the compiler supports the 'long long' data type. */ -#define HAVE_LONGLONG 1 - -/* Define to 1 if you have the malloc.h header file. */ -#define HAVE_MALLOC_H 1 - -/* Define to 1 if you have the memory.h header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the memrchr function or macro. */ -/* #undef HAVE_MEMRCHR */ - -/* Define to 1 if you have the MSG_NOSIGNAL flag. */ -#define HAVE_MSG_NOSIGNAL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETDB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_IN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_TCP_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NET_IF_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_NGHTTP2_NGHTTP2_H */ - -/* Define to 1 if NI_WITHSCOPEID exists and works. */ -/* #undef HAVE_NI_WITHSCOPEID */ - -/* if you have an old MIT Kerberos version, lacking GSS_C_NT_HOSTBASED_SERVICE - */ -/* #undef HAVE_OLD_GSSMIT */ - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_CRYPTO_H 1 - -/* Define to 1 if you have the header file. */ -#ifndef HAVE_BORINGSSL -#define HAVE_OPENSSL_ENGINE_H 1 -#endif - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_ERR_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_PEM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_RSA_H 1 - -/* if you have the function SRP_Calc_client_key */ -#define HAVE_OPENSSL_SRP 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_OPENSSL_SSL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_X509_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_PEM_H */ - -/* Define to 1 if you have the `pipe' function. */ -#define HAVE_PIPE 1 - -/* Define to 1 if you have a working poll function. */ -#define HAVE_POLL 1 - -/* If you have a fine poll */ -#define HAVE_POLL_FINE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_POLL_H 1 - -/* Define to 1 if you have a working POSIX-style strerror_r function. */ -#define HAVE_POSIX_STRERROR_R 1 - -/* if you have */ -#define HAVE_PTHREAD_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_PWD_H 1 - -/* Define to 1 if you have the `RAND_egd' function. */ -#define HAVE_RAND_EGD 1 - -/* Define to 1 if you have the recv function. */ -#define HAVE_RECV 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_RSA_H */ - -/* Define to 1 if you have the select function. */ -#define HAVE_SELECT 1 - -/* Define to 1 if you have the send function. */ -#define HAVE_SEND 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SETJMP_H 1 - -/* Define to 1 if you have the `setlocale' function. */ -#define HAVE_SETLOCALE 1 - -/* Define to 1 if you have the `setmode' function. */ -/* #undef HAVE_SETMODE */ - -/* Define to 1 if you have the `setrlimit' function. */ -#define HAVE_SETRLIMIT 1 - -/* Define to 1 if you have the setsockopt function. */ -#define HAVE_SETSOCKOPT 1 - -/* Define to 1 if you have a working setsockopt SO_NONBLOCK function. */ -/* #undef HAVE_SETSOCKOPT_SO_NONBLOCK */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SGTTY_H 1 - -/* Define to 1 if you have the sigaction function. */ -#define HAVE_SIGACTION 1 - -/* Define to 1 if you have the siginterrupt function. */ -#define HAVE_SIGINTERRUPT 1 - -/* Define to 1 if you have the signal function. */ -#define HAVE_SIGNAL 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_SIGNAL_H 1 - -/* Define to 1 if you have the sigsetjmp function or macro. */ -#define HAVE_SIGSETJMP 1 - -/* Define to 1 if sig_atomic_t is an available typedef. */ -#define HAVE_SIG_ATOMIC_T 1 - -/* Define to 1 if sig_atomic_t is already defined as volatile. */ -/* #undef HAVE_SIG_ATOMIC_T_VOLATILE */ - -/* Define to 1 if struct sockaddr_in6 has the sin6_scope_id member */ -#define HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID 1 - -/* Define to 1 if you have the socket function. */ -#define HAVE_SOCKET 1 - -/* Define to 1 if you have the socketpair function. */ -#define HAVE_SOCKETPAIR 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SOCKET_H */ - -/* Define to 1 if you have the `SSLv2_client_method' function. */ -/* #undef HAVE_SSLV2_CLIENT_METHOD */ - -/* Define to 1 if you have the `SSL_get_shutdown' function. */ -#define HAVE_SSL_GET_SHUTDOWN 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SSL_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STDBOOL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the strcasecmp function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the strcmpi function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the strdup function. */ -#define HAVE_STRDUP 1 - -/* Define to 1 if you have the strerror_r function. */ -#define HAVE_STRERROR_R 1 - -/* Define to 1 if you have the stricmp function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the strncasecmp function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the strncmpi function. 
*/ -/* #undef HAVE_STRNCMPI */ - -/* Define to 1 if you have the strnicmp function. */ -/* #undef HAVE_STRNICMP */ - -/* Define to 1 if you have the header file. */ -/* Disabled for packager as it breaks some linux distros. */ -/* #undef HAVE_STROPTS_H */ - -/* Define to 1 if you have the strstr function. */ -#define HAVE_STRSTR 1 - -/* Define to 1 if you have the strtok_r function. */ -#define HAVE_STRTOK_R 1 - -/* Define to 1 if you have the strtoll function. */ -#define HAVE_STRTOLL 1 - -/* if struct sockaddr_storage is defined */ -#define HAVE_STRUCT_SOCKADDR_STORAGE 1 - -/* Define to 1 if you have the timeval struct. */ -#define HAVE_STRUCT_TIMEVAL 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_FILIO_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_IOCTL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_PARAM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_POLL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_RESOURCE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SELECT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKET_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_SOCKIO_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UN_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_UTIME_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_WAIT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_XATTR_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_TERMIOS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_TERMIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `utime' function. */ -#define HAVE_UTIME 1 - -/* Define to 1 if you have the `utimes' function. */ -#define HAVE_UTIMES 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UTIME_H 1 - -/* Define to 1 if compiler supports C99 variadic macro style. */ -#define HAVE_VARIADIC_MACROS_C99 1 - -/* Define to 1 if compiler supports old gcc variadic macro style. */ -#define HAVE_VARIADIC_MACROS_GCC 1 - -/* Define to 1 if you have the winber.h header file. */ -/* #undef HAVE_WINBER_H */ - -/* Define to 1 if you have the windows.h header file. */ -/* #undef HAVE_WINDOWS_H */ - -/* Define to 1 if you have the winldap.h header file. */ -/* #undef HAVE_WINLDAP_H */ - -/* Define to 1 if you have the winsock2.h header file. */ -/* #undef HAVE_WINSOCK2_H */ - -/* Define to 1 if you have the winsock.h header file. */ -/* #undef HAVE_WINSOCK_H */ - -/* Define to 1 if you have the `wolfSSLv3_client_method' function. */ -/* #undef HAVE_WOLFSSLV3_CLIENT_METHOD */ - -/* Define to 1 if you have the `wolfSSL_CTX_UseSupportedCurve' function. */ -/* #undef HAVE_WOLFSSL_CTX_USESUPPORTEDCURVE */ - -/* Define to 1 if you have the `wolfSSL_get_peer_certificate' function. */ -/* #undef HAVE_WOLFSSL_GET_PEER_CERTIFICATE */ - -/* Define to 1 if you have the `wolfSSL_UseALPN' function. */ -/* #undef HAVE_WOLFSSL_USEALPN */ - -/* Define this symbol if your OS supports changing the contents of argv */ -#define HAVE_WRITABLE_ARGV 1 - -/* Define to 1 if you have the writev function. */ -#define HAVE_WRITEV 1 - -/* Define to 1 if you have the ws2tcpip.h header file. */ -/* #undef HAVE_WS2TCPIP_H */ - -/* Define to 1 if you have the header file. 
*/ -/* #undef HAVE_X509_H */ - -/* if you have the zlib.h header file */ -/* #undef HAVE_ZLIB_H */ - -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ -#define LT_OBJDIR ".libs/" - -/* Define to 1 if you need the lber.h header file even with ldap.h */ -/* #undef NEED_LBER_H */ - -/* Define to 1 if you need the malloc.h header file even with stdlib.h */ -/* #undef NEED_MALLOC_H */ - -/* Define to 1 if you need the memory.h header file even with stdlib.h */ -/* #undef NEED_MEMORY_H */ - -/* Define to 1 if _REENTRANT preprocessor symbol must be defined. */ -/* #undef NEED_REENTRANT */ - -/* Define to 1 if _THREAD_SAFE preprocessor symbol must be defined. */ -/* #undef NEED_THREAD_SAFE */ - -/* Define to enable NTLM delegation to winbind's ntlm_auth helper. */ -#define NTLM_WB_ENABLED 1 - -/* Define absolute filename for winbind's ntlm_auth helper. */ -#define NTLM_WB_FILE "/usr/bin/ntlm_auth" - -/* cpu-machine-OS */ -#define OS "x86_64-unknown-linux-gnu" - -/* Name of package */ -#define PACKAGE "curl" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT \ - "a suitable curl mailing list: https://curl.haxx.se/mail/" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "curl" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "curl -" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "curl" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "-" - -/* a suitable file to read random data from */ -#define RANDOM_FILE "/dev/urandom" - -/* Define to the type of arg 1 for recv. */ -#define RECV_TYPE_ARG1 int - -/* Define to the type of arg 2 for recv. */ -#define RECV_TYPE_ARG2 void * - -/* Define to the type of arg 3 for recv. 
*/ -#define RECV_TYPE_ARG3 size_t - -/* Define to the type of arg 4 for recv. */ -#define RECV_TYPE_ARG4 int - -/* Define to the function return type for recv. */ -#define RECV_TYPE_RETV ssize_t - -/* Define as the return type of signal handlers (`int' or `void'). */ -#define RETSIGTYPE void - -/* Define to the type qualifier of arg 5 for select. */ -#define SELECT_QUAL_ARG5 - -/* Define to the type of arg 1 for select. */ -#define SELECT_TYPE_ARG1 int - -/* Define to the type of args 2, 3 and 4 for select. */ -#define SELECT_TYPE_ARG234 fd_set * - -/* Define to the type of arg 5 for select. */ -#define SELECT_TYPE_ARG5 struct timeval * - -/* Define to the function return type for select. */ -#define SELECT_TYPE_RETV int - -/* Define to the type qualifier of arg 2 for send. */ -#define SEND_QUAL_ARG2 const - -/* Define to the type of arg 1 for send. */ -#define SEND_TYPE_ARG1 int - -/* Define to the type of arg 2 for send. */ -#define SEND_TYPE_ARG2 void * - -/* Define to the type of arg 3 for send. */ -#define SEND_TYPE_ARG3 size_t - -/* Define to the type of arg 4 for send. */ -#define SEND_TYPE_ARG4 int - -/* Define to the function return type for send. */ -#define SEND_TYPE_RETV ssize_t - -/* The size of `curl_off_t', as computed by sizeof. */ -#define SIZEOF_CURL_OFF_T 8 - -/* The size of `int', as computed by sizeof. */ -#define SIZEOF_INT 4 - -/* The size of `long', as computed by sizeof. */ -#define SIZEOF_LONG 8 - -/* The size of `long long', as computed by sizeof. */ -/* #undef SIZEOF_LONG_LONG */ - -/* The size of `off_t', as computed by sizeof. */ -#define SIZEOF_OFF_T 8 - -/* The size of `short', as computed by sizeof. */ -#define SIZEOF_SHORT 2 - -/* The size of `size_t', as computed by sizeof. */ -#define SIZEOF_SIZE_T 8 - -/* The size of `time_t', as computed by sizeof. */ -#define SIZEOF_TIME_T 8 - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to the type of arg 3 for strerror_r. 
*/ -#define STRERROR_R_TYPE_ARG3 size_t - -/* Define to 1 if you can safely include both and . */ -#define TIME_WITH_SYS_TIME 1 - -/* Define to enable c-ares support */ -/* #undef USE_ARES */ - -/* if axTLS is enabled */ -/* #undef USE_AXTLS */ - -/* if CyaSSL/WolfSSL is enabled */ -/* #undef USE_CYASSL */ - -/* to enable Apple OS native SSL/TLS support */ -/* #undef USE_DARWINSSL */ - -/* if GnuTLS is enabled */ -/* #undef USE_GNUTLS */ - -/* if GnuTLS uses nettle as crypto backend */ -/* #undef USE_GNUTLS_NETTLE */ - -/* PSL support enabled */ -/* #undef USE_LIBPSL */ - -/* if librtmp is in use */ -/* #undef USE_LIBRTMP */ - -/* if libSSH2 is in use */ -/* #undef USE_LIBSSH2 */ - -/* If you want to build curl with the built-in manual */ -#define USE_MANUAL 1 - -/* if mbedTLS is enabled */ -/* #undef USE_MBEDTLS */ - -/* Define to enable metalink support */ -/* #undef USE_METALINK */ - -/* if nghttp2 is in use */ -/* #undef USE_NGHTTP2 */ - -/* if NSS is enabled */ -/* #undef USE_NSS */ - -/* Use OpenLDAP-specific code */ -#define USE_OPENLDAP 1 - -/* if OpenSSL is in use */ -#define USE_OPENSSL 1 - -/* if PolarSSL is enabled */ -/* #undef USE_POLARSSL */ - -/* to enable Windows native SSL/TLS support */ -/* #undef USE_SCHANNEL */ - -/* if you want POSIX threaded DNS lookup */ -#define USE_THREADS_POSIX 1 - -/* if you want Win32 threaded DNS lookup */ -/* #undef USE_THREADS_WIN32 */ - -/* Use TLS-SRP authentication */ -#ifndef HAVE_BORINGSSL -#define USE_TLS_SRP 1 -#endif - -/* Use Unix domain sockets */ -#define USE_UNIX_SOCKETS 1 - -/* Define to 1 if you have the `normaliz' (WinIDN) library (-lnormaliz). */ -/* #undef USE_WIN32_IDN */ - -/* Define to 1 if you are building a Windows target with large file support. - */ -/* #undef USE_WIN32_LARGE_FILES */ - -/* Use Windows LDAP implementation */ -/* #undef USE_WIN32_LDAP */ - -/* Define to 1 if you are building a Windows target without large file - support. 
*/ -/* #undef USE_WIN32_SMALL_FILES */ - -/* to enable SSPI support */ -/* #undef USE_WINDOWS_SSPI */ - -/* Version number of package */ -#define VERSION "-" - -/* Define to 1 to provide own prototypes. */ -/* #undef WANT_IDN_PROTOTYPES */ - -/* Define to 1 if OS is AIX. */ -#ifndef _ALL_SOURCE -/* # undef _ALL_SOURCE */ -#endif - -/* Enable large inode numbers on Mac OS X 10.5. */ -#ifndef _DARWIN_USE_64_BIT_INODE -# define _DARWIN_USE_64_BIT_INODE 1 -#endif - -/* Number of bits in a file offset, on hosts where this is settable. */ -/* #undef _FILE_OFFSET_BITS */ - -/* Define for large files, on AIX-style hosts. */ -/* #undef _LARGE_FILES */ - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Type to use in place of in_addr_t when system does not provide it. */ -/* #undef in_addr_t */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ - -/* the signed version of size_t */ -/* #undef ssize_t */ diff --git a/packager/third_party/curl/config/linux/find_curl_ca_bundle.sh b/packager/third_party/curl/config/linux/find_curl_ca_bundle.sh deleted file mode 100755 index 24b6f1fcd9..0000000000 --- a/packager/third_party/curl/config/linux/find_curl_ca_bundle.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -e - -# Copyright 2015 Google Inc. All rights reserved. -# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd - -# Scan ca bundle in its common appearing locations. 
-paths=('/etc/pki/tls/certs/ca-bundle.crt' - '/etc/ssl/ca-bundle.pem' - '/etc/ssl/cert.pem' - '/etc/ssl/certs/ca-bundle.crt' - '/etc/ssl/certs/ca-certificates.crt' - '/usr/local/share/certs/ca-root.crt' - '/usr/share/ssl/certs/ca-bundle.crt') - -for path in "${paths[@]}"; do - if test -f "$path"; then - echo "$path" - exit 0 - fi -done - -echo 'Failed to locate SSL CA cert.' -exit 1 diff --git a/packager/third_party/curl/config/mac/curl_config.h b/packager/third_party/curl/config/mac/curl_config.h deleted file mode 100644 index dc1555825c..0000000000 --- a/packager/third_party/curl/config/mac/curl_config.h +++ /dev/null @@ -1,1043 +0,0 @@ -/* lib/curl_config.h. Generated from curl_config.h.in by configure. */ -/* lib/curl_config.h.in. Generated from configure.ac by autoheader. */ - -/* Location of default ca bundle */ -/* #undef CURL_CA_BUNDLE */ - -/* define "1" to use built in CA store of SSL library */ -/* #undef CURL_CA_FALLBACK */ - -/* Location of default ca path */ -/* #undef CURL_CA_PATH */ - -/* Default SSL backend */ -/* #undef CURL_DEFAULT_SSL_BACKEND */ - -/* to disable cookies support */ -/* #undef CURL_DISABLE_COOKIES */ - -/* to disable cryptographic authentication */ -/* #undef CURL_DISABLE_CRYPTO_AUTH */ - -/* to disable DICT */ -/* #undef CURL_DISABLE_DICT */ - -/* to disable FILE */ -/* #undef CURL_DISABLE_FILE */ - -/* to disable FTP */ -/* #undef CURL_DISABLE_FTP */ - -/* to disable Gopher */ -/* #undef CURL_DISABLE_GOPHER */ - -/* to disable HTTP */ -/* #undef CURL_DISABLE_HTTP */ - -/* to disable IMAP */ -/* #undef CURL_DISABLE_IMAP */ - -/* to disable LDAP */ -/* #undef CURL_DISABLE_LDAP */ - -/* to disable LDAPS */ -/* #undef CURL_DISABLE_LDAPS */ - -/* to disable --libcurl C code generation option */ -/* #undef CURL_DISABLE_LIBCURL_OPTION */ - -/* to disable POP3 */ -/* #undef CURL_DISABLE_POP3 */ - -/* to disable proxies */ -/* #undef CURL_DISABLE_PROXY */ - -/* to disable RTSP */ -/* #undef CURL_DISABLE_RTSP */ - -/* to disable SMB/CIFS 
*/ -/* #undef CURL_DISABLE_SMB */ - -/* to disable SMTP */ -/* #undef CURL_DISABLE_SMTP */ - -/* to disable TELNET */ -/* #undef CURL_DISABLE_TELNET */ - -/* to disable TFTP */ -/* #undef CURL_DISABLE_TFTP */ - -/* to disable TLS-SRP authentication */ -/* #undef CURL_DISABLE_TLS_SRP */ - -/* to disable verbose strings */ -/* #undef CURL_DISABLE_VERBOSE_STRINGS */ - -/* Definition to make a library symbol externally visible. */ -#define CURL_EXTERN_SYMBOL __attribute__ ((__visibility__ ("default"))) - -/* built with multiple SSL backends */ -/* #undef CURL_WITH_MULTI_SSL */ - -/* your Entropy Gathering Daemon socket pathname */ -/* #undef EGD_SOCKET */ - -/* Define if you want to enable IPv6 support */ -#define ENABLE_IPV6 1 - -/* Define to the type of arg 2 for gethostname. */ -#define GETHOSTNAME_TYPE_ARG2 size_t - -/* Define to the type qualifier of arg 1 for getnameinfo. */ -#define GETNAMEINFO_QUAL_ARG1 const - -/* Define to the type of arg 1 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG1 struct sockaddr * - -/* Define to the type of arg 2 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG2 socklen_t - -/* Define to the type of args 4 and 6 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG46 socklen_t - -/* Define to the type of arg 7 for getnameinfo. */ -#define GETNAMEINFO_TYPE_ARG7 int - -/* Specifies the number of arguments to getservbyport_r */ -/* #undef GETSERVBYPORT_R_ARGS */ - -/* Specifies the size of the buffer to pass to getservbyport_r */ -/* #undef GETSERVBYPORT_R_BUFSIZE */ - -/* Define to 1 if you have the alarm function. */ -#define HAVE_ALARM 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ALLOCA_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_INET_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ARPA_TFTP_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_ASSERT_H 1 - -/* Define to 1 if you have the basename function. 
*/ -#define HAVE_BASENAME 1 - -/* Define to 1 if bool is an available type. */ -#define HAVE_BOOL_T 1 - -/* Define to 1 if using BoringSSL. */ -/* packager uses BORINGSSL. */ -#define HAVE_BORINGSSL 1 -/* Uses RSA_flags which does not exist in boringssl */ -#define OPENSSL_NO_RSA 1 - -/* Define to 1 if you have the __builtin_available function. */ -/* #undef HAVE_BUILTIN_AVAILABLE */ - -/* Define to 1 if you have the clock_gettime function and monotonic timer. */ -/* Disabled for packager. */ -/* #undef HAVE_CLOCK_GETTIME_MONOTONIC */ - -/* Define to 1 if you have the closesocket function. */ -/* #undef HAVE_CLOSESOCKET */ - -/* Define to 1 if you have the CloseSocket camel case function. */ -/* #undef HAVE_CLOSESOCKET_CAMEL */ - -/* Define to 1 if you have the connect function. */ -#define HAVE_CONNECT 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CRYPTO_H */ - -/* Define to 1 if you have the `CyaSSL_CTX_UseSupportedCurve' function. */ -/* #undef HAVE_CYASSL_CTX_USESUPPORTEDCURVE */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CYASSL_ERROR_SSL_H */ - -/* Define to 1 if you have the `CyaSSL_get_peer_certificate' function. */ -/* #undef HAVE_CYASSL_GET_PEER_CERTIFICATE */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_CYASSL_OPTIONS_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you have the `ENGINE_cleanup' function. */ -#ifndef HAVE_BORINGSSL -#define HAVE_ENGINE_CLEANUP 1 -#endif - -/* Define to 1 if you have the `ENGINE_load_builtin_engines' function. */ -#ifndef HAVE_BORINGSSL -#define HAVE_ENGINE_LOAD_BUILTIN_ENGINES 1 -#endif - -/* Define to 1 if you have the header file. */ -#define HAVE_ERRNO_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_ERR_H */ - -/* Define to 1 if you have the fcntl function. */ -#define HAVE_FCNTL 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have a working fcntl O_NONBLOCK function. */ -#define HAVE_FCNTL_O_NONBLOCK 1 - -/* Define to 1 if you have the fdopen function. */ -#define HAVE_FDOPEN 1 - -/* Define to 1 if you have the freeaddrinfo function. */ -#define HAVE_FREEADDRINFO 1 - -/* Define to 1 if you have the freeifaddrs function. */ -#define HAVE_FREEIFADDRS 1 - -/* Define to 1 if you have the fsetxattr function. */ -#define HAVE_FSETXATTR 1 - -/* fsetxattr() takes 5 args */ -/* #undef HAVE_FSETXATTR_5 */ - -/* fsetxattr() takes 6 args */ -#define HAVE_FSETXATTR_6 1 - -/* Define to 1 if you have the ftruncate function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the gai_strerror function. */ -#define HAVE_GAI_STRERROR 1 - -/* Define to 1 if you have a working getaddrinfo function. */ -#define HAVE_GETADDRINFO 1 - -/* Define to 1 if the getaddrinfo function is threadsafe. */ -#define HAVE_GETADDRINFO_THREADSAFE 1 - -/* Define to 1 if you have the `geteuid' function. */ -#define HAVE_GETEUID 1 - -/* Define to 1 if you have the gethostbyaddr function. */ -#define HAVE_GETHOSTBYADDR 1 - -/* Define to 1 if you have the gethostbyaddr_r function. */ -/* #undef HAVE_GETHOSTBYADDR_R */ - -/* gethostbyaddr_r() takes 5 args */ -/* #undef HAVE_GETHOSTBYADDR_R_5 */ - -/* gethostbyaddr_r() takes 7 args */ -/* #undef HAVE_GETHOSTBYADDR_R_7 */ - -/* gethostbyaddr_r() takes 8 args */ -/* #undef HAVE_GETHOSTBYADDR_R_8 */ - -/* Define to 1 if you have the gethostbyname function. */ -#define HAVE_GETHOSTBYNAME 1 - -/* Define to 1 if you have the gethostbyname_r function. */ -/* #undef HAVE_GETHOSTBYNAME_R */ - -/* gethostbyname_r() takes 3 args */ -/* #undef HAVE_GETHOSTBYNAME_R_3 */ - -/* gethostbyname_r() takes 5 args */ -/* #undef HAVE_GETHOSTBYNAME_R_5 */ - -/* gethostbyname_r() takes 6 args */ -/* #undef HAVE_GETHOSTBYNAME_R_6 */ - -/* Define to 1 if you have the gethostname function. 
*/ -#define HAVE_GETHOSTNAME 1 - -/* Define to 1 if you have a working getifaddrs function. */ -#define HAVE_GETIFADDRS 1 - -/* Define to 1 if you have the getnameinfo function. */ -#define HAVE_GETNAMEINFO 1 - -/* Define to 1 if you have the `getpass_r' function. */ -/* #undef HAVE_GETPASS_R */ - -/* Define to 1 if you have the `getppid' function. */ -#define HAVE_GETPPID 1 - -/* Define to 1 if you have the `getpwuid' function. */ -#define HAVE_GETPWUID 1 - -/* Define to 1 if you have the `getpwuid_r' function. */ -/* Disabled for packager. Not verified yet. */ -/* #undef HAVE_GETPWUID_R */ - -/* Define to 1 if you have the `getrlimit' function. */ -#define HAVE_GETRLIMIT 1 - -/* Define to 1 if you have the getservbyport_r function. */ -/* #undef HAVE_GETSERVBYPORT_R */ - -/* Define to 1 if you have the `gettimeofday' function. */ -#define HAVE_GETTIMEOFDAY 1 - -/* Define to 1 if you have a working glibc-style strerror_r function. */ -/* #undef HAVE_GLIBC_STRERROR_R */ - -/* Define to 1 if you have a working gmtime_r function. */ -#define HAVE_GMTIME_R 1 - -/* Define to 1 if you have the `gnutls_alpn_set_protocols' function. */ -/* #undef HAVE_GNUTLS_ALPN_SET_PROTOCOLS */ - -/* Define to 1 if you have the `gnutls_certificate_set_x509_key_file2' - function. */ -/* #undef HAVE_GNUTLS_CERTIFICATE_SET_X509_KEY_FILE2 */ - -/* Define to 1 if you have the `gnutls_ocsp_req_init' function. */ -/* #undef HAVE_GNUTLS_OCSP_REQ_INIT */ - -/* if you have the function gnutls_srp_verifier */ -/* #undef HAVE_GNUTLS_SRP */ - -/* if you have GSS-API libraries */ -/* #undef HAVE_GSSAPI */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_GSSAPI_GSSAPI_GENERIC_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_GSSAPI_GSSAPI_H */ - -/* Define to 1 if you have the header file. 
*/ -/* #undef HAVE_GSSAPI_GSSAPI_KRB5_H */ - -/* if you have GNU GSS */ -/* #undef HAVE_GSSGNU */ - -/* if you have Heimdal */ -/* #undef HAVE_GSSHEIMDAL */ - -/* if you have MIT Kerberos */ -/* #undef HAVE_GSSMIT */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_IDN2_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_IFADDRS_H 1 - -/* Define to 1 if you have the `if_nametoindex' function. */ -/* Disabled for packager. Not verified yet. */ -/* #undef HAVE_IF_NAMETOINDEX */ - -/* Define to 1 if you have the inet_ntoa_r function. */ -/* #undef HAVE_INET_NTOA_R */ - -/* inet_ntoa_r() takes 2 args */ -/* #undef HAVE_INET_NTOA_R_2 */ - -/* inet_ntoa_r() takes 3 args */ -/* #undef HAVE_INET_NTOA_R_3 */ - -/* Define to 1 if you have a IPv6 capable working inet_ntop function. */ -#define HAVE_INET_NTOP 1 - -/* Define to 1 if you have a IPv6 capable working inet_pton function. */ -#define HAVE_INET_PTON 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if you have the ioctl function. */ -#define HAVE_IOCTL 1 - -/* Define to 1 if you have the ioctlsocket function. */ -/* #undef HAVE_IOCTLSOCKET */ - -/* Define to 1 if you have the IoctlSocket camel case function. */ -/* #undef HAVE_IOCTLSOCKET_CAMEL */ - -/* Define to 1 if you have a working IoctlSocket camel case FIONBIO function. - */ -/* #undef HAVE_IOCTLSOCKET_CAMEL_FIONBIO */ - -/* Define to 1 if you have a working ioctlsocket FIONBIO function. */ -/* #undef HAVE_IOCTLSOCKET_FIONBIO */ - -/* Define to 1 if you have a working ioctl FIONBIO function. */ -#define HAVE_IOCTL_FIONBIO 1 - -/* Define to 1 if you have a working ioctl SIOCGIFADDR function. */ -#define HAVE_IOCTL_SIOCGIFADDR 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_IO_H */ - -/* Define to 1 if you have the lber.h header file. */ -#define HAVE_LBER_H 1 - -/* Define to 1 if you have the ldapssl.h header file. 
*/ -/* #undef HAVE_LDAPSSL_H */ - -/* Define to 1 if you have the ldap.h header file. */ -#define HAVE_LDAP_H 1 - -/* Define to 1 if you have the `ldap_init_fd' function. */ -/* #undef HAVE_LDAP_INIT_FD */ - -/* Use LDAPS implementation */ -#define HAVE_LDAP_SSL 1 - -/* Define to 1 if you have the ldap_ssl.h header file. */ -/* #undef HAVE_LDAP_SSL_H */ - -/* Define to 1 if you have the `ldap_url_parse' function. */ -#define HAVE_LDAP_URL_PARSE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the `idn2' library (-lidn2). */ -/* #undef HAVE_LIBIDN2 */ - -/* Define to 1 if using libressl. */ -/* #undef HAVE_LIBRESSL */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LIBRTMP_RTMP_H */ - -/* Define to 1 if you have the `ssh2' library (-lssh2). */ -/* #undef HAVE_LIBSSH2 */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LIBSSH2_H */ - -/* Define to 1 if you have the `ssl' library (-lssl). */ -#define HAVE_LIBSSL 1 - -/* if zlib is available */ -/* #undef HAVE_LIBZ */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIMITS_H 1 - -/* if your compiler supports LL */ -#define HAVE_LL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LOCALE_H 1 - -/* Define to 1 if you have a working localtime_r function. */ -#define HAVE_LOCALTIME_R 1 - -/* Define to 1 if the compiler supports the 'long long' data type. */ -#define HAVE_LONGLONG 1 - -/* Define to 1 if you have the malloc.h header file. */ -/* #undef HAVE_MALLOC_H */ - -/* Define to 1 if you have the memory.h header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the memrchr function or macro. */ -/* #undef HAVE_MEMRCHR */ - -/* Define to 1 if you have the MSG_NOSIGNAL flag. */ -/* #undef HAVE_MSG_NOSIGNAL */ - -/* Define to 1 if you have the header file. */ -#define HAVE_NETDB_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_NETINET_IN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NETINET_TCP_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_NET_IF_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_NGHTTP2_NGHTTP2_H */ - -/* Define to 1 if NI_WITHSCOPEID exists and works. */ -/* #undef HAVE_NI_WITHSCOPEID */ - -/* if you have an old MIT Kerberos version, lacking GSS_C_NT_HOSTBASED_SERVICE - */ -/* #undef HAVE_OLD_GSSMIT */ - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_CRYPTO_H 1 - -/* Define to 1 if you have the header file. */ -#ifndef HAVE_BORINGSSL -#define HAVE_OPENSSL_ENGINE_H 1 -#endif - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_ERR_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_PEM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_RSA_H 1 - -/* if you have the function SRP_Calc_client_key */ -#define HAVE_OPENSSL_SRP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_SSL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_OPENSSL_X509_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_PEM_H */ - -/* Define to 1 if you have the `pipe' function. */ -#define HAVE_PIPE 1 - -/* Define to 1 if you have a working poll function. */ -#define HAVE_POLL 1 - -/* If you have a fine poll */ -#define HAVE_POLL_FINE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_POLL_H 1 - -/* Define to 1 if you have a working POSIX-style strerror_r function. */ -#define HAVE_POSIX_STRERROR_R 1 - -/* if you have */ -#define HAVE_PTHREAD_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_PWD_H 1 - -/* Define to 1 if you have the `RAND_egd' function. */ -#define HAVE_RAND_EGD 1 - -/* Define to 1 if you have the recv function. */ -#define HAVE_RECV 1 - -/* Define to 1 if you have the header file. 
*/ -/* #undef HAVE_RSA_H */ - -/* Define to 1 if you have the select function. */ -#define HAVE_SELECT 1 - -/* Define to 1 if you have the send function. */ -#define HAVE_SEND 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SETJMP_H 1 - -/* Define to 1 if you have the `setlocale' function. */ -#define HAVE_SETLOCALE 1 - -/* Define to 1 if you have the `setmode' function. */ -#define HAVE_SETMODE 1 - -/* Define to 1 if you have the `setrlimit' function. */ -#define HAVE_SETRLIMIT 1 - -/* Define to 1 if you have the setsockopt function. */ -#define HAVE_SETSOCKOPT 1 - -/* Define to 1 if you have a working setsockopt SO_NONBLOCK function. */ -/* #undef HAVE_SETSOCKOPT_SO_NONBLOCK */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SGTTY_H 1 - -/* Define to 1 if you have the sigaction function. */ -#define HAVE_SIGACTION 1 - -/* Define to 1 if you have the siginterrupt function. */ -#define HAVE_SIGINTERRUPT 1 - -/* Define to 1 if you have the signal function. */ -#define HAVE_SIGNAL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SIGNAL_H 1 - -/* Define to 1 if you have the sigsetjmp function or macro. */ -#define HAVE_SIGSETJMP 1 - -/* Define to 1 if sig_atomic_t is an available typedef. */ -#define HAVE_SIG_ATOMIC_T 1 - -/* Define to 1 if sig_atomic_t is already defined as volatile. */ -/* #undef HAVE_SIG_ATOMIC_T_VOLATILE */ - -/* Define to 1 if struct sockaddr_in6 has the sin6_scope_id member */ -#define HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID 1 - -/* Define to 1 if you have the socket function. */ -#define HAVE_SOCKET 1 - -/* Define to 1 if you have the socketpair function. */ -#define HAVE_SOCKETPAIR 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SOCKET_H */ - -/* Define to 1 if you have the `SSLv2_client_method' function. */ -#define HAVE_SSLV2_CLIENT_METHOD 1 - -/* Define to 1 if you have the `SSL_get_shutdown' function. 
*/ -#define HAVE_SSL_GET_SHUTDOWN 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SSL_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STDBOOL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the strcasecmp function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the strcmpi function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the strdup function. */ -#define HAVE_STRDUP 1 - -/* Define to 1 if you have the strerror_r function. */ -#define HAVE_STRERROR_R 1 - -/* Define to 1 if you have the stricmp function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the strncasecmp function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the strncmpi function. */ -/* #undef HAVE_STRNCMPI */ - -/* Define to 1 if you have the strnicmp function. */ -/* #undef HAVE_STRNICMP */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_STROPTS_H */ - -/* Define to 1 if you have the strstr function. */ -#define HAVE_STRSTR 1 - -/* Define to 1 if you have the strtok_r function. */ -#define HAVE_STRTOK_R 1 - -/* Define to 1 if you have the strtoll function. */ -#define HAVE_STRTOLL 1 - -/* if struct sockaddr_storage is defined */ -#define HAVE_STRUCT_SOCKADDR_STORAGE 1 - -/* Define to 1 if you have the timeval struct. */ -#define HAVE_STRUCT_TIMEVAL 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_FILIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_IOCTL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_PARAM_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_SYS_POLL_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_RESOURCE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SELECT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKET_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_SOCKIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UIO_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_UN_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_UTIME_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_WAIT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_XATTR_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_TERMIOS_H 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_TERMIO_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `utime' function. */ -#define HAVE_UTIME 1 - -/* Define to 1 if you have the `utimes' function. */ -#define HAVE_UTIMES 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UTIME_H 1 - -/* Define to 1 if compiler supports C99 variadic macro style. */ -#define HAVE_VARIADIC_MACROS_C99 1 - -/* Define to 1 if compiler supports old gcc variadic macro style. */ -#define HAVE_VARIADIC_MACROS_GCC 1 - -/* Define to 1 if you have the winber.h header file. */ -/* #undef HAVE_WINBER_H */ - -/* Define to 1 if you have the windows.h header file. */ -/* #undef HAVE_WINDOWS_H */ - -/* Define to 1 if you have the winldap.h header file. 
*/ -/* #undef HAVE_WINLDAP_H */ - -/* Define to 1 if you have the winsock2.h header file. */ -/* #undef HAVE_WINSOCK2_H */ - -/* Define to 1 if you have the winsock.h header file. */ -/* #undef HAVE_WINSOCK_H */ - -/* Define to 1 if you have the `wolfSSLv3_client_method' function. */ -/* #undef HAVE_WOLFSSLV3_CLIENT_METHOD */ - -/* Define to 1 if you have the `wolfSSL_CTX_UseSupportedCurve' function. */ -/* #undef HAVE_WOLFSSL_CTX_USESUPPORTEDCURVE */ - -/* Define to 1 if you have the `wolfSSL_get_peer_certificate' function. */ -/* #undef HAVE_WOLFSSL_GET_PEER_CERTIFICATE */ - -/* Define to 1 if you have the `wolfSSL_UseALPN' function. */ -/* #undef HAVE_WOLFSSL_USEALPN */ - -/* Define this symbol if your OS supports changing the contents of argv */ -#define HAVE_WRITABLE_ARGV 1 - -/* Define to 1 if you have the writev function. */ -#define HAVE_WRITEV 1 - -/* Define to 1 if you have the ws2tcpip.h header file. */ -/* #undef HAVE_WS2TCPIP_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_X509_H */ - -/* if you have the zlib.h header file */ -/* #undef HAVE_ZLIB_H */ - -/* Define to the sub-directory where libtool stores uninstalled libraries. */ -#define LT_OBJDIR ".libs/" - -/* Define to 1 if you need the lber.h header file even with ldap.h */ -/* #undef NEED_LBER_H */ - -/* Define to 1 if you need the malloc.h header file even with stdlib.h */ -/* #undef NEED_MALLOC_H */ - -/* Define to 1 if you need the memory.h header file even with stdlib.h */ -/* #undef NEED_MEMORY_H */ - -/* Define to 1 if _REENTRANT preprocessor symbol must be defined. */ -/* #undef NEED_REENTRANT */ - -/* Define to 1 if _THREAD_SAFE preprocessor symbol must be defined. */ -/* #undef NEED_THREAD_SAFE */ - -/* Define to enable NTLM delegation to winbind's ntlm_auth helper. */ -#define NTLM_WB_ENABLED 1 - -/* Define absolute filename for winbind's ntlm_auth helper. 
*/ -#define NTLM_WB_FILE "/usr/bin/ntlm_auth" - -/* cpu-machine-OS */ -#define OS "x86_64-apple-darwin" - -/* Name of package */ -#define PACKAGE "curl" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT \ - "a suitable curl mailing list: https://curl.haxx.se/mail/" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "curl" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "curl -" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "curl" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "-" - -/* a suitable file to read random data from */ -#define RANDOM_FILE "/dev/urandom" - -/* Define to the type of arg 1 for recv. */ -#define RECV_TYPE_ARG1 int - -/* Define to the type of arg 2 for recv. */ -#define RECV_TYPE_ARG2 void * - -/* Define to the type of arg 3 for recv. */ -#define RECV_TYPE_ARG3 size_t - -/* Define to the type of arg 4 for recv. */ -#define RECV_TYPE_ARG4 int - -/* Define to the function return type for recv. */ -#define RECV_TYPE_RETV ssize_t - -/* Define as the return type of signal handlers (`int' or `void'). */ -#define RETSIGTYPE void - -/* Define to the type qualifier of arg 5 for select. */ -#define SELECT_QUAL_ARG5 - -/* Define to the type of arg 1 for select. */ -#define SELECT_TYPE_ARG1 int - -/* Define to the type of args 2, 3 and 4 for select. */ -#define SELECT_TYPE_ARG234 fd_set * - -/* Define to the type of arg 5 for select. */ -#define SELECT_TYPE_ARG5 struct timeval * - -/* Define to the function return type for select. */ -#define SELECT_TYPE_RETV int - -/* Define to the type qualifier of arg 2 for send. */ -#define SEND_QUAL_ARG2 const - -/* Define to the type of arg 1 for send. */ -#define SEND_TYPE_ARG1 int - -/* Define to the type of arg 2 for send. 
*/ -#define SEND_TYPE_ARG2 void * - -/* Define to the type of arg 3 for send. */ -#define SEND_TYPE_ARG3 size_t - -/* Define to the type of arg 4 for send. */ -#define SEND_TYPE_ARG4 int - -/* Define to the function return type for send. */ -#define SEND_TYPE_RETV ssize_t - -/* The size of `curl_off_t', as computed by sizeof. */ -#define SIZEOF_CURL_OFF_T 8 - -/* The size of `int', as computed by sizeof. */ -#define SIZEOF_INT 4 - -/* The size of `long', as computed by sizeof. */ -#define SIZEOF_LONG 8 - -/* The size of `long long', as computed by sizeof. */ -/* #undef SIZEOF_LONG_LONG */ - -/* The size of `off_t', as computed by sizeof. */ -#define SIZEOF_OFF_T 8 - -/* The size of `short', as computed by sizeof. */ -#define SIZEOF_SHORT 2 - -/* The size of `size_t', as computed by sizeof. */ -#define SIZEOF_SIZE_T 8 - -/* The size of `time_t', as computed by sizeof. */ -#define SIZEOF_TIME_T 8 - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to the type of arg 3 for strerror_r. */ -#define STRERROR_R_TYPE_ARG3 size_t - -/* Define to 1 if you can safely include both and . 
*/ -#define TIME_WITH_SYS_TIME 1 - -/* Define to enable c-ares support */ -/* #undef USE_ARES */ - -/* if axTLS is enabled */ -/* #undef USE_AXTLS */ - -/* if CyaSSL/WolfSSL is enabled */ -/* #undef USE_CYASSL */ - -/* to enable Apple OS native SSL/TLS support */ -/* #undef USE_DARWINSSL */ - -/* if GnuTLS is enabled */ -/* #undef USE_GNUTLS */ - -/* if GnuTLS uses nettle as crypto backend */ -/* #undef USE_GNUTLS_NETTLE */ - -/* PSL support enabled */ -/* #undef USE_LIBPSL */ - -/* if librtmp is in use */ -/* #undef USE_LIBRTMP */ - -/* if libSSH2 is in use */ -/* #undef USE_LIBSSH2 */ - -/* If you want to build curl with the built-in manual */ -#define USE_MANUAL 1 - -/* if mbedTLS is enabled */ -/* #undef USE_MBEDTLS */ - -/* Define to enable metalink support */ -/* #undef USE_METALINK */ - -/* if nghttp2 is in use */ -/* #undef USE_NGHTTP2 */ - -/* if NSS is enabled */ -/* #undef USE_NSS */ - -/* Use OpenLDAP-specific code */ -/* #undef USE_OPENLDAP */ - -/* if OpenSSL is in use */ -#define USE_OPENSSL 1 - -/* if PolarSSL is enabled */ -/* #undef USE_POLARSSL */ - -/* to enable Windows native SSL/TLS support */ -/* #undef USE_SCHANNEL */ - -/* if you want POSIX threaded DNS lookup */ -#define USE_THREADS_POSIX 1 - -/* if you want Win32 threaded DNS lookup */ -/* #undef USE_THREADS_WIN32 */ - -/* Use TLS-SRP authentication */ -#ifndef HAVE_BORINGSSL -#define USE_TLS_SRP 1 -#endif - -/* Use Unix domain sockets */ -#define USE_UNIX_SOCKETS 1 - -/* Define to 1 if you have the `normaliz' (WinIDN) library (-lnormaliz). */ -/* #undef USE_WIN32_IDN */ - -/* Define to 1 if you are building a Windows target with large file support. - */ -/* #undef USE_WIN32_LARGE_FILES */ - -/* Use Windows LDAP implementation */ -/* #undef USE_WIN32_LDAP */ - -/* Define to 1 if you are building a Windows target without large file - support. 
*/ -/* #undef USE_WIN32_SMALL_FILES */ - -/* to enable SSPI support */ -/* #undef USE_WINDOWS_SSPI */ - -/* Version number of package */ -#define VERSION "-" - -/* Define to 1 to provide own prototypes. */ -/* #undef WANT_IDN_PROTOTYPES */ - -/* Define to 1 if OS is AIX. */ -#ifndef _ALL_SOURCE -/* # undef _ALL_SOURCE */ -#endif - -/* Enable large inode numbers on Mac OS X 10.5. */ -#ifndef _DARWIN_USE_64_BIT_INODE -# define _DARWIN_USE_64_BIT_INODE 1 -#endif - -/* Number of bits in a file offset, on hosts where this is settable. */ -/* #undef _FILE_OFFSET_BITS */ - -/* Define for large files, on AIX-style hosts. */ -/* #undef _LARGE_FILES */ - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Type to use in place of in_addr_t when system does not provide it. */ -/* #undef in_addr_t */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ - -/* the signed version of size_t */ -/* #undef ssize_t */ diff --git a/packager/third_party/curl/config/mac/find_curl_ca_bundle.sh b/packager/third_party/curl/config/mac/find_curl_ca_bundle.sh deleted file mode 100755 index c480f04a1c..0000000000 --- a/packager/third_party/curl/config/mac/find_curl_ca_bundle.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -e - -# Copyright 2015 Google Inc. All rights reserved. -# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd - -# Scan ca bundle in its common appearing locations. 
-paths=('/opt/local/etc/openssl/cert.pem' # macports - '/opt/local/share/curl/curl-ca-bundle.crt' # macports - '/usr/local/etc/openssl/cert.pem' # homebrew - '/etc/ssl/cert.pem') - -for path in "${paths[@]}"; do - if test -f "$path"; then - echo "$path" - exit 0 - fi -done - -echo 'Failed to locate SSL CA cert.' -exit 1 diff --git a/packager/third_party/curl/curl.gyp b/packager/third_party/curl/curl.gyp deleted file mode 100644 index 86c8fe6ea0..0000000000 --- a/packager/third_party/curl/curl.gyp +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd - -{ - 'targets': [ - { - 'target_name': 'curl_config', - 'type': '<(component)', - 'sources': [ - 'config/curl/curlbuild.h', - ], - 'direct_dependent_settings': { - 'defines': [ - 'HTTP_ONLY', - 'USE_IPV6', - ], - 'include_dirs': [ - 'config', - 'config/curl', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - 'config/curl', - ], - }, - }, - 'conditions': [ - ['OS == "linux"', { - 'sources': [ - 'config/linux/curl_config.h', - ], - 'direct_dependent_settings': { - 'defines': [ - 'HAVE_CONFIG_H', - 'CURL_CA_BUNDLE=" -#include - -#include "gflags_declare.h" // IWYU pragma: export - - -// We always want to export variables defined in user code -#ifndef GFLAGS_DLL_DEFINE_FLAG -# ifdef _MSC_VER -# define GFLAGS_DLL_DEFINE_FLAG __declspec(dllexport) -# else -# define GFLAGS_DLL_DEFINE_FLAG -# endif -#endif - - -namespace GFLAGS_NAMESPACE { - - -// -------------------------------------------------------------------- -// To actually define a flag in a file, use DEFINE_bool, -// DEFINE_string, etc. at the bottom of this file. You may also find -// it useful to register a validator with the flag. 
This ensures that -// when the flag is parsed from the commandline, or is later set via -// SetCommandLineOption, we call the validation function. It is _not_ -// called when you assign the value to the flag directly using the = operator. -// -// The validation function should return true if the flag value is valid, and -// false otherwise. If the function returns false for the new setting of the -// flag, the flag will retain its current value. If it returns false for the -// default value, ParseCommandLineFlags() will die. -// -// This function is safe to call at global construct time (as in the -// example below). -// -// Example use: -// static bool ValidatePort(const char* flagname, int32 value) { -// if (value > 0 && value < 32768) // value is ok -// return true; -// printf("Invalid value for --%s: %d\n", flagname, (int)value); -// return false; -// } -// DEFINE_int32(port, 0, "What port to listen on"); -// static bool dummy = RegisterFlagValidator(&FLAGS_port, &ValidatePort); - -// Returns true if successfully registered, false if not (because the -// first argument doesn't point to a command-line flag, or because a -// validator is already registered for this flag). 
-extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const bool* flag, bool (*validate_fn)(const char*, bool)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const int32* flag, bool (*validate_fn)(const char*, int32)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const int64* flag, bool (*validate_fn)(const char*, int64)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const uint64* flag, bool (*validate_fn)(const char*, uint64)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const double* flag, bool (*validate_fn)(const char*, double)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const std::string* flag, bool (*validate_fn)(const char*, const std::string&)); - -// Convenience macro for the registration of a flag validator -#define DEFINE_validator(name, validator) \ - static const bool name##_validator_registered = \ - GFLAGS_NAMESPACE::RegisterFlagValidator(&FLAGS_##name, validator) - - -// -------------------------------------------------------------------- -// These methods are the best way to get access to info about the -// list of commandline flags. Note that these routines are pretty slow. -// GetAllFlags: mostly-complete info about the list, sorted by file. -// ShowUsageWithFlags: pretty-prints the list to stdout (what --help does) -// ShowUsageWithFlagsRestrict: limit to filenames with restrict as a substr -// -// In addition to accessing flags, you can also access argv[0] (the program -// name) and argv (the entire commandline), which we sock away a copy of. -// These variables are static, so you should only set them once. -// -// No need to export this data only structure from DLL, avoiding VS warning 4251. 
-struct CommandLineFlagInfo { - std::string name; // the name of the flag - std::string type; // the type of the flag: int32, etc - std::string description; // the "help text" associated with the flag - std::string current_value; // the current value, as a string - std::string default_value; // the default value, as a string - std::string filename; // 'cleaned' version of filename holding the flag - bool has_validator_fn; // true if RegisterFlagValidator called on this flag - bool is_default; // true if the flag has the default value and - // has not been set explicitly from the cmdline - // or via SetCommandLineOption - const void* flag_ptr; // pointer to the flag's current value (i.e. FLAGS_foo) -}; - -// Using this inside of a validator is a recipe for a deadlock. -// TODO(user) Fix locking when validators are running, to make it safe to -// call validators during ParseAllFlags. -// Also make sure then to uncomment the corresponding unit test in -// gflags_unittest.sh -extern GFLAGS_DLL_DECL void GetAllFlags(std::vector* OUTPUT); -// These two are actually defined in gflags_reporting.cc. -extern GFLAGS_DLL_DECL void ShowUsageWithFlags(const char *argv0); // what --help does -extern GFLAGS_DLL_DECL void ShowUsageWithFlagsRestrict(const char *argv0, const char *restrict); - -// Create a descriptive string for a flag. -// Goes to some trouble to make pretty line breaks. -extern GFLAGS_DLL_DECL std::string DescribeOneFlag(const CommandLineFlagInfo& flag); - -// Thread-hostile; meant to be called before any threads are spawned. -extern GFLAGS_DLL_DECL void SetArgv(int argc, const char** argv); - -// The following functions are thread-safe as long as SetArgv() is -// only called before any threads start. 
-extern GFLAGS_DLL_DECL const std::vector& GetArgvs(); -extern GFLAGS_DLL_DECL const char* GetArgv(); // all of argv as a string -extern GFLAGS_DLL_DECL const char* GetArgv0(); // only argv0 -extern GFLAGS_DLL_DECL uint32 GetArgvSum(); // simple checksum of argv -extern GFLAGS_DLL_DECL const char* ProgramInvocationName(); // argv0, or "UNKNOWN" if not set -extern GFLAGS_DLL_DECL const char* ProgramInvocationShortName(); // basename(argv0) - -// ProgramUsage() is thread-safe as long as SetUsageMessage() is only -// called before any threads start. -extern GFLAGS_DLL_DECL const char* ProgramUsage(); // string set by SetUsageMessage() - -// VersionString() is thread-safe as long as SetVersionString() is only -// called before any threads start. -extern GFLAGS_DLL_DECL const char* VersionString(); // string set by SetVersionString() - - - -// -------------------------------------------------------------------- -// Normally you access commandline flags by just saying "if (FLAGS_foo)" -// or whatever, and set them by calling "FLAGS_foo = bar" (or, more -// commonly, via the DEFINE_foo macro). But if you need a bit more -// control, we have programmatic ways to get/set the flags as well. -// These programmatic ways to access flags are thread-safe, but direct -// access is only thread-compatible. - -// Return true iff the flagname was found. -// OUTPUT is set to the flag's value, or unchanged if we return false. -extern GFLAGS_DLL_DECL bool GetCommandLineOption(const char* name, std::string* OUTPUT); - -// Return true iff the flagname was found. OUTPUT is set to the flag's -// CommandLineFlagInfo or unchanged if we return false. -extern GFLAGS_DLL_DECL bool GetCommandLineFlagInfo(const char* name, CommandLineFlagInfo* OUTPUT); - -// Return the CommandLineFlagInfo of the flagname. exit() if name not found. -// Example usage, to check if a flag's value is currently the default value: -// if (GetCommandLineFlagInfoOrDie("foo").is_default) ... 
-extern GFLAGS_DLL_DECL CommandLineFlagInfo GetCommandLineFlagInfoOrDie(const char* name); - -enum GFLAGS_DLL_DECL FlagSettingMode { - // update the flag's value (can call this multiple times). - SET_FLAGS_VALUE, - // update the flag's value, but *only if* it has not yet been updated - // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef". - SET_FLAG_IF_DEFAULT, - // set the flag's default value to this. If the flag has not yet updated - // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef") - // change the flag's current value to the new default value as well. - SET_FLAGS_DEFAULT -}; - -// Set a particular flag ("command line option"). Returns a string -// describing the new value that the option has been set to. The -// return value API is not well-specified, so basically just depend on -// it to be empty if the setting failed for some reason -- the name is -// not a valid flag name, or the value is not a valid value -- and -// non-empty else. - -// SetCommandLineOption uses set_mode == SET_FLAGS_VALUE (the common case) -extern GFLAGS_DLL_DECL std::string SetCommandLineOption (const char* name, const char* value); -extern GFLAGS_DLL_DECL std::string SetCommandLineOptionWithMode(const char* name, const char* value, FlagSettingMode set_mode); - - -// -------------------------------------------------------------------- -// Saves the states (value, default value, whether the user has set -// the flag, registered validators, etc) of all flags, and restores -// them when the FlagSaver is destroyed. This is very useful in -// tests, say, when you want to let your tests change the flags, but -// make sure that they get reverted to the original states when your -// test is complete. -// -// Example usage: -// void TestFoo() { -// FlagSaver s1; -// FLAG_foo = false; -// FLAG_bar = "some value"; -// -// // test happens here. You can return at any time -// // without worrying about restoring the FLAG values. 
-// } -// -// Note: This class is marked with GFLAGS_ATTRIBUTE_UNUSED because all -// the work is done in the constructor and destructor, so in the standard -// usage example above, the compiler would complain that it's an -// unused variable. -// -// This class is thread-safe. However, its destructor writes to -// exactly the set of flags that have changed value during its -// lifetime, so concurrent _direct_ access to those flags -// (i.e. FLAGS_foo instead of {Get,Set}CommandLineOption()) is unsafe. - -class GFLAGS_DLL_DECL FlagSaver { - public: - FlagSaver(); - ~FlagSaver(); - - private: - class FlagSaverImpl* impl_; // we use pimpl here to keep API steady - - FlagSaver(const FlagSaver&); // no copying! - void operator=(const FlagSaver&); -}__attribute((unused)); - -// -------------------------------------------------------------------- -// Some deprecated or hopefully-soon-to-be-deprecated functions. - -// This is often used for logging. TODO(csilvers): figure out a better way -extern GFLAGS_DLL_DECL std::string CommandlineFlagsIntoString(); -// Usually where this is used, a FlagSaver should be used instead. -extern GFLAGS_DLL_DECL -bool ReadFlagsFromString(const std::string& flagfilecontents, - const char* prog_name, - bool errors_are_fatal); // uses SET_FLAGS_VALUE - -// These let you manually implement --flagfile functionality. -// DEPRECATED. -extern GFLAGS_DLL_DECL bool AppendFlagsIntoFile(const std::string& filename, const char* prog_name); -extern GFLAGS_DLL_DECL bool ReadFromFlagsFile(const std::string& filename, const char* prog_name, bool errors_are_fatal); // uses SET_FLAGS_VALUE - - -// -------------------------------------------------------------------- -// Useful routines for initializing flags from the environment. -// In each case, if 'varname' does not exist in the environment -// return defval. If 'varname' does exist but is not valid -// (e.g., not a number for an int32 flag), abort with an error. -// Otherwise, return the value. 
NOTE: for booleans, for true use -// 't' or 'T' or 'true' or '1', for false 'f' or 'F' or 'false' or '0'. - -extern GFLAGS_DLL_DECL bool BoolFromEnv(const char *varname, bool defval); -extern GFLAGS_DLL_DECL int32 Int32FromEnv(const char *varname, int32 defval); -extern GFLAGS_DLL_DECL int64 Int64FromEnv(const char *varname, int64 defval); -extern GFLAGS_DLL_DECL uint64 Uint64FromEnv(const char *varname, uint64 defval); -extern GFLAGS_DLL_DECL double DoubleFromEnv(const char *varname, double defval); -extern GFLAGS_DLL_DECL const char *StringFromEnv(const char *varname, const char *defval); - - -// -------------------------------------------------------------------- -// The next two functions parse gflags from main(): - -// Set the "usage" message for this program. For example: -// string usage("This program does nothing. Sample usage:\n"); -// usage += argv[0] + " "; -// SetUsageMessage(usage); -// Do not include commandline flags in the usage: we do that for you! -// Thread-hostile; meant to be called before any threads are spawned. -extern GFLAGS_DLL_DECL void SetUsageMessage(const std::string& usage); - -// Sets the version string, which is emitted with --version. -// For instance: SetVersionString("1.3"); -// Thread-hostile; meant to be called before any threads are spawned. -extern GFLAGS_DLL_DECL void SetVersionString(const std::string& version); - - -// Looks for flags in argv and parses them. Rearranges argv to put -// flags first, or removes them entirely if remove_flags is true. -// If a flag is defined more than once in the command line or flag -// file, the last definition is used. Returns the index (into argv) -// of the first non-flag argument. -// See top-of-file for more details on this function. -#ifndef SWIG // In swig, use ParseCommandLineFlagsScript() instead. 
-extern GFLAGS_DLL_DECL uint32 ParseCommandLineFlags(int *argc, char*** argv, bool remove_flags); -#endif - - -// Calls to ParseCommandLineNonHelpFlags and then to -// HandleCommandLineHelpFlags can be used instead of a call to -// ParseCommandLineFlags during initialization, in order to allow for -// changing default values for some FLAGS (via -// e.g. SetCommandLineOptionWithMode calls) between the time of -// command line parsing and the time of dumping help information for -// the flags as a result of command line parsing. If a flag is -// defined more than once in the command line or flag file, the last -// definition is used. Returns the index (into argv) of the first -// non-flag argument. (If remove_flags is true, will always return 1.) -extern GFLAGS_DLL_DECL uint32 ParseCommandLineNonHelpFlags(int *argc, char*** argv, bool remove_flags); - -// This is actually defined in gflags_reporting.cc. -// This function is misnamed (it also handles --version, etc.), but -// it's too late to change that now. :-( -extern GFLAGS_DLL_DECL void HandleCommandLineHelpFlags(); // in gflags_reporting.cc - -// Allow command line reparsing. Disables the error normally -// generated when an unknown flag is found, since it may be found in a -// later parse. Thread-hostile; meant to be called before any threads -// are spawned. -extern GFLAGS_DLL_DECL void AllowCommandLineReparsing(); - -// Reparse the flags that have not yet been recognized. Only flags -// registered since the last parse will be recognized. Any flag value -// must be provided as part of the argument using "=", not as a -// separate command line argument that follows the flag argument. -// Intended for handling flags from dynamically loaded libraries, -// since their flags are not registered until they are loaded. -extern GFLAGS_DLL_DECL void ReparseCommandLineNonHelpFlags(); - -// Clean up memory allocated by flags. 
This is only needed to reduce -// the quantity of "potentially leaked" reports emitted by memory -// debugging tools such as valgrind. It is not required for normal -// operation, or for the google perftools heap-checker. It must only -// be called when the process is about to exit, and all threads that -// might access flags are quiescent. Referencing flags after this is -// called will have unexpected consequences. This is not safe to run -// when multiple threads might be running: the function is -// thread-hostile. -extern GFLAGS_DLL_DECL void ShutDownCommandLineFlags(); - - -// -------------------------------------------------------------------- -// Now come the command line flag declaration/definition macros that -// will actually be used. They're kind of hairy. A major reason -// for this is initialization: we want people to be able to access -// variables in global constructors and have that not crash, even if -// their global constructor runs before the global constructor here. -// (Obviously, we can't guarantee the flags will have the correct -// default value in that case, but at least accessing them is safe.) -// The only way to do that is have flags point to a static buffer. -// So we make one, using a union to ensure proper alignment, and -// then use placement-new to actually set up the flag with the -// correct default value. In the same vein, we have to worry about -// flag access in global destructors, so FlagRegisterer has to be -// careful never to destroy the flag-values it constructs. -// -// Note that when we define a flag variable FLAGS_, we also -// preemptively define a junk variable, FLAGS_no. This is to -// cause a link-time error if someone tries to define 2 flags with -// names like "logging" and "nologging". We do this because a bool -// flag FLAG can be set from the command line to true with a "-FLAG" -// argument, and to false with a "-noFLAG" argument, and so this can -// potentially avert confusion. 
-// -// We also put flags into their own namespace. It is purposefully -// named in an opaque way that people should have trouble typing -// directly. The idea is that DEFINE puts the flag in the weird -// namespace, and DECLARE imports the flag from there into the current -// namespace. The net result is to force people to use DECLARE to get -// access to a flag, rather than saying "extern GFLAGS_DLL_DECL bool FLAGS_whatever;" -// or some such instead. We want this so we can put extra -// functionality (like sanity-checking) in DECLARE if we want, and -// make sure it is picked up everywhere. -// -// We also put the type of the variable in the namespace, so that -// people can't DECLARE_int32 something that they DEFINE_bool'd -// elsewhere. - -class GFLAGS_DLL_DECL FlagRegisterer { - public: - FlagRegisterer(const char* name, const char* type, - const char* help, const char* filename, - void* current_storage, void* defvalue_storage); -}; - -// If your application #defines STRIP_FLAG_HELP to a non-zero value -// before #including this file, we remove the help message from the -// binary file. This can reduce the size of the resulting binary -// somewhat, and may also be useful for security reasons. - -extern GFLAGS_DLL_DECL const char kStrippedFlagHelp[]; - - -} // namespace GFLAGS_NAMESPACE - - -#ifndef SWIG // In swig, ignore the main flag declarations - -#if defined(STRIP_FLAG_HELP) && STRIP_FLAG_HELP > 0 -// Need this construct to avoid the 'defined but not used' warning. -#define MAYBE_STRIPPED_HELP(txt) \ - (false ? (txt) : GFLAGS_NAMESPACE::kStrippedFlagHelp) -#else -#define MAYBE_STRIPPED_HELP(txt) txt -#endif - -// Each command-line flag has two variables associated with it: one -// with the current value, and one with the default value. However, -// we have a third variable, which is where value is assigned; it's a -// constant. This guarantees that FLAG_##value is initialized at -// static initialization time (e.g. 
before program-start) rather than -// than global construction time (which is after program-start but -// before main), at least when 'value' is a compile-time constant. We -// use a small trick for the "default value" variable, and call it -// FLAGS_no. This serves the second purpose of assuring a -// compile error if someone tries to define a flag named no -// which is illegal (--foo and --nofoo both affect the "foo" flag). -#define DEFINE_VARIABLE(type, shorttype, name, value, help) \ - namespace fL##shorttype { \ - static const type FLAGS_nono##name = value; \ - /* We always want to export defined variables, dll or no */ \ - GFLAGS_DLL_DEFINE_FLAG type FLAGS_##name = FLAGS_nono##name; \ - type FLAGS_no##name = FLAGS_nono##name; \ - static GFLAGS_NAMESPACE::FlagRegisterer o_##name( \ - #name, #type, MAYBE_STRIPPED_HELP(help), __FILE__, \ - &FLAGS_##name, &FLAGS_no##name); \ - } \ - using fL##shorttype::FLAGS_##name - -// For DEFINE_bool, we want to do the extra check that the passed-in -// value is actually a bool, and not a string or something that can be -// coerced to a bool. These declarations (no definition needed!) will -// help us do that, and never evaluate From, which is important. -// We'll use 'sizeof(IsBool(val))' to distinguish. This code requires -// that the compiler have different sizes for bool & double. Since -// this is not guaranteed by the standard, we check it with a -// COMPILE_ASSERT. -namespace fLB { -struct CompileAssert {}; -typedef CompileAssert expected_sizeof_double_neq_sizeof_bool[ - (sizeof(double) != sizeof(bool)) ? 1 : -1]; -template double GFLAGS_DLL_DECL IsBoolFlag(const From& from); -GFLAGS_DLL_DECL bool IsBoolFlag(bool from); -} // namespace fLB - -// Here are the actual DEFINE_*-macros. The respective DECLARE_*-macros -// are in a separate include, gflags_declare.h, for reducing -// the physical transitive size for DECLARE use. 
-#define DEFINE_bool(name, val, txt) \ - namespace fLB { \ - typedef ::fLB::CompileAssert FLAG_##name##_value_is_not_a_bool[ \ - (sizeof(::fLB::IsBoolFlag(val)) != sizeof(double))? 1: -1]; \ - } \ - DEFINE_VARIABLE(bool, B, name, val, txt) - -#define DEFINE_int32(name, val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::int32, I, \ - name, val, txt) - -#define DEFINE_int64(name, val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::int64, I64, \ - name, val, txt) - -#define DEFINE_uint64(name,val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::uint64, U64, \ - name, val, txt) - -#define DEFINE_double(name, val, txt) \ - DEFINE_VARIABLE(double, D, name, val, txt) - -// Strings are trickier, because they're not a POD, so we can't -// construct them at static-initialization time (instead they get -// constructed at global-constructor time, which is much later). To -// try to avoid crashes in that case, we use a char buffer to store -// the string, which we can static-initialize, and then placement-new -// into it later. It's not perfect, but the best we can do. - -namespace fLS { - -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - const char *value) { - return new(stringspot) clstring(value); -} -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - const clstring &value) { - return new(stringspot) clstring(value); -} -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - int value); -} // namespace fLS - -// We need to define a var named FLAGS_no##name so people don't define -// --string and --nostring. And we need a temporary place to put val -// so we don't have to evaluate it twice. Two great needs that go -// great together! -// The weird 'using' + 'extern' inside the fLS namespace is to work around -// an unknown compiler bug/issue with the gcc 4.2.1 on SUSE 10. 
See -// http://code.google.com/p/google-gflags/issues/detail?id=20 -#define DEFINE_string(name, val, txt) \ - namespace fLS { \ - using ::fLS::clstring; \ - static union { void* align; char s[sizeof(clstring)]; } s_##name[2]; \ - clstring* const FLAGS_no##name = ::fLS:: \ - dont_pass0toDEFINE_string(s_##name[0].s, \ - val); \ - static GFLAGS_NAMESPACE::FlagRegisterer o_##name( \ - #name, "string", MAYBE_STRIPPED_HELP(txt), __FILE__, \ - s_##name[0].s, new (s_##name[1].s) clstring(*FLAGS_no##name)); \ - extern GFLAGS_DLL_DEFINE_FLAG clstring& FLAGS_##name; \ - using fLS::FLAGS_##name; \ - clstring& FLAGS_##name = *FLAGS_no##name; \ - } \ - using fLS::FLAGS_##name - -#endif // SWIG - - -// Import gflags library symbols into alternative/deprecated namespace(s) -#include "gflags_gflags.h" - - -#endif // GFLAGS_GFLAGS_H_ diff --git a/packager/third_party/gflags/gen/posix/include/gflags/gflags_completions.h b/packager/third_party/gflags/gen/posix/include/gflags/gflags_completions.h deleted file mode 100644 index f951c1e02d..0000000000 --- a/packager/third_party/gflags/gen/posix/include/gflags/gflags_completions.h +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2008, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// --- - -// -// Implement helpful bash-style command line flag completions -// -// ** Functional API: -// HandleCommandLineCompletions() should be called early during -// program startup, but after command line flag code has been -// initialized, such as the beginning of HandleCommandLineHelpFlags(). -// It checks the value of the flag --tab_completion_word. If this -// flag is empty, nothing happens here. If it contains a string, -// however, then HandleCommandLineCompletions() will hijack the -// process, attempting to identify the intention behind this -// completion. Regardless of the outcome of this deduction, the -// process will be terminated, similar to --helpshort flag -// handling. -// -// ** Overview of Bash completions: -// Bash can be told to programatically determine completions for the -// current 'cursor word'. It does this by (in this case) invoking a -// command with some additional arguments identifying the command -// being executed, the word being completed, and the previous word -// (if any). Bash then expects a sequence of output lines to be -// printed to stdout. 
If these lines all contain a common prefix -// longer than the cursor word, bash will replace the cursor word -// with that common prefix, and display nothing. If there isn't such -// a common prefix, bash will display the lines in pages using 'more'. -// -// ** Strategy taken for command line completions: -// If we can deduce either the exact flag intended, or a common flag -// prefix, we'll output exactly that. Otherwise, if information -// must be displayed to the user, we'll take the opportunity to add -// some helpful information beyond just the flag name (specifically, -// we'll include the default flag value and as much of the flag's -// description as can fit on a single terminal line width, as specified -// by the flag --tab_completion_columns). Furthermore, we'll try to -// make bash order the output such that the most useful or relevent -// flags are the most likely to be shown at the top. -// -// ** Additional features: -// To assist in finding that one really useful flag, substring matching -// was implemented. Before pressing a to get completion for the -// current word, you can append one or more '?' to the flag to do -// substring matching. Here's the semantics: -// --foo Show me all flags with names prefixed by 'foo' -// --foo? Show me all flags with 'foo' somewhere in the name -// --foo?? Same as prior case, but also search in module -// definition path for 'foo' -// --foo??? Same as prior case, but also search in flag -// descriptions for 'foo' -// Finally, we'll trim the output to a relatively small number of -// flags to keep bash quiet about the verbosity of output. If one -// really wanted to see all possible matches, appending a '+' to the -// search word will force the exhaustive list of matches to be printed. -// -// ** How to have bash accept completions from a binary: -// Bash requires that it be informed about each command that programmatic -// completion should be enabled for. 
Example addition to a .bashrc -// file would be (your path to gflags_completions.sh file may differ): - -/* -$ complete -o bashdefault -o default -o nospace -C \ - '/home/build/eng/bash/bash_completions.sh --tab_completion_columns $COLUMNS' \ - time env binary_name another_binary [...] -*/ - -// This would allow the following to work: -// $ /path/to/binary_name --vmodule -// Or: -// $ ./bin/path/another_binary --gfs_u -// (etc) -// -// Sadly, it appears that bash gives no easy way to force this behavior for -// all commands. That's where the "time" in the above example comes in. -// If you haven't specifically added a command to the list of completion -// supported commands, you can still get completions by prefixing the -// entire command with "env". -// $ env /some/brand/new/binary --vmod -// Assuming that "binary" is a newly compiled binary, this should still -// produce the expected completion output. - - -#ifndef GFLAGS_COMPLETIONS_H_ -#define GFLAGS_COMPLETIONS_H_ - -namespace google { - -extern void HandleCommandLineCompletions(void); - -} - -#endif // GFLAGS_COMPLETIONS_H_ diff --git a/packager/third_party/gflags/gen/posix/include/gflags/gflags_declare.h b/packager/third_party/gflags/gen/posix/include/gflags/gflags_declare.h deleted file mode 100644 index 935a20e775..0000000000 --- a/packager/third_party/gflags/gen/posix/include/gflags/gflags_declare.h +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 1999, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// --- -// -// Revamped and reorganized by Craig Silverstein -// -// This is the file that should be included by any file which declares -// command line flag. - -#ifndef GFLAGS_DECLARE_H_ -#define GFLAGS_DECLARE_H_ - - -// --------------------------------------------------------------------------- -// Namespace of gflags library symbols. -#define GFLAGS_NAMESPACE google - -// --------------------------------------------------------------------------- -// Windows DLL import/export. 
- -// We always want to import the symbols of the gflags library -#ifndef GFLAGS_DLL_DECL -# if 0 && defined(_MSC_VER) -# define GFLAGS_DLL_DECL __declspec(dllimport) -# else -# define GFLAGS_DLL_DECL -# endif -#endif - -// We always want to import variables declared in user code -#ifndef GFLAGS_DLL_DECLARE_FLAG -# ifdef _MSC_VER -# define GFLAGS_DLL_DECLARE_FLAG __declspec(dllimport) -# else -# define GFLAGS_DLL_DECLARE_FLAG -# endif -#endif - -// --------------------------------------------------------------------------- -// Flag types -#include -#if 1 -# include // the normal place uint32_t is defined -#elif 1 -# include // the normal place u_int32_t is defined -#elif 1 -# include // a third place for uint32_t or u_int32_t -#endif - -namespace GFLAGS_NAMESPACE { - -#if 1 // C99 -typedef int32_t int32; -typedef uint32_t uint32; -typedef int64_t int64; -typedef uint64_t uint64; -#elif 0 // BSD -typedef int32_t int32; -typedef u_int32_t uint32; -typedef int64_t int64; -typedef u_int64_t uint64; -#elif 0 // Windows -typedef __int32 int32; -typedef unsigned __int32 uint32; -typedef __int64 int64; -typedef unsigned __int64 uint64; -#else -# error Do not know how to define a 32-bit integer quantity on your system -#endif - -} // namespace GFLAGS_NAMESPACE - - -namespace fLS { - -// The meaning of "string" might be different between now and when the -// macros below get invoked (e.g., if someone is experimenting with -// other string implementations that get defined after this file is -// included). Save the current meaning now and use it in the macros. 
-typedef std::string clstring; - -} // namespace fLS - - -#define DECLARE_VARIABLE(type, shorttype, name) \ - /* We always want to import declared variables, dll or no */ \ - namespace fL##shorttype { extern GFLAGS_DLL_DECLARE_FLAG type FLAGS_##name; } \ - using fL##shorttype::FLAGS_##name - -#define DECLARE_bool(name) \ - DECLARE_VARIABLE(bool, B, name) - -#define DECLARE_int32(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::int32, I, name) - -#define DECLARE_int64(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::int64, I64, name) - -#define DECLARE_uint64(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::uint64, U64, name) - -#define DECLARE_double(name) \ - DECLARE_VARIABLE(double, D, name) - -#define DECLARE_string(name) \ - /* We always want to import declared variables, dll or no */ \ - namespace fLS { \ - using ::fLS::clstring; \ - extern GFLAGS_DLL_DECLARE_FLAG ::fLS::clstring& FLAGS_##name; \ - } \ - using fLS::FLAGS_##name - - -#endif // GFLAGS_DECLARE_H_ diff --git a/packager/third_party/gflags/gen/posix/include/gflags/gflags_gflags.h b/packager/third_party/gflags/gen/posix/include/gflags/gflags_gflags.h deleted file mode 100644 index 0c17825dd6..0000000000 --- a/packager/third_party/gflags/gen/posix/include/gflags/gflags_gflags.h +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2014, Andreas Schuh -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// ----------------------------------------------------------------------------- -// Imports the gflags library symbols into an alternative/deprecated namespace. 
- -#ifndef GFLAGS_GFLAGS_H_ -# error The internal header gflags_gflags.h may only be included by gflags.h -#endif - -#ifndef GFLAGS_NS_GFLAGS_H_ -#define GFLAGS_NS_GFLAGS_H_ - - -namespace gflags { - - -using GFLAGS_NAMESPACE::int32; -using GFLAGS_NAMESPACE::uint32; -using GFLAGS_NAMESPACE::int64; -using GFLAGS_NAMESPACE::uint64; - -using GFLAGS_NAMESPACE::RegisterFlagValidator; -using GFLAGS_NAMESPACE::CommandLineFlagInfo; -using GFLAGS_NAMESPACE::GetAllFlags; -using GFLAGS_NAMESPACE::ShowUsageWithFlags; -using GFLAGS_NAMESPACE::ShowUsageWithFlagsRestrict; -using GFLAGS_NAMESPACE::DescribeOneFlag; -using GFLAGS_NAMESPACE::SetArgv; -using GFLAGS_NAMESPACE::GetArgvs; -using GFLAGS_NAMESPACE::GetArgv; -using GFLAGS_NAMESPACE::GetArgv0; -using GFLAGS_NAMESPACE::GetArgvSum; -using GFLAGS_NAMESPACE::ProgramInvocationName; -using GFLAGS_NAMESPACE::ProgramInvocationShortName; -using GFLAGS_NAMESPACE::ProgramUsage; -using GFLAGS_NAMESPACE::VersionString; -using GFLAGS_NAMESPACE::GetCommandLineOption; -using GFLAGS_NAMESPACE::GetCommandLineFlagInfo; -using GFLAGS_NAMESPACE::GetCommandLineFlagInfoOrDie; -using GFLAGS_NAMESPACE::FlagSettingMode; -using GFLAGS_NAMESPACE::SET_FLAGS_VALUE; -using GFLAGS_NAMESPACE::SET_FLAG_IF_DEFAULT; -using GFLAGS_NAMESPACE::SET_FLAGS_DEFAULT; -using GFLAGS_NAMESPACE::SetCommandLineOption; -using GFLAGS_NAMESPACE::SetCommandLineOptionWithMode; -using GFLAGS_NAMESPACE::FlagSaver; -using GFLAGS_NAMESPACE::CommandlineFlagsIntoString; -using GFLAGS_NAMESPACE::ReadFlagsFromString; -using GFLAGS_NAMESPACE::AppendFlagsIntoFile; -using GFLAGS_NAMESPACE::ReadFromFlagsFile; -using GFLAGS_NAMESPACE::BoolFromEnv; -using GFLAGS_NAMESPACE::Int32FromEnv; -using GFLAGS_NAMESPACE::Int64FromEnv; -using GFLAGS_NAMESPACE::Uint64FromEnv; -using GFLAGS_NAMESPACE::DoubleFromEnv; -using GFLAGS_NAMESPACE::StringFromEnv; -using GFLAGS_NAMESPACE::SetUsageMessage; -using GFLAGS_NAMESPACE::SetVersionString; -using GFLAGS_NAMESPACE::ParseCommandLineNonHelpFlags; -using 
GFLAGS_NAMESPACE::HandleCommandLineHelpFlags; -using GFLAGS_NAMESPACE::AllowCommandLineReparsing; -using GFLAGS_NAMESPACE::ReparseCommandLineNonHelpFlags; -using GFLAGS_NAMESPACE::ShutDownCommandLineFlags; -using GFLAGS_NAMESPACE::FlagRegisterer; - -#ifndef SWIG -using GFLAGS_NAMESPACE::ParseCommandLineFlags; -#endif - - -} // namespace gflags - - -#endif // GFLAGS_NS_GFLAGS_H_ diff --git a/packager/third_party/gflags/gen/posix/include/private/config.h b/packager/third_party/gflags/gen/posix/include/private/config.h deleted file mode 100644 index 592d61c4c0..0000000000 --- a/packager/third_party/gflags/gen/posix/include/private/config.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Generated from config.h.in during build configuration using CMake. */ - -// Note: This header file is only used internally. It is not part of public interface! - -// --------------------------------------------------------------------------- -// System checks - -// Define if you build this library for a MS Windows OS. -/* #undef OS_WINDOWS */ - -// Define if you have the header file. -#define HAVE_STDINT_H - -// Define if you have the header file. -#define HAVE_SYS_TYPES_H - -// Define if you have the header file. -#define HAVE_INTTYPES_H - -// Define if you have the header file. -#define HAVE_SYS_STAT_H - -// Define if you have the header file. -#define HAVE_UNISTD_H - -// Define if you have the header file. -#define HAVE_FNMATCH_H - -// Define if you have the header file (Windows 2000/XP). -/* #undef HAVE_SHLWAPI_H */ - -// Define if you have the strtoll function. -#define HAVE_STRTOLL - -// Define if you have the strtoq function. -/* #undef HAVE_STRTOQ */ - -// Define if you have the header file. -#define HAVE_PTHREAD - -// Define if your pthread library defines the type pthread_rwlock_t -#define HAVE_RWLOCK - -// gcc requires this to get PRId64, etc. 
-#if defined(HAVE_INTTYPES_H) && !defined(__STDC_FORMAT_MACROS) -# define __STDC_FORMAT_MACROS 1 -#endif - -// --------------------------------------------------------------------------- -// Package information - -// Name of package. -#define PACKAGE gflags - -// Define to the full name of this package. -#define PACKAGE_NAME gflags - -// Define to the full name and version of this package. -#define PACKAGE_STRING gflags 2.2.0 - -// Define to the one symbol short name of this package. -#define PACKAGE_TARNAME gflags-2.2.0 - -// Define to the version of this package. -#define PACKAGE_VERSION 2.2.0 - -// Version number of package. -#define VERSION PACKAGE_VERSION - -// Define to the address where bug reports for this package should be sent. -#define PACKAGE_BUGREPORT https://github.com/schuhschuh/gflags/issues - -// --------------------------------------------------------------------------- -// Path separator -#ifndef PATH_SEPARATOR -# ifdef OS_WINDOWS -# define PATH_SEPARATOR '\\' -# else -# define PATH_SEPARATOR '/' -# endif -#endif - -// --------------------------------------------------------------------------- -// Windows - -// Whether gflags library is a DLL. -#ifndef GFLAGS_IS_A_DLL -# define GFLAGS_IS_A_DLL 0 -#endif - -// Always export symbols when compiling a shared library as this file is only -// included by internal modules when building the gflags library itself. -// The gflags_declare.h header file will set it to import these symbols otherwise. 
-#ifndef GFLAGS_DLL_DECL -# if GFLAGS_IS_A_DLL && defined(_MSC_VER) -# define GFLAGS_DLL_DECL __declspec(dllexport) -# else -# define GFLAGS_DLL_DECL -# endif -#endif -// Flags defined by the gflags library itself must be exported -#ifndef GFLAGS_DLL_DEFINE_FLAG -# define GFLAGS_DLL_DEFINE_FLAG GFLAGS_DLL_DECL -#endif - -#ifdef OS_WINDOWS -// The unittests import the symbols of the shared gflags library -# if GFLAGS_IS_A_DLL && defined(_MSC_VER) -# define GFLAGS_DLL_DECL_FOR_UNITTESTS __declspec(dllimport) -# endif -# include "windows_port.h" -#endif diff --git a/packager/third_party/gflags/gen/win/include/gflags/gflags.h b/packager/third_party/gflags/gen/win/include/gflags/gflags.h deleted file mode 100644 index 357eec6be7..0000000000 --- a/packager/third_party/gflags/gen/win/include/gflags/gflags.h +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright (c) 2006, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// --- -// Revamped and reorganized by Craig Silverstein -// -// This is the file that should be included by any file which declares -// or defines a command line flag or wants to parse command line flags -// or print a program usage message (which will include information about -// flags). Executive summary, in the form of an example foo.cc file: -// -// #include "foo.h" // foo.h has a line "DECLARE_int32(start);" -// #include "validators.h" // hypothetical file defining ValidateIsFile() -// -// DEFINE_int32(end, 1000, "The last record to read"); -// -// DEFINE_string(filename, "my_file.txt", "The file to read"); -// // Crash if the specified file does not exist. -// static bool dummy = RegisterFlagValidator(&FLAGS_filename, -// &ValidateIsFile); -// -// DECLARE_bool(verbose); // some other file has a DEFINE_bool(verbose, ...) -// -// void MyFunc() { -// if (FLAGS_verbose) printf("Records %d-%d\n", FLAGS_start, FLAGS_end); -// } -// -// Then, at the command-line: -// ./foo --noverbose --start=5 --end=100 -// -// For more details, see -// doc/gflags.html -// -// --- A note about thread-safety: -// -// We describe many functions in this routine as being thread-hostile, -// thread-compatible, or thread-safe. Here are the meanings we use: -// -// thread-safe: it is safe for multiple threads to call this routine -// (or, when referring to a class, methods of this class) -// concurrently. 
-// thread-hostile: it is not safe for multiple threads to call this -// routine (or methods of this class) concurrently. In gflags, -// most thread-hostile routines are intended to be called early in, -// or even before, main() -- that is, before threads are spawned. -// thread-compatible: it is safe for multiple threads to read from -// this variable (when applied to variables), or to call const -// methods of this class (when applied to classes), as long as no -// other thread is writing to the variable or calling non-const -// methods of this class. - -#ifndef GFLAGS_GFLAGS_H_ -#define GFLAGS_GFLAGS_H_ - -#include -#include - -#include "gflags_declare.h" // IWYU pragma: export - - -// We always want to export variables defined in user code -#ifndef GFLAGS_DLL_DEFINE_FLAG -# ifdef _MSC_VER -# define GFLAGS_DLL_DEFINE_FLAG __declspec(dllexport) -# else -# define GFLAGS_DLL_DEFINE_FLAG -# endif -#endif - - -namespace GFLAGS_NAMESPACE { - - -// -------------------------------------------------------------------- -// To actually define a flag in a file, use DEFINE_bool, -// DEFINE_string, etc. at the bottom of this file. You may also find -// it useful to register a validator with the flag. This ensures that -// when the flag is parsed from the commandline, or is later set via -// SetCommandLineOption, we call the validation function. It is _not_ -// called when you assign the value to the flag directly using the = operator. -// -// The validation function should return true if the flag value is valid, and -// false otherwise. If the function returns false for the new setting of the -// flag, the flag will retain its current value. If it returns false for the -// default value, ParseCommandLineFlags() will die. -// -// This function is safe to call at global construct time (as in the -// example below). 
-// -// Example use: -// static bool ValidatePort(const char* flagname, int32 value) { -// if (value > 0 && value < 32768) // value is ok -// return true; -// printf("Invalid value for --%s: %d\n", flagname, (int)value); -// return false; -// } -// DEFINE_int32(port, 0, "What port to listen on"); -// static bool dummy = RegisterFlagValidator(&FLAGS_port, &ValidatePort); - -// Returns true if successfully registered, false if not (because the -// first argument doesn't point to a command-line flag, or because a -// validator is already registered for this flag). -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const bool* flag, bool (*validate_fn)(const char*, bool)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const int32* flag, bool (*validate_fn)(const char*, int32)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const int64* flag, bool (*validate_fn)(const char*, int64)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const uint64* flag, bool (*validate_fn)(const char*, uint64)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const double* flag, bool (*validate_fn)(const char*, double)); -extern GFLAGS_DLL_DECL bool RegisterFlagValidator(const std::string* flag, bool (*validate_fn)(const char*, const std::string&)); - -// Convenience macro for the registration of a flag validator -#define DEFINE_validator(name, validator) \ - static const bool name##_validator_registered = \ - GFLAGS_NAMESPACE::RegisterFlagValidator(&FLAGS_##name, validator) - - -// -------------------------------------------------------------------- -// These methods are the best way to get access to info about the -// list of commandline flags. Note that these routines are pretty slow. -// GetAllFlags: mostly-complete info about the list, sorted by file. 
-// ShowUsageWithFlags: pretty-prints the list to stdout (what --help does) -// ShowUsageWithFlagsRestrict: limit to filenames with restrict as a substr -// -// In addition to accessing flags, you can also access argv[0] (the program -// name) and argv (the entire commandline), which we sock away a copy of. -// These variables are static, so you should only set them once. -// -// No need to export this data only structure from DLL, avoiding VS warning 4251. -struct CommandLineFlagInfo { - std::string name; // the name of the flag - std::string type; // the type of the flag: int32, etc - std::string description; // the "help text" associated with the flag - std::string current_value; // the current value, as a string - std::string default_value; // the default value, as a string - std::string filename; // 'cleaned' version of filename holding the flag - bool has_validator_fn; // true if RegisterFlagValidator called on this flag - bool is_default; // true if the flag has the default value and - // has not been set explicitly from the cmdline - // or via SetCommandLineOption - const void* flag_ptr; // pointer to the flag's current value (i.e. FLAGS_foo) -}; - -// Using this inside of a validator is a recipe for a deadlock. -// TODO(user) Fix locking when validators are running, to make it safe to -// call validators during ParseAllFlags. -// Also make sure then to uncomment the corresponding unit test in -// gflags_unittest.sh -extern GFLAGS_DLL_DECL void GetAllFlags(std::vector* OUTPUT); -// These two are actually defined in gflags_reporting.cc. -extern GFLAGS_DLL_DECL void ShowUsageWithFlags(const char *argv0); // what --help does -extern GFLAGS_DLL_DECL void ShowUsageWithFlagsRestrict(const char *argv0, const char *restrict); - -// Create a descriptive string for a flag. -// Goes to some trouble to make pretty line breaks. 
-extern GFLAGS_DLL_DECL std::string DescribeOneFlag(const CommandLineFlagInfo& flag); - -// Thread-hostile; meant to be called before any threads are spawned. -extern GFLAGS_DLL_DECL void SetArgv(int argc, const char** argv); - -// The following functions are thread-safe as long as SetArgv() is -// only called before any threads start. -extern GFLAGS_DLL_DECL const std::vector& GetArgvs(); -extern GFLAGS_DLL_DECL const char* GetArgv(); // all of argv as a string -extern GFLAGS_DLL_DECL const char* GetArgv0(); // only argv0 -extern GFLAGS_DLL_DECL uint32 GetArgvSum(); // simple checksum of argv -extern GFLAGS_DLL_DECL const char* ProgramInvocationName(); // argv0, or "UNKNOWN" if not set -extern GFLAGS_DLL_DECL const char* ProgramInvocationShortName(); // basename(argv0) - -// ProgramUsage() is thread-safe as long as SetUsageMessage() is only -// called before any threads start. -extern GFLAGS_DLL_DECL const char* ProgramUsage(); // string set by SetUsageMessage() - -// VersionString() is thread-safe as long as SetVersionString() is only -// called before any threads start. -extern GFLAGS_DLL_DECL const char* VersionString(); // string set by SetVersionString() - - - -// -------------------------------------------------------------------- -// Normally you access commandline flags by just saying "if (FLAGS_foo)" -// or whatever, and set them by calling "FLAGS_foo = bar" (or, more -// commonly, via the DEFINE_foo macro). But if you need a bit more -// control, we have programmatic ways to get/set the flags as well. -// These programmatic ways to access flags are thread-safe, but direct -// access is only thread-compatible. - -// Return true iff the flagname was found. -// OUTPUT is set to the flag's value, or unchanged if we return false. -extern GFLAGS_DLL_DECL bool GetCommandLineOption(const char* name, std::string* OUTPUT); - -// Return true iff the flagname was found. OUTPUT is set to the flag's -// CommandLineFlagInfo or unchanged if we return false. 
-extern GFLAGS_DLL_DECL bool GetCommandLineFlagInfo(const char* name, CommandLineFlagInfo* OUTPUT); - -// Return the CommandLineFlagInfo of the flagname. exit() if name not found. -// Example usage, to check if a flag's value is currently the default value: -// if (GetCommandLineFlagInfoOrDie("foo").is_default) ... -extern GFLAGS_DLL_DECL CommandLineFlagInfo GetCommandLineFlagInfoOrDie(const char* name); - -enum GFLAGS_DLL_DECL FlagSettingMode { - // update the flag's value (can call this multiple times). - SET_FLAGS_VALUE, - // update the flag's value, but *only if* it has not yet been updated - // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef". - SET_FLAG_IF_DEFAULT, - // set the flag's default value to this. If the flag has not yet updated - // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef") - // change the flag's current value to the new default value as well. - SET_FLAGS_DEFAULT -}; - -// Set a particular flag ("command line option"). Returns a string -// describing the new value that the option has been set to. The -// return value API is not well-specified, so basically just depend on -// it to be empty if the setting failed for some reason -- the name is -// not a valid flag name, or the value is not a valid value -- and -// non-empty else. - -// SetCommandLineOption uses set_mode == SET_FLAGS_VALUE (the common case) -extern GFLAGS_DLL_DECL std::string SetCommandLineOption (const char* name, const char* value); -extern GFLAGS_DLL_DECL std::string SetCommandLineOptionWithMode(const char* name, const char* value, FlagSettingMode set_mode); - - -// -------------------------------------------------------------------- -// Saves the states (value, default value, whether the user has set -// the flag, registered validators, etc) of all flags, and restores -// them when the FlagSaver is destroyed. 
This is very useful in -// tests, say, when you want to let your tests change the flags, but -// make sure that they get reverted to the original states when your -// test is complete. -// -// Example usage: -// void TestFoo() { -// FlagSaver s1; -// FLAG_foo = false; -// FLAG_bar = "some value"; -// -// // test happens here. You can return at any time -// // without worrying about restoring the FLAG values. -// } -// -// Note: This class is marked with GFLAGS_ATTRIBUTE_UNUSED because all -// the work is done in the constructor and destructor, so in the standard -// usage example above, the compiler would complain that it's an -// unused variable. -// -// This class is thread-safe. However, its destructor writes to -// exactly the set of flags that have changed value during its -// lifetime, so concurrent _direct_ access to those flags -// (i.e. FLAGS_foo instead of {Get,Set}CommandLineOption()) is unsafe. - -class GFLAGS_DLL_DECL FlagSaver { - public: - FlagSaver(); - ~FlagSaver(); - - private: - class FlagSaverImpl* impl_; // we use pimpl here to keep API steady - - FlagSaver(const FlagSaver&); // no copying! - void operator=(const FlagSaver&); -}; - -// -------------------------------------------------------------------- -// Some deprecated or hopefully-soon-to-be-deprecated functions. - -// This is often used for logging. TODO(csilvers): figure out a better way -extern GFLAGS_DLL_DECL std::string CommandlineFlagsIntoString(); -// Usually where this is used, a FlagSaver should be used instead. -extern GFLAGS_DLL_DECL -bool ReadFlagsFromString(const std::string& flagfilecontents, - const char* prog_name, - bool errors_are_fatal); // uses SET_FLAGS_VALUE - -// These let you manually implement --flagfile functionality. -// DEPRECATED. 
-extern GFLAGS_DLL_DECL bool AppendFlagsIntoFile(const std::string& filename, const char* prog_name); -extern GFLAGS_DLL_DECL bool ReadFromFlagsFile(const std::string& filename, const char* prog_name, bool errors_are_fatal); // uses SET_FLAGS_VALUE - - -// -------------------------------------------------------------------- -// Useful routines for initializing flags from the environment. -// In each case, if 'varname' does not exist in the environment -// return defval. If 'varname' does exist but is not valid -// (e.g., not a number for an int32 flag), abort with an error. -// Otherwise, return the value. NOTE: for booleans, for true use -// 't' or 'T' or 'true' or '1', for false 'f' or 'F' or 'false' or '0'. - -extern GFLAGS_DLL_DECL bool BoolFromEnv(const char *varname, bool defval); -extern GFLAGS_DLL_DECL int32 Int32FromEnv(const char *varname, int32 defval); -extern GFLAGS_DLL_DECL int64 Int64FromEnv(const char *varname, int64 defval); -extern GFLAGS_DLL_DECL uint64 Uint64FromEnv(const char *varname, uint64 defval); -extern GFLAGS_DLL_DECL double DoubleFromEnv(const char *varname, double defval); -extern GFLAGS_DLL_DECL const char *StringFromEnv(const char *varname, const char *defval); - - -// -------------------------------------------------------------------- -// The next two functions parse gflags from main(): - -// Set the "usage" message for this program. For example: -// string usage("This program does nothing. Sample usage:\n"); -// usage += argv[0] + " "; -// SetUsageMessage(usage); -// Do not include commandline flags in the usage: we do that for you! -// Thread-hostile; meant to be called before any threads are spawned. -extern GFLAGS_DLL_DECL void SetUsageMessage(const std::string& usage); - -// Sets the version string, which is emitted with --version. -// For instance: SetVersionString("1.3"); -// Thread-hostile; meant to be called before any threads are spawned. 
-extern GFLAGS_DLL_DECL void SetVersionString(const std::string& version); - - -// Looks for flags in argv and parses them. Rearranges argv to put -// flags first, or removes them entirely if remove_flags is true. -// If a flag is defined more than once in the command line or flag -// file, the last definition is used. Returns the index (into argv) -// of the first non-flag argument. -// See top-of-file for more details on this function. -#ifndef SWIG // In swig, use ParseCommandLineFlagsScript() instead. -extern GFLAGS_DLL_DECL uint32 ParseCommandLineFlags(int *argc, char*** argv, bool remove_flags); -#endif - - -// Calls to ParseCommandLineNonHelpFlags and then to -// HandleCommandLineHelpFlags can be used instead of a call to -// ParseCommandLineFlags during initialization, in order to allow for -// changing default values for some FLAGS (via -// e.g. SetCommandLineOptionWithMode calls) between the time of -// command line parsing and the time of dumping help information for -// the flags as a result of command line parsing. If a flag is -// defined more than once in the command line or flag file, the last -// definition is used. Returns the index (into argv) of the first -// non-flag argument. (If remove_flags is true, will always return 1.) -extern GFLAGS_DLL_DECL uint32 ParseCommandLineNonHelpFlags(int *argc, char*** argv, bool remove_flags); - -// This is actually defined in gflags_reporting.cc. -// This function is misnamed (it also handles --version, etc.), but -// it's too late to change that now. :-( -extern GFLAGS_DLL_DECL void HandleCommandLineHelpFlags(); // in gflags_reporting.cc - -// Allow command line reparsing. Disables the error normally -// generated when an unknown flag is found, since it may be found in a -// later parse. Thread-hostile; meant to be called before any threads -// are spawned. -extern GFLAGS_DLL_DECL void AllowCommandLineReparsing(); - -// Reparse the flags that have not yet been recognized. 
Only flags -// registered since the last parse will be recognized. Any flag value -// must be provided as part of the argument using "=", not as a -// separate command line argument that follows the flag argument. -// Intended for handling flags from dynamically loaded libraries, -// since their flags are not registered until they are loaded. -extern GFLAGS_DLL_DECL void ReparseCommandLineNonHelpFlags(); - -// Clean up memory allocated by flags. This is only needed to reduce -// the quantity of "potentially leaked" reports emitted by memory -// debugging tools such as valgrind. It is not required for normal -// operation, or for the google perftools heap-checker. It must only -// be called when the process is about to exit, and all threads that -// might access flags are quiescent. Referencing flags after this is -// called will have unexpected consequences. This is not safe to run -// when multiple threads might be running: the function is -// thread-hostile. -extern GFLAGS_DLL_DECL void ShutDownCommandLineFlags(); - - -// -------------------------------------------------------------------- -// Now come the command line flag declaration/definition macros that -// will actually be used. They're kind of hairy. A major reason -// for this is initialization: we want people to be able to access -// variables in global constructors and have that not crash, even if -// their global constructor runs before the global constructor here. -// (Obviously, we can't guarantee the flags will have the correct -// default value in that case, but at least accessing them is safe.) -// The only way to do that is have flags point to a static buffer. -// So we make one, using a union to ensure proper alignment, and -// then use placement-new to actually set up the flag with the -// correct default value. In the same vein, we have to worry about -// flag access in global destructors, so FlagRegisterer has to be -// careful never to destroy the flag-values it constructs. 
-// -// Note that when we define a flag variable FLAGS_, we also -// preemptively define a junk variable, FLAGS_no. This is to -// cause a link-time error if someone tries to define 2 flags with -// names like "logging" and "nologging". We do this because a bool -// flag FLAG can be set from the command line to true with a "-FLAG" -// argument, and to false with a "-noFLAG" argument, and so this can -// potentially avert confusion. -// -// We also put flags into their own namespace. It is purposefully -// named in an opaque way that people should have trouble typing -// directly. The idea is that DEFINE puts the flag in the weird -// namespace, and DECLARE imports the flag from there into the current -// namespace. The net result is to force people to use DECLARE to get -// access to a flag, rather than saying "extern GFLAGS_DLL_DECL bool FLAGS_whatever;" -// or some such instead. We want this so we can put extra -// functionality (like sanity-checking) in DECLARE if we want, and -// make sure it is picked up everywhere. -// -// We also put the type of the variable in the namespace, so that -// people can't DECLARE_int32 something that they DEFINE_bool'd -// elsewhere. - -class GFLAGS_DLL_DECL FlagRegisterer { - public: - FlagRegisterer(const char* name, const char* type, - const char* help, const char* filename, - void* current_storage, void* defvalue_storage); -}; - -// If your application #defines STRIP_FLAG_HELP to a non-zero value -// before #including this file, we remove the help message from the -// binary file. This can reduce the size of the resulting binary -// somewhat, and may also be useful for security reasons. - -extern GFLAGS_DLL_DECL const char kStrippedFlagHelp[]; - - -} // namespace GFLAGS_NAMESPACE - - -#ifndef SWIG // In swig, ignore the main flag declarations - -#if defined(STRIP_FLAG_HELP) && STRIP_FLAG_HELP > 0 -// Need this construct to avoid the 'defined but not used' warning. -#define MAYBE_STRIPPED_HELP(txt) \ - (false ? 
(txt) : GFLAGS_NAMESPACE::kStrippedFlagHelp) -#else -#define MAYBE_STRIPPED_HELP(txt) txt -#endif - -// Each command-line flag has two variables associated with it: one -// with the current value, and one with the default value. However, -// we have a third variable, which is where value is assigned; it's a -// constant. This guarantees that FLAG_##value is initialized at -// static initialization time (e.g. before program-start) rather than -// than global construction time (which is after program-start but -// before main), at least when 'value' is a compile-time constant. We -// use a small trick for the "default value" variable, and call it -// FLAGS_no. This serves the second purpose of assuring a -// compile error if someone tries to define a flag named no -// which is illegal (--foo and --nofoo both affect the "foo" flag). -#define DEFINE_VARIABLE(type, shorttype, name, value, help) \ - namespace fL##shorttype { \ - static const type FLAGS_nono##name = value; \ - /* We always want to export defined variables, dll or no */ \ - GFLAGS_DLL_DEFINE_FLAG type FLAGS_##name = FLAGS_nono##name; \ - type FLAGS_no##name = FLAGS_nono##name; \ - static GFLAGS_NAMESPACE::FlagRegisterer o_##name( \ - #name, #type, MAYBE_STRIPPED_HELP(help), __FILE__, \ - &FLAGS_##name, &FLAGS_no##name); \ - } \ - using fL##shorttype::FLAGS_##name - -// For DEFINE_bool, we want to do the extra check that the passed-in -// value is actually a bool, and not a string or something that can be -// coerced to a bool. These declarations (no definition needed!) will -// help us do that, and never evaluate From, which is important. -// We'll use 'sizeof(IsBool(val))' to distinguish. This code requires -// that the compiler have different sizes for bool & double. Since -// this is not guaranteed by the standard, we check it with a -// COMPILE_ASSERT. -namespace fLB { -struct CompileAssert {}; -typedef CompileAssert expected_sizeof_double_neq_sizeof_bool[ - (sizeof(double) != sizeof(bool)) ? 
1 : -1]; -template double GFLAGS_DLL_DECL IsBoolFlag(const From& from); -GFLAGS_DLL_DECL bool IsBoolFlag(bool from); -} // namespace fLB - -// Here are the actual DEFINE_*-macros. The respective DECLARE_*-macros -// are in a separate include, gflags_declare.h, for reducing -// the physical transitive size for DECLARE use. -#define DEFINE_bool(name, val, txt) \ - namespace fLB { \ - typedef ::fLB::CompileAssert FLAG_##name##_value_is_not_a_bool[ \ - (sizeof(::fLB::IsBoolFlag(val)) != sizeof(double))? 1: -1]; \ - } \ - DEFINE_VARIABLE(bool, B, name, val, txt) - -#define DEFINE_int32(name, val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::int32, I, \ - name, val, txt) - -#define DEFINE_int64(name, val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::int64, I64, \ - name, val, txt) - -#define DEFINE_uint64(name,val, txt) \ - DEFINE_VARIABLE(GFLAGS_NAMESPACE::uint64, U64, \ - name, val, txt) - -#define DEFINE_double(name, val, txt) \ - DEFINE_VARIABLE(double, D, name, val, txt) - -// Strings are trickier, because they're not a POD, so we can't -// construct them at static-initialization time (instead they get -// constructed at global-constructor time, which is much later). To -// try to avoid crashes in that case, we use a char buffer to store -// the string, which we can static-initialize, and then placement-new -// into it later. It's not perfect, but the best we can do. - -namespace fLS { - -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - const char *value) { - return new(stringspot) clstring(value); -} -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - const clstring &value) { - return new(stringspot) clstring(value); -} -inline clstring* dont_pass0toDEFINE_string(char *stringspot, - int value); -} // namespace fLS - -// We need to define a var named FLAGS_no##name so people don't define -// --string and --nostring. And we need a temporary place to put val -// so we don't have to evaluate it twice. Two great needs that go -// great together! 
-// The weird 'using' + 'extern' inside the fLS namespace is to work around -// an unknown compiler bug/issue with the gcc 4.2.1 on SUSE 10. See -// http://code.google.com/p/google-gflags/issues/detail?id=20 -#define DEFINE_string(name, val, txt) \ - namespace fLS { \ - using ::fLS::clstring; \ - static union { void* align; char s[sizeof(clstring)]; } s_##name[2]; \ - clstring* const FLAGS_no##name = ::fLS:: \ - dont_pass0toDEFINE_string(s_##name[0].s, \ - val); \ - static GFLAGS_NAMESPACE::FlagRegisterer o_##name( \ - #name, "string", MAYBE_STRIPPED_HELP(txt), __FILE__, \ - s_##name[0].s, new (s_##name[1].s) clstring(*FLAGS_no##name)); \ - extern GFLAGS_DLL_DEFINE_FLAG clstring& FLAGS_##name; \ - using fLS::FLAGS_##name; \ - clstring& FLAGS_##name = *FLAGS_no##name; \ - } \ - using fLS::FLAGS_##name - -#endif // SWIG - - -// Import gflags library symbols into alternative/deprecated namespace(s) -#include "gflags_gflags.h" - - -#endif // GFLAGS_GFLAGS_H_ diff --git a/packager/third_party/gflags/gen/win/include/gflags/gflags_completions.h b/packager/third_party/gflags/gen/win/include/gflags/gflags_completions.h deleted file mode 100644 index f951c1e02d..0000000000 --- a/packager/third_party/gflags/gen/win/include/gflags/gflags_completions.h +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2008, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// --- - -// -// Implement helpful bash-style command line flag completions -// -// ** Functional API: -// HandleCommandLineCompletions() should be called early during -// program startup, but after command line flag code has been -// initialized, such as the beginning of HandleCommandLineHelpFlags(). -// It checks the value of the flag --tab_completion_word. If this -// flag is empty, nothing happens here. If it contains a string, -// however, then HandleCommandLineCompletions() will hijack the -// process, attempting to identify the intention behind this -// completion. Regardless of the outcome of this deduction, the -// process will be terminated, similar to --helpshort flag -// handling. -// -// ** Overview of Bash completions: -// Bash can be told to programatically determine completions for the -// current 'cursor word'. It does this by (in this case) invoking a -// command with some additional arguments identifying the command -// being executed, the word being completed, and the previous word -// (if any). 
Bash then expects a sequence of output lines to be -// printed to stdout. If these lines all contain a common prefix -// longer than the cursor word, bash will replace the cursor word -// with that common prefix, and display nothing. If there isn't such -// a common prefix, bash will display the lines in pages using 'more'. -// -// ** Strategy taken for command line completions: -// If we can deduce either the exact flag intended, or a common flag -// prefix, we'll output exactly that. Otherwise, if information -// must be displayed to the user, we'll take the opportunity to add -// some helpful information beyond just the flag name (specifically, -// we'll include the default flag value and as much of the flag's -// description as can fit on a single terminal line width, as specified -// by the flag --tab_completion_columns). Furthermore, we'll try to -// make bash order the output such that the most useful or relevent -// flags are the most likely to be shown at the top. -// -// ** Additional features: -// To assist in finding that one really useful flag, substring matching -// was implemented. Before pressing a to get completion for the -// current word, you can append one or more '?' to the flag to do -// substring matching. Here's the semantics: -// --foo Show me all flags with names prefixed by 'foo' -// --foo? Show me all flags with 'foo' somewhere in the name -// --foo?? Same as prior case, but also search in module -// definition path for 'foo' -// --foo??? Same as prior case, but also search in flag -// descriptions for 'foo' -// Finally, we'll trim the output to a relatively small number of -// flags to keep bash quiet about the verbosity of output. If one -// really wanted to see all possible matches, appending a '+' to the -// search word will force the exhaustive list of matches to be printed. 
-// -// ** How to have bash accept completions from a binary: -// Bash requires that it be informed about each command that programmatic -// completion should be enabled for. Example addition to a .bashrc -// file would be (your path to gflags_completions.sh file may differ): - -/* -$ complete -o bashdefault -o default -o nospace -C \ - '/home/build/eng/bash/bash_completions.sh --tab_completion_columns $COLUMNS' \ - time env binary_name another_binary [...] -*/ - -// This would allow the following to work: -// $ /path/to/binary_name --vmodule -// Or: -// $ ./bin/path/another_binary --gfs_u -// (etc) -// -// Sadly, it appears that bash gives no easy way to force this behavior for -// all commands. That's where the "time" in the above example comes in. -// If you haven't specifically added a command to the list of completion -// supported commands, you can still get completions by prefixing the -// entire command with "env". -// $ env /some/brand/new/binary --vmod -// Assuming that "binary" is a newly compiled binary, this should still -// produce the expected completion output. - - -#ifndef GFLAGS_COMPLETIONS_H_ -#define GFLAGS_COMPLETIONS_H_ - -namespace google { - -extern void HandleCommandLineCompletions(void); - -} - -#endif // GFLAGS_COMPLETIONS_H_ diff --git a/packager/third_party/gflags/gen/win/include/gflags/gflags_declare.h b/packager/third_party/gflags/gen/win/include/gflags/gflags_declare.h deleted file mode 100644 index fbc8466fd4..0000000000 --- a/packager/third_party/gflags/gen/win/include/gflags/gflags_declare.h +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 1999, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// --- -// -// Revamped and reorganized by Craig Silverstein -// -// This is the file that should be included by any file which declares -// command line flag. - -#ifndef GFLAGS_DECLARE_H_ -#define GFLAGS_DECLARE_H_ - - -// --------------------------------------------------------------------------- -// Namespace of gflags library symbols. -#define GFLAGS_NAMESPACE google - -// --------------------------------------------------------------------------- -// Windows DLL import/export. 
- -// We always want to import the symbols of the gflags library -#ifndef GFLAGS_DLL_DECL -# if 0 && defined(_MSC_VER) -# define GFLAGS_DLL_DECL __declspec(dllimport) -# else -# define GFLAGS_DLL_DECL -# endif -#endif - -// We always want to import variables declared in user code -#ifndef GFLAGS_DLL_DECLARE_FLAG -# ifdef _MSC_VER -# define GFLAGS_DLL_DECLARE_FLAG __declspec(dllimport) -# else -# define GFLAGS_DLL_DECLARE_FLAG -# endif -#endif - -// --------------------------------------------------------------------------- -// Flag types -#include -#if 1 -# include // the normal place uint32_t is defined -#elif 1 -# include // the normal place u_int32_t is defined -#elif 0 -# include // a third place for uint32_t or u_int32_t -#endif - -namespace GFLAGS_NAMESPACE { - -#if 0 // C99 -typedef int32_t int32; -typedef uint32_t uint32; -typedef int64_t int64; -typedef uint64_t uint64; -#elif 0 // BSD -typedef int32_t int32; -typedef u_int32_t uint32; -typedef int64_t int64; -typedef u_int64_t uint64; -#elif 1 // Windows -typedef __int32 int32; -typedef unsigned __int32 uint32; -typedef __int64 int64; -typedef unsigned __int64 uint64; -#else -# error Do not know how to define a 32-bit integer quantity on your system -#endif - -} // namespace GFLAGS_NAMESPACE - - -namespace fLS { - -// The meaning of "string" might be different between now and when the -// macros below get invoked (e.g., if someone is experimenting with -// other string implementations that get defined after this file is -// included). Save the current meaning now and use it in the macros. 
-typedef std::string clstring; - -} // namespace fLS - - -#define DECLARE_VARIABLE(type, shorttype, name) \ - /* We always want to import declared variables, dll or no */ \ - namespace fL##shorttype { extern GFLAGS_DLL_DECLARE_FLAG type FLAGS_##name; } \ - using fL##shorttype::FLAGS_##name - -#define DECLARE_bool(name) \ - DECLARE_VARIABLE(bool, B, name) - -#define DECLARE_int32(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::int32, I, name) - -#define DECLARE_int64(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::int64, I64, name) - -#define DECLARE_uint64(name) \ - DECLARE_VARIABLE(::GFLAGS_NAMESPACE::uint64, U64, name) - -#define DECLARE_double(name) \ - DECLARE_VARIABLE(double, D, name) - -#define DECLARE_string(name) \ - /* We always want to import declared variables, dll or no */ \ - namespace fLS { \ - using ::fLS::clstring; \ - extern GFLAGS_DLL_DECLARE_FLAG ::fLS::clstring& FLAGS_##name; \ - } \ - using fLS::FLAGS_##name - - -#endif // GFLAGS_DECLARE_H_ diff --git a/packager/third_party/gflags/gen/win/include/gflags/gflags_gflags.h b/packager/third_party/gflags/gen/win/include/gflags/gflags_gflags.h deleted file mode 100644 index 0c17825dd6..0000000000 --- a/packager/third_party/gflags/gen/win/include/gflags/gflags_gflags.h +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2014, Andreas Schuh -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// ----------------------------------------------------------------------------- -// Imports the gflags library symbols into an alternative/deprecated namespace. 
- -#ifndef GFLAGS_GFLAGS_H_ -# error The internal header gflags_gflags.h may only be included by gflags.h -#endif - -#ifndef GFLAGS_NS_GFLAGS_H_ -#define GFLAGS_NS_GFLAGS_H_ - - -namespace gflags { - - -using GFLAGS_NAMESPACE::int32; -using GFLAGS_NAMESPACE::uint32; -using GFLAGS_NAMESPACE::int64; -using GFLAGS_NAMESPACE::uint64; - -using GFLAGS_NAMESPACE::RegisterFlagValidator; -using GFLAGS_NAMESPACE::CommandLineFlagInfo; -using GFLAGS_NAMESPACE::GetAllFlags; -using GFLAGS_NAMESPACE::ShowUsageWithFlags; -using GFLAGS_NAMESPACE::ShowUsageWithFlagsRestrict; -using GFLAGS_NAMESPACE::DescribeOneFlag; -using GFLAGS_NAMESPACE::SetArgv; -using GFLAGS_NAMESPACE::GetArgvs; -using GFLAGS_NAMESPACE::GetArgv; -using GFLAGS_NAMESPACE::GetArgv0; -using GFLAGS_NAMESPACE::GetArgvSum; -using GFLAGS_NAMESPACE::ProgramInvocationName; -using GFLAGS_NAMESPACE::ProgramInvocationShortName; -using GFLAGS_NAMESPACE::ProgramUsage; -using GFLAGS_NAMESPACE::VersionString; -using GFLAGS_NAMESPACE::GetCommandLineOption; -using GFLAGS_NAMESPACE::GetCommandLineFlagInfo; -using GFLAGS_NAMESPACE::GetCommandLineFlagInfoOrDie; -using GFLAGS_NAMESPACE::FlagSettingMode; -using GFLAGS_NAMESPACE::SET_FLAGS_VALUE; -using GFLAGS_NAMESPACE::SET_FLAG_IF_DEFAULT; -using GFLAGS_NAMESPACE::SET_FLAGS_DEFAULT; -using GFLAGS_NAMESPACE::SetCommandLineOption; -using GFLAGS_NAMESPACE::SetCommandLineOptionWithMode; -using GFLAGS_NAMESPACE::FlagSaver; -using GFLAGS_NAMESPACE::CommandlineFlagsIntoString; -using GFLAGS_NAMESPACE::ReadFlagsFromString; -using GFLAGS_NAMESPACE::AppendFlagsIntoFile; -using GFLAGS_NAMESPACE::ReadFromFlagsFile; -using GFLAGS_NAMESPACE::BoolFromEnv; -using GFLAGS_NAMESPACE::Int32FromEnv; -using GFLAGS_NAMESPACE::Int64FromEnv; -using GFLAGS_NAMESPACE::Uint64FromEnv; -using GFLAGS_NAMESPACE::DoubleFromEnv; -using GFLAGS_NAMESPACE::StringFromEnv; -using GFLAGS_NAMESPACE::SetUsageMessage; -using GFLAGS_NAMESPACE::SetVersionString; -using GFLAGS_NAMESPACE::ParseCommandLineNonHelpFlags; -using 
GFLAGS_NAMESPACE::HandleCommandLineHelpFlags; -using GFLAGS_NAMESPACE::AllowCommandLineReparsing; -using GFLAGS_NAMESPACE::ReparseCommandLineNonHelpFlags; -using GFLAGS_NAMESPACE::ShutDownCommandLineFlags; -using GFLAGS_NAMESPACE::FlagRegisterer; - -#ifndef SWIG -using GFLAGS_NAMESPACE::ParseCommandLineFlags; -#endif - - -} // namespace gflags - - -#endif // GFLAGS_NS_GFLAGS_H_ diff --git a/packager/third_party/gflags/gen/win/include/private/config.h b/packager/third_party/gflags/gen/win/include/private/config.h deleted file mode 100644 index d541580eab..0000000000 --- a/packager/third_party/gflags/gen/win/include/private/config.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Generated from config.h.in during build configuration using CMake. */ - -// Note: This header file is only used internally. It is not part of public interface! - -// --------------------------------------------------------------------------- -// System checks - -// Define if you build this library for a MS Windows OS. -#define OS_WINDOWS - -// Define if you have the header file. -#define HAVE_STDINT_H - -// Define if you have the header file. -#define HAVE_SYS_TYPES_H - -// Define if you have the header file. -/* #undef HAVE_INTTYPES_H */ - -// Define if you have the header file. -#define HAVE_SYS_STAT_H - -// Define if you have the header file. -/* #undef HAVE_UNISTD_H */ - -// Define if you have the header file. -/* #undef HAVE_FNMATCH_H */ - -// Define if you have the header file (Windows 2000/XP). -#define HAVE_SHLWAPI_H - -// Define if you have the strtoll function. -/* #undef HAVE_STRTOLL */ - -// Define if you have the strtoq function. -/* #undef HAVE_STRTOQ */ - -// Define if you have the header file. -/* #undef HAVE_PTHREAD */ - -// Define if your pthread library defines the type pthread_rwlock_t -/* #undef HAVE_RWLOCK */ - -// gcc requires this to get PRId64, etc. 
-#if defined(HAVE_INTTYPES_H) && !defined(__STDC_FORMAT_MACROS) -# define __STDC_FORMAT_MACROS 1 -#endif - -// --------------------------------------------------------------------------- -// Package information - -// Name of package. -#define PACKAGE gflags - -// Define to the full name of this package. -#define PACKAGE_NAME gflags - -// Define to the full name and version of this package. -#define PACKAGE_STRING gflags 2.2.0 - -// Define to the one symbol short name of this package. -#define PACKAGE_TARNAME gflags-2.2.0 - -// Define to the version of this package. -#define PACKAGE_VERSION 2.2.0 - -// Version number of package. -#define VERSION PACKAGE_VERSION - -// Define to the address where bug reports for this package should be sent. -#define PACKAGE_BUGREPORT https://github.com/schuhschuh/gflags/issues - -// --------------------------------------------------------------------------- -// Path separator -#ifndef PATH_SEPARATOR -# ifdef OS_WINDOWS -# define PATH_SEPARATOR '\\' -# else -# define PATH_SEPARATOR '/' -# endif -#endif - -// --------------------------------------------------------------------------- -// Windows - -// Whether gflags library is a DLL. -#ifndef GFLAGS_IS_A_DLL -# define GFLAGS_IS_A_DLL 0 -#endif - -// Always export symbols when compiling a shared library as this file is only -// included by internal modules when building the gflags library itself. -// The gflags_declare.h header file will set it to import these symbols otherwise. 
-#ifndef GFLAGS_DLL_DECL -# if GFLAGS_IS_A_DLL && defined(_MSC_VER) -# define GFLAGS_DLL_DECL __declspec(dllexport) -# else -# define GFLAGS_DLL_DECL -# endif -#endif -// Flags defined by the gflags library itself must be exported -#ifndef GFLAGS_DLL_DEFINE_FLAG -# define GFLAGS_DLL_DEFINE_FLAG GFLAGS_DLL_DECL -#endif - -#ifdef OS_WINDOWS -// The unittests import the symbols of the shared gflags library -# if GFLAGS_IS_A_DLL && defined(_MSC_VER) -# define GFLAGS_DLL_DECL_FOR_UNITTESTS __declspec(dllimport) -# endif -# include "windows_port.h" -#endif diff --git a/packager/third_party/gflags/gflags.gyp b/packager/third_party/gflags/gflags.gyp deleted file mode 100644 index 07d26ae137..0000000000 --- a/packager/third_party/gflags/gflags.gyp +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2011 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -{ - 'variables': { - 'gflags_root': '<(DEPTH)/third_party/gflags', - 'conditions': [ - ['OS=="win"', { - 'gflags_gen_arch_root': '<(gflags_root)/gen/win', - }, { - 'gflags_gen_arch_root': '<(gflags_root)/gen/posix', - }], - ], - }, - 'targets': [ - { - 'target_name': 'gflags', - 'type': 'static_library', - 'include_dirs': [ - '<(gflags_gen_arch_root)/include/gflags', # For configured files. - '<(gflags_gen_arch_root)/include/private', # For config.h - '<(gflags_root)/src/src', # For everything else. - ], - 'defines': [ - # These macros exist so flags and symbols are properly - # exported when building DLLs. 
Since we don't build DLLs, we - # need to disable them. - 'GFLAGS_DLL_DECL=', - 'GFLAGS_DLL_DECLARE_FLAG=', - 'GFLAGS_DLL_DEFINE_FLAG=', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - '<(gflags_gen_arch_root)/include', # For configured files. - '<(gflags_root)/src/src', # For everything else. - ], - 'defines': [ - 'GFLAGS_DLL_DECL=', - 'GFLAGS_DLL_DECLARE_FLAG=', - 'GFLAGS_DLL_DEFINE_FLAG=', - ], - }, - 'sources': [ - 'src/src/gflags.cc', - 'src/src/gflags_completions.cc', - 'src/src/gflags_reporting.cc', - ], - 'conditions': [ - ['OS=="win"', { - 'sources': [ - 'src/src/windows_port.cc', - ], - 'msvs_disabled_warnings': [ - 4005, # WIN32_LEAN_AND_MEAN redefinition. - 4267, # Conversion from size_t to "type". - ], - 'configurations': { - 'Common_Base': { - 'msvs_configuration_attributes': { - 'CharacterSet': '2', # Use Multi-byte Character Set. - }, - }, - }, - }], - # TODO(andrew): Look into fixing this warning upstream: - # http://code.google.com/p/webrtc/issues/detail?id=760 - ['OS=="win" and clang==1', { - 'msvs_settings': { - 'VCCLCompilerTool': { - 'AdditionalOptions': [ - '-Wno-microsoft-include', - ], - }, - }, - }], - ], - }, - ], -} diff --git a/packager/third_party/glog/CMakeLists.txt b/packager/third_party/glog/CMakeLists.txt new file mode 100644 index 0000000000..5a54dd0805 --- /dev/null +++ b/packager/third_party/glog/CMakeLists.txt @@ -0,0 +1,19 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# CMake build file to host glog configuration. + +# We will use abseil for flags, so turn off glog's search for gflags. +set(WITH_GFLAGS OFF) + +# Don't depend on gtest, since we won't be building glog's tests. +set(WITH_GTEST OFF) + +# Don't depend on or search for libunwind. 
+set(WITH_UNWIND OFF) + +# With these set in scope of this folder, load the library's own CMakeLists.txt. +add_subdirectory(source) diff --git a/packager/third_party/glog/source b/packager/third_party/glog/source new file mode 160000 index 0000000000..c515e1ae2f --- /dev/null +++ b/packager/third_party/glog/source @@ -0,0 +1 @@ +Subproject commit c515e1ae2fc8b36ca19362842f9347e9429be7ad diff --git a/packager/third_party/googletest/CMakeLists.txt b/packager/third_party/googletest/CMakeLists.txt new file mode 100644 index 0000000000..e692eb1597 --- /dev/null +++ b/packager/third_party/googletest/CMakeLists.txt @@ -0,0 +1,18 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# CMake build file to host gtest configuration. + +# This is required, but doesn't have to correspond to the actual version we pin +# to. +set(GOOGLETEST_VERSION "1.12.0") + +# Force gtest to use a shared CRT on MSVC. Without this, we get link errors on +# our tests on Windows. +set(gtest_force_shared_crt ON) + +# With these set in scope of this folder, load the library's own CMakeLists.txt. 
+add_subdirectory(source/googlemock) diff --git a/packager/third_party/googletest/source b/packager/third_party/googletest/source new file mode 160000 index 0000000000..15460959cb --- /dev/null +++ b/packager/third_party/googletest/source @@ -0,0 +1 @@ +Subproject commit 15460959cbbfa20e66ef0b5ab497367e47fc0a04 diff --git a/packager/third_party/json b/packager/third_party/json new file mode 160000 index 0000000000..954b10ad3b --- /dev/null +++ b/packager/third_party/json @@ -0,0 +1 @@ +Subproject commit 954b10ad3baa5d92bb9cd5bb93c7258433cd2bb2 diff --git a/packager/third_party/libc++-static/README b/packager/third_party/libc++-static/README deleted file mode 100644 index 7e509d1c35..0000000000 --- a/packager/third_party/libc++-static/README +++ /dev/null @@ -1,5 +0,0 @@ -This is a placeholder. Shaka Packager depends on an old version of the -Chromium build system, which unconditionally adds this folder to the library -search path. We no longer fetch a copy of libc++-static, but chromium's -common.gypi file can't be configured to exclude this from the linker path. -Therefore we just add a placeholder here to silence linker warnings. diff --git a/packager/third_party/mbedtls/CMakeLists.txt b/packager/third_party/mbedtls/CMakeLists.txt new file mode 100644 index 0000000000..b7ae82ecf6 --- /dev/null +++ b/packager/third_party/mbedtls/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright 2022 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# CMake build file to host mbedtls configuration. + +# Don't build executables or tests from mbedtls. +set(ENABLE_PROGRAMS OFF) +set(ENABLE_TESTING OFF) + +# Don't generate any files, which would mean a build-time dependency on Perl. +set(GEN_FILES OFF) + +# With these set in scope of this folder, load the library's own CMakeLists.txt. 
+add_subdirectory(source) diff --git a/packager/third_party/mbedtls/source b/packager/third_party/mbedtls/source new file mode 160000 index 0000000000..d65aeb3734 --- /dev/null +++ b/packager/third_party/mbedtls/source @@ -0,0 +1 @@ +Subproject commit d65aeb37349ad1a50e0f6c9b694d4b5290d60e49 diff --git a/packager/third_party/yasm/BUILD.gn b/packager/third_party/yasm/BUILD.gn deleted file mode 100644 index 9ed8e46d68..0000000000 --- a/packager/third_party/yasm/BUILD.gn +++ /dev/null @@ -1,472 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# The yasm build process creates a slew of small C subprograms that -# dynamically generate files at various point in the build process. This makes -# the build integration moderately complex. -# -# There are three classes of dynamically generated files: -# 1) C source files that should be included in the build (eg., lc3bid.c) -# 2) C source files that are #included by static C sources (eg., license.c) -# 3) Intermediate files that are used as input by other subprograms to -# further generate files in category #1 or #2. (eg., version.mac) -# -# This structure is represented with the following targets: -# 1) yasm -- Sources, flags for the main yasm executable. Also has most of -# of the actions and rules that invoke the subprograms. -# 2) yasm_config -- General build configuration including setting a -# inputs listing the checked in version of files -# generated by manually running configure. These manually -# generated files are used by all binaries. -# 3) yasm_utils -- Object files with memory management and hashing utilities -# shared between yasm and the genperf subprogram. -# 4) genmacro, genmodule, etc. -- One executable target for each subprogram. -# 5) generate_license, generate_module, etc. -- Actions that invoke programs -# built in #4 to generate .c files. -# 6) compile_gperf, compile_re2c, etc. 
-- Actions that invoke programs that -# turn intermediate files into .c files. - -if (current_toolchain == host_toolchain) { - # Various files referenced by multiple targets. - yasm_gen_include_dir = "$target_gen_dir/include" - config_makefile = "source/config/$host_os/Makefile" - version_file = "version.mac" - - import("//build/compiled_action.gni") - - config("yasm_config") { - include_dirs = [ - "source/config/$host_os", - "source/patched-yasm", - ] - defines = [ "HAVE_CONFIG_H" ] - if (is_posix) { - cflags = [ "-std=gnu99" ] - } - } - - executable("genmacro") { - sources = [ - "source/patched-yasm/tools/genmacro/genmacro.c", - ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - } - - executable("genmodule") { - sources = [ - "source/patched-yasm/libyasm/genmodule.c", - ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - } - - executable("genperf") { - sources = [ - "source/patched-yasm/tools/genperf/genperf.c", - "source/patched-yasm/tools/genperf/perfect.c", - ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - - deps = [ - ":yasm_utils", - ] - } - - # Used by both yasm and genperf binaries. 
- source_set("yasm_utils") { - sources = [ - "source/patched-yasm/libyasm/phash.c", - "source/patched-yasm/libyasm/xmalloc.c", - "source/patched-yasm/libyasm/xstrdup.c", - ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - } - - executable("genstring") { - sources = [ - "source/patched-yasm/genstring.c", - ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - } - - executable("genversion") { - sources = [ - "source/patched-yasm/modules/preprocs/nasm/genversion.c", - ] - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - } - - executable("re2c") { - sources = [ - "source/patched-yasm/tools/re2c/actions.c", - "source/patched-yasm/tools/re2c/code.c", - "source/patched-yasm/tools/re2c/dfa.c", - "source/patched-yasm/tools/re2c/main.c", - "source/patched-yasm/tools/re2c/mbo_getopt.c", - "source/patched-yasm/tools/re2c/parser.c", - "source/patched-yasm/tools/re2c/scanner.c", - "source/patched-yasm/tools/re2c/substr.c", - "source/patched-yasm/tools/re2c/translate.c", - ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - "//build/config/compiler:no_chromium_code", - ] - - # re2c is missing CLOSEVOP from one switch. 
- if (is_posix) { - cflags = [ "-Wno-switch" ] - } - } - - executable("yasm") { - sources = [ - "source/patched-yasm/frontends/yasm/yasm-options.c", - "source/patched-yasm/frontends/yasm/yasm.c", - "source/patched-yasm/libyasm/assocdat.c", - "source/patched-yasm/libyasm/bc-align.c", - "source/patched-yasm/libyasm/bc-data.c", - "source/patched-yasm/libyasm/bc-incbin.c", - "source/patched-yasm/libyasm/bc-org.c", - "source/patched-yasm/libyasm/bc-reserve.c", - "source/patched-yasm/libyasm/bitvect.c", - "source/patched-yasm/libyasm/bytecode.c", - "source/patched-yasm/libyasm/errwarn.c", - "source/patched-yasm/libyasm/expr.c", - "source/patched-yasm/libyasm/file.c", - "source/patched-yasm/libyasm/floatnum.c", - "source/patched-yasm/libyasm/hamt.c", - "source/patched-yasm/libyasm/insn.c", - "source/patched-yasm/libyasm/intnum.c", - "source/patched-yasm/libyasm/inttree.c", - "source/patched-yasm/libyasm/linemap.c", - "source/patched-yasm/libyasm/md5.c", - "source/patched-yasm/libyasm/mergesort.c", - "source/patched-yasm/libyasm/section.c", - "source/patched-yasm/libyasm/strcasecmp.c", - "source/patched-yasm/libyasm/strsep.c", - "source/patched-yasm/libyasm/symrec.c", - "source/patched-yasm/libyasm/valparam.c", - "source/patched-yasm/libyasm/value.c", - "source/patched-yasm/modules/arch/lc3b/lc3barch.c", - "source/patched-yasm/modules/arch/lc3b/lc3bbc.c", - "source/patched-yasm/modules/arch/x86/x86arch.c", - "source/patched-yasm/modules/arch/x86/x86bc.c", - "source/patched-yasm/modules/arch/x86/x86expr.c", - "source/patched-yasm/modules/arch/x86/x86id.c", - "source/patched-yasm/modules/dbgfmts/codeview/cv-dbgfmt.c", - "source/patched-yasm/modules/dbgfmts/codeview/cv-symline.c", - "source/patched-yasm/modules/dbgfmts/codeview/cv-type.c", - "source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-aranges.c", - "source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c", - "source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-info.c", - 
"source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-line.c", - "source/patched-yasm/modules/dbgfmts/null/null-dbgfmt.c", - "source/patched-yasm/modules/dbgfmts/stabs/stabs-dbgfmt.c", - "source/patched-yasm/modules/listfmts/nasm/nasm-listfmt.c", - "source/patched-yasm/modules/objfmts/bin/bin-objfmt.c", - "source/patched-yasm/modules/objfmts/coff/coff-objfmt.c", - "source/patched-yasm/modules/objfmts/coff/win64-except.c", - "source/patched-yasm/modules/objfmts/dbg/dbg-objfmt.c", - "source/patched-yasm/modules/objfmts/elf/elf-objfmt.c", - "source/patched-yasm/modules/objfmts/elf/elf-x86-amd64.c", - "source/patched-yasm/modules/objfmts/elf/elf-x86-x86.c", - "source/patched-yasm/modules/objfmts/elf/elf.c", - "source/patched-yasm/modules/objfmts/macho/macho-objfmt.c", - "source/patched-yasm/modules/objfmts/rdf/rdf-objfmt.c", - "source/patched-yasm/modules/objfmts/xdf/xdf-objfmt.c", - "source/patched-yasm/modules/parsers/gas/gas-parse-intel.c", - "source/patched-yasm/modules/parsers/gas/gas-parse.c", - "source/patched-yasm/modules/parsers/gas/gas-parser.c", - "source/patched-yasm/modules/parsers/nasm/nasm-parse.c", - "source/patched-yasm/modules/parsers/nasm/nasm-parser.c", - "source/patched-yasm/modules/preprocs/cpp/cpp-preproc.c", - "source/patched-yasm/modules/preprocs/nasm/nasm-eval.c", - "source/patched-yasm/modules/preprocs/nasm/nasm-pp.c", - "source/patched-yasm/modules/preprocs/nasm/nasm-preproc.c", - "source/patched-yasm/modules/preprocs/nasm/nasmlib.c", - "source/patched-yasm/modules/preprocs/raw/raw-preproc.c", - - # Files generated by compile_gperf - "$target_gen_dir/x86cpu.c", - "$target_gen_dir/x86regtmod.c", - - # Files generated by compile_re2c - "$target_gen_dir/gas-token.c", - "$target_gen_dir/nasm-token.c", - - # File generated by compile_re2c_lc3b - "$target_gen_dir/lc3bid.c", - - # File generated by generate_module - "$target_gen_dir/module.c", - ] - - configs -= [ "//build/config/compiler:chromium_code" ] - configs += [ - ":yasm_config", - 
"//build/config/compiler:no_chromium_code", - ] - - # Yasm generates a bunch of .c files which its source file #include. - # Add the |target_gen_dir| into the include path so it can find them. - # Ideally, these generated .c files would be placed into a separate - # directory, but the gen_x86_insn.py script does not make this easy. - include_dirs = [ yasm_gen_include_dir ] - - if (!is_win) { - cflags = [ - "-ansi", - "-pedantic", - ] - if (is_clang) { - cflags += [ "-Wno-incompatible-pointer-types" ] - } - } - - # TODO(ajwong): This should take most of the generated output as - # inputs. - deps = [ - ":compile_gperf", - ":compile_gperf_for_include", - ":compile_nasm_macros", - ":compile_nasm_version", - ":compile_re2c_lc3b", - ":compile_win64_gas", - ":compile_win64_nasm", - ":compile_re2c", - ":generate_license", - ":generate_module", - ":generate_version", - ":yasm_utils", - ] - } - - compiled_action_foreach("compile_gperf") { - tool = ":genperf" - sources = [ - "source/patched-yasm/modules/arch/x86/x86cpu.gperf", - "source/patched-yasm/modules/arch/x86/x86regtmod.gperf", - ] - - outputs = [ - "$target_gen_dir/{{source_name_part}}.c", - ] - args = [ - "{{source}}", - rebase_path(target_gen_dir, root_build_dir) + "/{{source_name_part}}.c", - ] - deps = [ - ":generate_x86_insn", - ] - } - - # This differs from |compile_gperf| in where it places it output files. - compiled_action_foreach("compile_gperf_for_include") { - tool = ":genperf" - sources = [ - # Make sure the generated gperf files in $target_gen_dir are synced with - # the outputs for the related generate_*_insn actions in the - # generate_files target below. 
- # - # The output for these two are #included by - # source/patched-yasm/modules/arch/x86/x86id.c - "$yasm_gen_include_dir/x86insn_gas.gperf", - "$yasm_gen_include_dir/x86insn_nasm.gperf", - ] - - outputs = [ - "$yasm_gen_include_dir/{{source_name_part}}.c", - ] - args = [ - "{{source}}", - rebase_path(yasm_gen_include_dir, root_build_dir) + - "/{{source_name_part}}.c", - ] - deps = [ - ":generate_x86_insn", - ] - } - - template("compile_macro") { - compiled_action(target_name) { - tool = ":genmacro" - - # Output #included by source/patched-yasm/frontends/yasm/yasm.c. - inputs = invoker.sources - outputs = invoker.outputs - args = [ - rebase_path(outputs[0], root_build_dir), - invoker.macro_varname, - rebase_path(inputs[0], root_build_dir), - ] - if (defined(invoker.deps)) { - deps = invoker.deps - } - } - } - - compile_macro("compile_nasm_macros") { - # Output #included by - # source/patched-yasm/modules/preprocs/nasm/nasm-parser.c - sources = [ - "source/patched-yasm/modules/parsers/nasm/nasm-std.mac", - ] - outputs = [ - "$yasm_gen_include_dir/nasm-macros.c", - ] - macro_varname = "nasm_standard_mac" - } - - compile_macro("compile_nasm_version") { - # Output #included by - # source/patched-yasm/modules/preprocs/nasm/nasm-preproc.c - sources = [ - "$target_gen_dir/$version_file", - ] - outputs = [ - "$yasm_gen_include_dir/nasm-version.c", - ] - macro_varname = "nasm_version_mac" - deps = [ - ":generate_version", - ] - } - - compile_macro("compile_win64_gas") { - # Output #included by source/patched-yasm/frontends/yasm/yasm.c. - sources = [ - "source/patched-yasm/modules/objfmts/coff/win64-gas.mac", - ] - outputs = [ - "$yasm_gen_include_dir/win64-gas.c", - ] - macro_varname = "win64_gas_stdmac" - } - - compile_macro("compile_win64_nasm") { - # Output #included by source/patched-yasm/frontends/yasm/yasm.c. 
- sources = [ - "source/patched-yasm/modules/objfmts/coff/win64-nasm.mac", - ] - outputs = [ - "$yasm_gen_include_dir/win64-nasm.c", - ] - macro_varname = "win64_nasm_stdmac" - } - - compiled_action_foreach("compile_re2c") { - tool = ":re2c" - sources = [ - "source/patched-yasm/modules/parsers/gas/gas-token.re", - "source/patched-yasm/modules/parsers/nasm/nasm-token.re", - ] - outputs = [ - "$target_gen_dir/{{source_name_part}}.c", - ] - args = [ - "-b", - "-o", - rebase_path(target_gen_dir, root_build_dir) + "/{{source_name_part}}.c", - "{{source}}", - ] - } - - # This call doesn't fit into the re2c template above. - compiled_action("compile_re2c_lc3b") { - tool = ":re2c" - inputs = [ - "source/patched-yasm/modules/arch/lc3b/lc3bid.re", - ] - outputs = [ - "$target_gen_dir/lc3bid.c", - ] - args = [ - "-s", - "-o", - rebase_path(outputs[0], root_build_dir), - rebase_path(inputs[0], root_build_dir), - ] - } - - compiled_action("generate_license") { - tool = ":genstring" - - # Output #included by source/patched-yasm/frontends/yasm/yasm.c. 
- inputs = [ - "source/patched-yasm/COPYING", - ] - outputs = [ - "$yasm_gen_include_dir/license.c", - ] - args = [ - "license_msg", - rebase_path(outputs[0], root_build_dir), - rebase_path(inputs[0], root_build_dir), - ] - } - - compiled_action("generate_module") { - tool = ":genmodule" - inputs = [ - "source/patched-yasm/libyasm/module.in", - config_makefile, - ] - outputs = [ - "$target_gen_dir/module.c", - ] - args = [ - rebase_path(inputs[0], root_build_dir), - rebase_path(config_makefile, root_build_dir), - rebase_path(outputs[0], root_build_dir), - ] - } - - compiled_action("generate_version") { - tool = ":genversion" - outputs = [ - "$target_gen_dir/$version_file", - ] - args = [ rebase_path(outputs[0], root_build_dir) ] - } - - action("generate_x86_insn") { - script = "source/patched-yasm/modules/arch/x86/gen_x86_insn.py" - - # Output eventually #included by source/patched-yasm/frontends/yasm/x86id.c - outputs = [ - "$yasm_gen_include_dir/x86insns.c", - "$yasm_gen_include_dir/x86insn_gas.gperf", - "$yasm_gen_include_dir/x86insn_nasm.gperf", - ] - args = [ rebase_path(yasm_gen_include_dir, root_build_dir) ] - } -} diff --git a/packager/third_party/yasm/CHROMIUM.diff b/packager/third_party/yasm/CHROMIUM.diff deleted file mode 100644 index 764cbf738b..0000000000 --- a/packager/third_party/yasm/CHROMIUM.diff +++ /dev/null @@ -1,24 +0,0 @@ ---- frontends/tasm/tasm.c -+++ frontends/tasm/tasm.c -@@ -224,7 +224,9 @@ - /* version message */ - /*@observer@*/ static const char *version_msg[] = { - PACKAGE_STRING, -+#if !defined(DONT_EMBED_BUILD_METADATA) || defined(OFFICIAL_BUILD) - "Compiled on " __DATE__ ".", -+#endif - "Copyright (c) 2001-2010 Peter Johnson and other Yasm developers.", - "Run yasm --license for licensing overview and summary." 
- }; ---- frontends/yasm/yasm.c -+++ frontends/yasm/yasm.c -@@ -213,7 +213,9 @@ - /* version message */ - /*@observer@*/ static const char *version_msg[] = { - PACKAGE_STRING, -+#if !defined(DONT_EMBED_BUILD_METADATA) || defined(OFFICIAL_BUILD) - "Compiled on " __DATE__ ".", -+#endif - "Copyright (c) 2001-2011 Peter Johnson and other Yasm developers.", - "Run yasm --license for licensing overview and summary." - }; diff --git a/packager/third_party/yasm/OWNERS b/packager/third_party/yasm/OWNERS deleted file mode 100644 index c7233bd75b..0000000000 --- a/packager/third_party/yasm/OWNERS +++ /dev/null @@ -1 +0,0 @@ -hclam@chromium.org diff --git a/packager/third_party/yasm/README.chromium b/packager/third_party/yasm/README.chromium deleted file mode 100644 index 7006a6341b..0000000000 --- a/packager/third_party/yasm/README.chromium +++ /dev/null @@ -1,142 +0,0 @@ -Name: yasm -URL: http://www.tortall.net/projects/yasm/ -Version: 1.2.0 -License: 2-clause or 3-clause BSD licensed, with the exception of bitvect, which is triple-licensed under the Artistic license, GPL, and LGPL -License File: source/patched-yasm/COPYING -License Android Compatible: yes -Security Critical: no - -With these patches merged: -* https://github.com/yasm/yasm/commit/a2cbb10ee1b90b73647667ac849c74d65761d412 -* https://github.com/yasm/yasm/commit/01ab853e68ef8aeded716d6f5b34895200f66a51 -* https://github.com/yasm/yasm/commit/82fafa7b5619e702c8681c959ade0746498e3cbc -* https://github.com/yasm/yasm/commit/2bd66514b6b100887c19d8598da38347b3cff40e -* https://github.com/yasm/yasm/commit/ab19547382660d81e0b4a0232dccb38f44c52a36 -* https://github.com/yasm/yasm/commit/9728322335cba96500861ef766b1546d096e5600 -* CHROMIUM.diff - - -See also the yasm.gyp file for a description of the yasm build process. - -Instructions for recreating the yasm.gyp file. - 1) Get a clean version of the yasm source tree. 
The clean tree can be found - at: - - src/third_party/yasm/source/yasm - - 2) Run configure on the pristine source from a different directory (eg., - /tmp/yasm_build). Running configure from another directory will keep - the source tree clean. - - 3) Next, capture all the output from a build of yasm. We will use the build - log as a reference for making the yasm.gyp file. - - make yasm > yasm_build_log 2> yasm_build_err - - 4) Check yasm_build_err to see if there are any anomalies beyond yasm's - compiler warnings. - - 5) Grab the generated Makefile, libyasm-stdint.h, config.h, and put into - the correct platform location. For android platform, copy the files - generated for linux, but make sure that ENABLE_NLS is not defined to - allow mac host compiles to work. For ios, copy the files from mac. - - src/third_party/yasm/source/config/[platform] - - While we do not directly use the "Makefile" to build, it is needed by - the "genmodule" subprogram as input for creating the available modules - list. - - 6) Make sure all the subprograms are represented in yasm.gyp. - - grep '^gcc' yasm_build_log | - grep -v ' -DHAVE_CONFIG_H ' - - The yasm build creates a bunch of subprograms that in-turn generate - more .c files in the build. Luckily the commands to generate the - subprogram do not have -DHAVE_CONFIG_H as a cflag. - - From this list, make sure all the subprograms that are build have - appropriate targets in the yasm.gyp. - - You will notice, when you get to the next step, that there are some - .c source files that are compiled both for yasm, and for genperf. - - Those should go into the genperf_libs target so that they can be - shared by the genperf and yasm targets. Find those files by appending - - | grep 'gp-' - - to the command above. - - 7) Find all the source files used to build yasm proper. - - grep -E '^gcc' yasm_build_log | - grep ' -DHAVE_CONFIG_H ' | - awk '{print $NF }' | - sed -e "s/'\.\/'\`//" | # Removes some garbage from the build line. 
- sort -u | - sed -e "s/\(.*\)/'\1',/" # Add quotes to each line. - - Reversing the -DHAVE_CONFIG_H filter from the command above should - list the compile lines for yasm proper. - - This should get you close, but you will need to manually examine this - list. However, some of the built products are still included in the - command above. Generally, if the source file is in the root directory, - it's a generated file. - - Inspect the current yasm.gyp for a list of the subprograms and their - outputs. - - Update the sources list in the yasm target accordingly. Read step #9 - as well if you update the source list to avoid problems. - - 8) Update the actions for each of the subprograms. - - Here is the real fun. For each subprogram created, you will need to - update the actions and rules in yasm.gyp that invoke the subprogram to - generate the files needed by the rest of the build. - - I don't have any good succinct instructions for this. Grep the build - log for each subprogram invocation (eg., "./genversion"), look at - its command inputs and output, then verify our yasm.gyp does something - similar. - - The good news is things likely only link or compile if this is done - right so you'll know if there is a problem. - - Again, refer to the existing yasm.gyp for a guide to how the generated - files are used. - - Here are a few gotchas: - 1) genmodule, by default, writes module.c into the current - directory. This does not play nicely with gyp. We patch the - source during build to allow specifying a specific output file. - - 2) Most of the generated files, even though they are .c files, are - #included by other files in the build. Make sure they end up - in a directory that is in the include path for the build. - One of <(shared_generated_dir) or <(generated_dir) should work. - - 3) Some of the genperf output is #included while others need to be - compiled directly. That is why there are 2 different rules for - .gperf files in two targets. 
- - 9) Check for python scripts that are run. - - grep python yasm_build_log - - Yasm uses python scripts to generate the assembly code description - files in C++. Make sure to get these put into the gyp file properly as - well. An example is gen_x86_insn.py for x86 assembly. - - Note that at least the gen_x86_insn.py script suffers from the same - problem as genmacro in that it outputs to the current directory by - default. The yasm.gyp build patches this file before invoking it to - allow specifying an output directory. - - 10) Recreate the 'AdditionalOptions!': [ '/analyze' ] block so that VC++ - /analyze builds won't fail. - - 11) If all that's is finished, attempt to build....and cross your fingers. diff --git a/packager/third_party/yasm/run_yasm.py b/packager/third_party/yasm/run_yasm.py deleted file mode 100644 index cbd79ccea1..0000000000 --- a/packager/third_party/yasm/run_yasm.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A wrapper to run yasm. - -Its main job is to provide a Python wrapper for GN integration, and to write -the makefile-style output yasm generates in stdout to a .d file for dependency -management of .inc files. - -Run with: - python run_yasm.py - -Note that must include an explicit output file (-o). This -script will append a ".d" to this and write the dependencies there. This script -will add "-M" to cause yasm to write the deps to stdout, so you don't need to -specify that. -""" - -import argparse -import sys -import subprocess - -# Extract the output file name from the yasm command line so we can generate a -# .d file with the same base name. -parser = argparse.ArgumentParser() -parser.add_argument("-o", dest="objfile") -options, _ = parser.parse_known_args() - -objfile = options.objfile -depfile = objfile + '.d' - -# Assemble. 
-result_code = subprocess.call(sys.argv[1:]) -if result_code != 0: - sys.exit(result_code) - -# Now generate the .d file listing the dependencies. The -M option makes yasm -# write the Makefile-style dependencies to stdout, but it seems that inhibits -# generating any compiled output so we need to do this in a separate pass. -# However, outputting deps seems faster than actually assembling, and yasm is -# so fast anyway this is not a big deal. -# -# This guarantees proper dependency management for assembly files. Otherwise, -# we would have to require people to manually specify the .inc files they -# depend on in the build file, which will surely be wrong or out-of-date in -# some cases. -deps = subprocess.check_output(sys.argv[1:] + ['-M']) -with open(depfile, "wb") as f: - f.write(deps) - diff --git a/packager/third_party/yasm/source/config/android/Makefile b/packager/third_party/yasm/source/config/android/Makefile deleted file mode 100644 index 6fccce46a9..0000000000 --- a/packager/third_party/yasm/source/config/android/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2192 2009-03-29 23:25:05Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2190 2009-03-25 03:40:59Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: 
Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. - -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: 
Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - - -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = x86_64-unknown-linux-gnu -host_triplet = x86_64-unknown-linux-gnu -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - 
modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - 
$(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - 
$(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh 
-am__append_4 = $(dist_man_MANS) -subdir = . -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - 
elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = 
$(am_genstring_OBJECTS) -genstring_DEPENDENCIES = genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) 
$(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /tmp/yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /tmp/yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /tmp/yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /tmp/yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /tmp/yasm/config/missing --run automake-1.10 -AWK = gawk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = -ECHO_N = -n -ECHO_T = -EGREP = /bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /usr/bin/msgfmt -GMSGFMT_015 = /usr/bin/msgfmt -GREP = /bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = -MAKEINFO = ${SHELL} /tmp/yasm/config/missing --run makeinfo -MKDIR_P = /bin/mkdir -p -MORE_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter -MSGFMT = /usr/bin/msgfmt -MSGFMT_015 = /usr/bin/msgfmt -MSGMERGE = /usr/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = 
bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm HEAD -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = HEAD -PATH_SEPARATOR = : -POSUB = po -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = linux2 -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = yes -VERSION = HEAD -XGETTEXT = /usr/bin/xgettext -XGETTEXT_015 = /usr/bin/xgettext -XMLTO = xmlto -abs_builddir = /tmp/yasm -abs_srcdir = /tmp/yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /tmp/yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = x86_64-unknown-linux-gnu -build_alias = -build_cpu = x86_64 -build_os = linux-gnu -build_vendor = unknown -builddir = . -datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = x86_64-unknown-linux-gnu -host_alias = -host_cpu = x86_64 -host_os = linux-gnu -host_vendor = unknown -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /tmp/yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /bin/mkdir -p -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = ${exec_prefix}/lib/python2.5/site-packages -pythondir = ${prefix}/lib/python2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = . -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = . -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - -clean-libLIBRARIES: - 
-test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) $(genmodule_DEPENDENCIES) 
- @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include ./$(DEPDIR)/bitvect.Po -include 
./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include ./$(DEPDIR)/test_hd.Po -include 
./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f 
$(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF 
$(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f 
$(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || 
echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.obj `if test -f 
'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o 
nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 
'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test -f 
'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -yasm_arch.7: modules/arch/yasm_arch.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -yasm_parsers.7: modules/parsers/yasm_parsers.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -yasm.1: frontends/yasm/yasm.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all variables. 
-# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/android/config.h b/packager/third_party/yasm/source/config/android/config.h deleted file mode 100644 index 35a50b74b0..0000000000 --- a/packager/third_party/yasm/source/config/android/config.h +++ /dev/null @@ -1,173 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "gcc -E" - -/* */ -/* #undef ENABLE_NLS */ - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the - CoreFoundation framework. */ -/* #undef HAVE_CFLOCALECOPYCURRENT */ - -/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in - the CoreFoundation framework. */ -/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */ - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -#define HAVE_DCGETTEXT 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DIRECT_H */ - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -#define HAVE_GETTEXT 1 - -/* Define to 1 if you have the GNU C Library */ -#define HAVE_GNU_C_LIBRARY 1 - -/* Define if you have the iconv() function and it works. */ -/* #undef HAVE_ICONV */ - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -/* #undef HAVE_MERGESORT */ - -/* Define to 1 if you have the `popen' function. 
*/ -#define HAVE_POPEN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -#define HAVE_STRSEP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. 
*/ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/android/libyasm-stdint.h b/packager/third_party/yasm/source/config/android/libyasm-stdint.h deleted file mode 100644 index 357610e1c1..0000000000 --- a/packager/third_party/yasm/source/config/android/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm HEAD" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/ios/Makefile b/packager/third_party/yasm/source/config/ios/Makefile deleted file mode 100644 index 770352550d..0000000000 --- a/packager/third_party/yasm/source/config/ios/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. 
Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. 
-# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2193 2009-04-04 23:03:41Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2191 2009-03-25 03:42:05Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. 
- -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 
2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - -VPATH = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = i686-apple-darwin9.8.0 -host_triplet = i686-apple-darwin9.8.0 -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - 
modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - $(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - 
$(srcdir)/modules/arch/x86/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - 
$(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh -#am__append_4 = $(dist_man_MANS) -subdir = . 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - 
coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = $(am_genstring_OBJECTS) -genstring_DEPENDENCIES = 
genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-I$(srcdir) -depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) 
$(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run automake-1.10 -AWK = awk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = \c -ECHO_N = -ECHO_T = -EGREP = /usr/bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /opt/local/bin/msgfmt -GMSGFMT_015 = /opt/local/bin/msgfmt -GREP = /usr/bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -Wl,-framework -Wl,CoreFoundation -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = # -MAKEINFO = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run makeinfo -MKDIR_P = 
/workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -c -d -MORE_CFLAGS = -MSGFMT = /opt/local/bin/msgfmt -MSGFMT_015 = /opt/local/bin/msgfmt -MSGMERGE = /opt/local/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm 0.8.0 -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = 0.8.0 -PATH_SEPARATOR = : -POSUB = -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = darwin -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = no -VERSION = 0.8.0 -XGETTEXT = /opt/local/bin/xgettext -XGETTEXT_015 = /opt/local/bin/xgettext -XMLTO = : -abs_builddir = /tmp/yasm -abs_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = i686-apple-darwin9.8.0 -build_alias = -build_cpu = i686 -build_os = darwin9.8.0 -build_vendor = apple -builddir = . 
-datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = i686-apple-darwin9.8.0 -host_alias = -host_cpu = i686 -host_os = darwin9.8.0 -host_vendor = apple -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -c -d -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = /Library/Python/2.5/site-packages -pythondir = /Library/Python/2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: # $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): # $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: # $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - 
-clean-libLIBRARIES: - -test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) 
$(genmodule_DEPENDENCIES) - @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include 
./$(DEPDIR)/bitvect.Po -include ./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include 
./$(DEPDIR)/test_hd.Po -include ./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: 
modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo 
'$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o 
gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 
'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF 
$(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then 
$(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test 
-f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -#yasm_arch.7: modules/arch/yasm_arch.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -#yasm_parsers.7: modules/parsers/yasm_parsers.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -#yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -#yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -#yasm.1: frontends/yasm/yasm.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all 
variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/ios/config.h b/packager/third_party/yasm/source/config/ios/config.h deleted file mode 100644 index f3b43d235a..0000000000 --- a/packager/third_party/yasm/source/config/ios/config.h +++ /dev/null @@ -1,173 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "gcc -E" - -/* */ -/* #undef ENABLE_NLS */ - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the - CoreFoundation framework. */ -#define HAVE_CFLOCALECOPYCURRENT 1 - -/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in - the CoreFoundation framework. */ -#define HAVE_CFPREFERENCESCOPYAPPVALUE 1 - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -/* #undef HAVE_DCGETTEXT */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DIRECT_H */ - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -/* #undef HAVE_GETTEXT */ - -/* Define to 1 if you have the GNU C Library */ -/* #undef HAVE_GNU_C_LIBRARY */ - -/* Define if you have the iconv() function and it works. */ -#define HAVE_ICONV 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -#define HAVE_MERGESORT 1 - -/* Define to 1 if you have the `popen' function. 
*/ -#define HAVE_POPEN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -#define HAVE_STRSEP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. 
*/ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/ios/libyasm-stdint.h b/packager/third_party/yasm/source/config/ios/libyasm-stdint.h deleted file mode 100644 index 851e85b788..0000000000 --- a/packager/third_party/yasm/source/config/ios/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm 0.8.0" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/linux/Makefile b/packager/third_party/yasm/source/config/linux/Makefile deleted file mode 100644 index 6fccce46a9..0000000000 --- a/packager/third_party/yasm/source/config/linux/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. 
Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. 
-# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2192 2009-03-29 23:25:05Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2190 2009-03-25 03:40:59Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. 
- -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 
2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - - -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = x86_64-unknown-linux-gnu -host_triplet = x86_64-unknown-linux-gnu -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/objfmts/bin/tests/bin_test.sh \ - 
modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - $(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/Makefile.inc \ - 
$(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - 
$(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh -am__append_4 = $(dist_man_MANS) -subdir = . 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - 
coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = $(am_genstring_OBJECTS) -genstring_DEPENDENCIES = 
genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) 
$(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /tmp/yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /tmp/yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /tmp/yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /tmp/yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /tmp/yasm/config/missing --run automake-1.10 -AWK = gawk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = -ECHO_N = -n -ECHO_T = -EGREP = /bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /usr/bin/msgfmt -GMSGFMT_015 = /usr/bin/msgfmt -GREP = /bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = -MAKEINFO = ${SHELL} /tmp/yasm/config/missing --run makeinfo -MKDIR_P = /bin/mkdir -p -MORE_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter -MSGFMT = /usr/bin/msgfmt -MSGFMT_015 = /usr/bin/msgfmt -MSGMERGE = /usr/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = 
bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm HEAD -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = HEAD -PATH_SEPARATOR = : -POSUB = po -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = linux2 -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = yes -VERSION = HEAD -XGETTEXT = /usr/bin/xgettext -XGETTEXT_015 = /usr/bin/xgettext -XMLTO = xmlto -abs_builddir = /tmp/yasm -abs_srcdir = /tmp/yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /tmp/yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = x86_64-unknown-linux-gnu -build_alias = -build_cpu = x86_64 -build_os = linux-gnu -build_vendor = unknown -builddir = . -datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = x86_64-unknown-linux-gnu -host_alias = -host_cpu = x86_64 -host_os = linux-gnu -host_vendor = unknown -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /tmp/yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /bin/mkdir -p -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = ${exec_prefix}/lib/python2.5/site-packages -pythondir = ${prefix}/lib/python2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = . -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = . -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - -clean-libLIBRARIES: - 
-test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) $(genmodule_DEPENDENCIES) 
- @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include ./$(DEPDIR)/bitvect.Po -include 
./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include ./$(DEPDIR)/test_hd.Po -include 
./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f 
$(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF 
$(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f 
$(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || 
echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.obj `if test -f 
'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o 
nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 
'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test -f 
'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -yasm_arch.7: modules/arch/yasm_arch.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -yasm_parsers.7: modules/parsers/yasm_parsers.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -yasm.1: frontends/yasm/yasm.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all variables. 
-# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/linux/config.h b/packager/third_party/yasm/source/config/linux/config.h deleted file mode 100644 index 9e3653957b..0000000000 --- a/packager/third_party/yasm/source/config/linux/config.h +++ /dev/null @@ -1,173 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "gcc -E" - -/* */ -#define ENABLE_NLS 1 - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the - CoreFoundation framework. */ -/* #undef HAVE_CFLOCALECOPYCURRENT */ - -/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in - the CoreFoundation framework. */ -/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */ - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -#define HAVE_DCGETTEXT 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DIRECT_H */ - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -#define HAVE_GETTEXT 1 - -/* Define to 1 if you have the GNU C Library */ -#define HAVE_GNU_C_LIBRARY 1 - -/* Define if you have the iconv() function and it works. */ -/* #undef HAVE_ICONV */ - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -/* #undef HAVE_MERGESORT */ - -/* Define to 1 if you have the `popen' function. 
*/ -#define HAVE_POPEN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -#define HAVE_STRSEP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. 
*/ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/linux/libyasm-stdint.h b/packager/third_party/yasm/source/config/linux/libyasm-stdint.h deleted file mode 100644 index 357610e1c1..0000000000 --- a/packager/third_party/yasm/source/config/linux/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm HEAD" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/mac/Makefile b/packager/third_party/yasm/source/config/mac/Makefile deleted file mode 100644 index 770352550d..0000000000 --- a/packager/third_party/yasm/source/config/mac/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. 
Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. 
-# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2193 2009-04-04 23:03:41Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2191 2009-03-25 03:42:05Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. 
- -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 
2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - -VPATH = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = i686-apple-darwin9.8.0 -host_triplet = i686-apple-darwin9.8.0 -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - 
modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - $(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - 
$(srcdir)/modules/arch/x86/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - 
$(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh -#am__append_4 = $(dist_man_MANS) -subdir = . 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - 
coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = $(am_genstring_OBJECTS) -genstring_DEPENDENCIES = 
genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-I$(srcdir) -depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) 
$(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run automake-1.10 -AWK = awk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = \c -ECHO_N = -ECHO_T = -EGREP = /usr/bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /opt/local/bin/msgfmt -GMSGFMT_015 = /opt/local/bin/msgfmt -GREP = /usr/bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -Wl,-framework -Wl,CoreFoundation -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = # -MAKEINFO = ${SHELL} /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/missing --run makeinfo -MKDIR_P = 
/workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -c -d -MORE_CFLAGS = -MSGFMT = /opt/local/bin/msgfmt -MSGFMT_015 = /opt/local/bin/msgfmt -MSGMERGE = /opt/local/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm 0.8.0 -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = 0.8.0 -PATH_SEPARATOR = : -POSUB = -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = darwin -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = no -VERSION = 0.8.0 -XGETTEXT = /opt/local/bin/xgettext -XGETTEXT_015 = /opt/local/bin/xgettext -XMLTO = : -abs_builddir = /tmp/yasm -abs_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = i686-apple-darwin9.8.0 -build_alias = -build_cpu = i686 -build_os = darwin9.8.0 -build_vendor = apple -builddir = . 
-datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = i686-apple-darwin9.8.0 -host_alias = -host_cpu = i686 -host_os = darwin9.8.0 -host_vendor = apple -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm/config/install-sh -c -d -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = /Library/Python/2.5/site-packages -pythondir = /Library/Python/2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = /workspace/ajwong/git-chrome/src/third_party/yasm/source/patched-yasm -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: # $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): # $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: # $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - 
-clean-libLIBRARIES: - -test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) 
$(genmodule_DEPENDENCIES) - @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include 
./$(DEPDIR)/bitvect.Po -include ./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include 
./$(DEPDIR)/test_hd.Po -include ./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: 
modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo 
'$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o 
gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 
'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF 
$(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then 
$(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test 
-f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -#yasm_arch.7: modules/arch/yasm_arch.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -#yasm_parsers.7: modules/parsers/yasm_parsers.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -#yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -#yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -#yasm.1: frontends/yasm/yasm.xml -# $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all 
variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/mac/config.h b/packager/third_party/yasm/source/config/mac/config.h deleted file mode 100644 index f3b43d235a..0000000000 --- a/packager/third_party/yasm/source/config/mac/config.h +++ /dev/null @@ -1,173 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "gcc -E" - -/* */ -/* #undef ENABLE_NLS */ - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the - CoreFoundation framework. */ -#define HAVE_CFLOCALECOPYCURRENT 1 - -/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in - the CoreFoundation framework. */ -#define HAVE_CFPREFERENCESCOPYAPPVALUE 1 - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -/* #undef HAVE_DCGETTEXT */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DIRECT_H */ - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -/* #undef HAVE_GETTEXT */ - -/* Define to 1 if you have the GNU C Library */ -/* #undef HAVE_GNU_C_LIBRARY */ - -/* Define if you have the iconv() function and it works. */ -#define HAVE_ICONV 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -#define HAVE_MERGESORT 1 - -/* Define to 1 if you have the `popen' function. 
*/ -#define HAVE_POPEN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. */ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -#define HAVE_STRSEP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. 
*/ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/mac/libyasm-stdint.h b/packager/third_party/yasm/source/config/mac/libyasm-stdint.h deleted file mode 100644 index 851e85b788..0000000000 --- a/packager/third_party/yasm/source/config/mac/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm 0.8.0" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/openbsd/Makefile b/packager/third_party/yasm/source/config/openbsd/Makefile deleted file mode 100644 index 6fccce46a9..0000000000 --- a/packager/third_party/yasm/source/config/openbsd/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. 
Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. 
-# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2192 2009-03-29 23:25:05Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2190 2009-03-25 03:40:59Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. 
- -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 
2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - - -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = x86_64-unknown-linux-gnu -host_triplet = x86_64-unknown-linux-gnu -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/objfmts/bin/tests/bin_test.sh \ - 
modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - $(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/Makefile.inc \ - 
$(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - 
$(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh -am__append_4 = $(dist_man_MANS) -subdir = . 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - 
coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = $(am_genstring_OBJECTS) -genstring_DEPENDENCIES = 
genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) 
$(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /tmp/yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /tmp/yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /tmp/yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /tmp/yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /tmp/yasm/config/missing --run automake-1.10 -AWK = gawk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = -ECHO_N = -n -ECHO_T = -EGREP = /bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /usr/bin/msgfmt -GMSGFMT_015 = /usr/bin/msgfmt -GREP = /bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = -MAKEINFO = ${SHELL} /tmp/yasm/config/missing --run makeinfo -MKDIR_P = /bin/mkdir -p -MORE_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter -MSGFMT = /usr/bin/msgfmt -MSGFMT_015 = /usr/bin/msgfmt -MSGMERGE = /usr/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = 
bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm HEAD -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = HEAD -PATH_SEPARATOR = : -POSUB = po -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = linux2 -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = yes -VERSION = HEAD -XGETTEXT = /usr/bin/xgettext -XGETTEXT_015 = /usr/bin/xgettext -XMLTO = xmlto -abs_builddir = /tmp/yasm -abs_srcdir = /tmp/yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /tmp/yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = x86_64-unknown-linux-gnu -build_alias = -build_cpu = x86_64 -build_os = linux-gnu -build_vendor = unknown -builddir = . -datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = x86_64-unknown-linux-gnu -host_alias = -host_cpu = x86_64 -host_os = linux-gnu -host_vendor = unknown -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /tmp/yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /bin/mkdir -p -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = ${exec_prefix}/lib/python2.5/site-packages -pythondir = ${prefix}/lib/python2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = . -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = . -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - -clean-libLIBRARIES: - 
-test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) $(genmodule_DEPENDENCIES) 
- @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include ./$(DEPDIR)/bitvect.Po -include 
./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include ./$(DEPDIR)/test_hd.Po -include 
./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f 
$(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF 
$(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f 
$(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || 
echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.obj `if test -f 
'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o 
nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 
'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test -f 
'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -yasm_arch.7: modules/arch/yasm_arch.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -yasm_parsers.7: modules/parsers/yasm_parsers.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -yasm.1: frontends/yasm/yasm.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all variables. 
-# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/openbsd/config.h b/packager/third_party/yasm/source/config/openbsd/config.h deleted file mode 100644 index 4f742881c8..0000000000 --- a/packager/third_party/yasm/source/config/openbsd/config.h +++ /dev/null @@ -1,165 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "cc -E" - -/* */ -/* #undef ENABLE_NLS */ - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -/* #undef HAVE_DCGETTEXT */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DIRECT_H */ - -/* Define to 1 if you have the `ftruncate' function. */ -#define HAVE_FTRUNCATE 1 - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -/* #undef HAVE_GETTEXT */ - -/* Define to 1 if you have the GNU C Library */ -/* #undef HAVE_GNU_C_LIBRARY */ - -/* Define if you have the iconv() function and it works. */ -/* #undef HAVE_ICONV */ - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -#define HAVE_LIBGEN_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -#define HAVE_MERGESORT 1 - -/* Define to 1 if you have the `popen' function. */ -#define HAVE_POPEN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. 
*/ -#define HAVE_STRCASECMP 1 - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -#define HAVE_STRSEP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. */ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. 
*/ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/openbsd/libyasm-stdint.h b/packager/third_party/yasm/source/config/openbsd/libyasm-stdint.h deleted file mode 100644 index b875214c89..0000000000 --- a/packager/third_party/yasm/source/config/openbsd/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm 1.1.0" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/win/Makefile b/packager/third_party/yasm/source/config/win/Makefile deleted file mode 100644 index 6fccce46a9..0000000000 --- a/packager/third_party/yasm/source/config/win/Makefile +++ /dev/null @@ -1,3822 +0,0 @@ -# Makefile.in generated by automake 1.10.1 from Makefile.am. -# Makefile. Generated from Makefile.in by configure. - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
-# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - -# $Id: Makefile.am 2184 2009-03-24 05:04:15Z peter $ - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# $Id: Makefile.inc 1718 2006-12-24 00:13:19Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 2084 2008-05-09 07:08:17Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. -# This makes things rather non-standard automake - -# $Id: Makefile.inc 1939 2007-09-10 07:15:50Z peter $ - -# These utility programs have to be built for BUILD host in cross-build. 
-# This makes things rather non-standard automake - -# $Id: Makefile.inc 1948 2007-09-13 02:53:30Z peter $ - -# $Id: Makefile.inc 1951 2007-09-14 05:19:10Z peter $ - -# $Id: Makefile.inc 1598 2006-08-10 04:02:59Z peter $ - -# $Id: Makefile.inc 1914 2007-08-20 05:13:35Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2170 2009-01-14 08:28:13Z peter $ - -# $Id: Makefile.inc 2192 2009-03-29 23:25:05Z peter $ - -# $Id: Makefile.inc 1776 2007-02-19 02:36:10Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 1928 2007-09-07 22:03:34Z peter $ - -# $Id: Makefile.inc 1152 2004-10-02 06:18:30Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1783 2007-02-22 03:40:31Z peter $ - -# $Id: Makefile.inc 2169 2009-01-02 20:46:57Z peter $ - -# $Id$ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2190 2009-03-25 03:40:59Z peter $ - -# $Id: Makefile.inc 1137 2004-09-04 01:24:57Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 2063 2008-04-12 08:30:22Z peter $ - -# $Id: Makefile.inc 1966 2007-09-20 03:54:36Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2185 2009-03-24 06:33:32Z peter $ - -# $Id: Makefile.inc 2172 2009-01-27 06:38:14Z peter $ - -# $Id: Makefile.inc 2176 2009-03-04 07:39:02Z peter $ - -# Makefile for cpp module. -# Copied from raw preprocessor module. 
- -# $Id: Makefile.inc 1662 2006-10-21 18:52:29Z peter $ - -# $Id: Makefile.inc 1428 2006-03-27 02:15:19Z peter $ - -# $Id: Makefile.inc 1378 2006-02-12 01:27:39Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1350 2006-01-29 21:11:03Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id$ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 2010 2007-11-14 08:33:32Z peter $ - -# $Id: Makefile.inc 1252 2005-09-28 05:50:51Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 2126 2008-10-03 08:13:00Z peter $ - -# $Id: Makefile.inc 2036 2008-02-09 04:06:47Z peter $ - -# $Id: Makefile.inc 1168 2004-10-31 01:07:52Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1617 2006-09-16 05:43:41Z peter $ - -# $Id: Makefile.inc 1732 2007-01-13 19:34:04Z peter $ - -# $Id: Makefile.inc 1777 2007-02-19 08:21:17Z peter $ - -# $Id: Makefile.inc 1782 2007-02-21 06:45:39Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1854 2007-05-31 06:16:49Z peter $ - -# $Id: Makefile.inc 2108 2008-06-05 08:48:21Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1653 2006-10-17 06:58:41Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 2166 2009-01-02 08:33:21Z peter $ - -# $Id: Makefile.inc 1331 2006-01-15 22:48:55Z peter $ - -# Assume objfmt_coff is included - -# $Id: Makefile.inc 2120 
2008-09-04 04:45:30Z peter $ - -# $Id: Makefile.inc 2082 2008-05-09 06:46:02Z peter $ - -# $Id: Makefile.inc 1183 2004-12-01 07:49:18Z peter $ - -# $Id: Makefile.inc 1851 2007-05-26 17:56:36Z peter $ - -# $Id: Makefile.inc 2111 2008-07-06 22:26:49Z peter $ - -# $Id: Makefile.inc 2123 2008-09-30 03:56:37Z peter $ - -# $Id: Makefile.inc 2130 2008-10-07 05:38:11Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - -# $Id: Makefile.inc 1463 2006-04-05 05:39:23Z peter $ - - - - -pkgdatadir = $(datadir)/yasm -pkglibdir = $(libdir)/yasm -pkgincludedir = $(includedir)/yasm -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = x86_64-unknown-linux-gnu -host_triplet = x86_64-unknown-linux-gnu -bin_PROGRAMS = yasm$(EXEEXT) ytasm$(EXEEXT) -TESTS = $(am__append_3) modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/objfmts/bin/tests/bin_test.sh \ - 
modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/xdf/tests/xdf_test.sh bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) libyasm/tests/libyasm_test.sh -noinst_PROGRAMS = genstring$(EXEEXT) re2c$(EXEEXT) genmacro$(EXEEXT) \ - genperf$(EXEEXT) genversion$(EXEEXT) genmodule$(EXEEXT) -check_PROGRAMS = test_hd$(EXEEXT) bitvect_test$(EXEEXT) \ - floatnum_test$(EXEEXT) leb128_test$(EXEEXT) \ - splitpath_test$(EXEEXT) combpath_test$(EXEEXT) \ - uncstring_test$(EXEEXT) -DIST_COMMON = README $(am__configure_deps) $(dist_man_MANS) \ - $(include_HEADERS) $(modinclude_HEADERS) $(noinst_HEADERS) \ - $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/frontends/Makefile.inc \ - $(srcdir)/frontends/tasm/Makefile.inc \ - $(srcdir)/frontends/yasm/Makefile.inc \ - $(srcdir)/libyasm/Makefile.inc \ - $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/m4/Makefile.inc \ - $(srcdir)/modules/Makefile.inc \ - $(srcdir)/modules/arch/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/Makefile.inc \ - $(srcdir)/modules/arch/lc3b/tests/Makefile.inc \ - $(srcdir)/modules/arch/x86/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/Makefile.inc \ - 
$(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc \ - $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/Makefile.inc \ - $(srcdir)/modules/dbgfmts/codeview/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - $(srcdir)/modules/dbgfmts/null/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/Makefile.inc \ - $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc \ - $(srcdir)/modules/listfmts/Makefile.inc \ - $(srcdir)/modules/listfmts/nasm/Makefile.inc \ - $(srcdir)/modules/objfmts/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/Makefile.inc \ - $(srcdir)/modules/objfmts/coff/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/dbg/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc \ - $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/Makefile.inc \ - $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc \ - 
$(srcdir)/modules/objfmts/win64/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/Makefile.inc \ - $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/Makefile.inc \ - $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc \ - $(srcdir)/modules/parsers/Makefile.inc \ - $(srcdir)/modules/parsers/gas/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/Makefile.inc \ - $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/Makefile.inc \ - $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc \ - $(srcdir)/modules/preprocs/Makefile.inc \ - $(srcdir)/modules/preprocs/cpp/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/Makefile.inc \ - $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/Makefile.inc \ - $(srcdir)/modules/preprocs/raw/tests/Makefile.inc \ - $(srcdir)/tools/Makefile.inc \ - $(srcdir)/tools/genmacro/Makefile.inc \ - $(srcdir)/tools/genperf/Makefile.inc \ - $(srcdir)/tools/python-yasm/Makefile.inc \ - $(srcdir)/tools/python-yasm/tests/Makefile.inc \ - $(srcdir)/tools/re2c/Makefile.inc $(top_srcdir)/configure \ - ABOUT-NLS AUTHORS COPYING ChangeLog INSTALL NEWS \ - config/config.guess config/config.rpath config/config.sub \ - config/depcomp config/install-sh config/ltmain.sh \ - config/missing -#am__append_1 = _yasm.pxi yasm.pyx \ -# yasm_python.c python-setup.txt \ -# .python-build -#am__append_2 = PYTHON=${PYTHON} -#am__append_3 = tools/python-yasm/tests/python_test.sh -am__append_4 = $(dist_man_MANS) -subdir = . 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/m4/ax_create_stdint_h.m4 \ - $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \ - $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ - $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/nls.m4 \ - $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ - $(top_srcdir)/m4/pyrex.m4 $(top_srcdir)/m4/pythonhead.m4 \ - $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \ - "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" \ - "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" \ - "$(DESTDIR)$(includedir)" -libLIBRARIES_INSTALL = $(INSTALL_DATA) -LIBRARIES = $(lib_LIBRARIES) -AR = ar -ARFLAGS = cru -libyasm_a_AR = $(AR) $(ARFLAGS) -libyasm_a_LIBADD = -am_libyasm_a_OBJECTS = x86arch.$(OBJEXT) x86bc.$(OBJEXT) \ - x86expr.$(OBJEXT) x86id.$(OBJEXT) lc3barch.$(OBJEXT) \ - lc3bbc.$(OBJEXT) nasm-listfmt.$(OBJEXT) gas-parser.$(OBJEXT) \ - gas-parse.$(OBJEXT) nasm-parser.$(OBJEXT) nasm-parse.$(OBJEXT) \ - nasm-preproc.$(OBJEXT) nasm-pp.$(OBJEXT) nasmlib.$(OBJEXT) \ - nasm-eval.$(OBJEXT) raw-preproc.$(OBJEXT) \ - cpp-preproc.$(OBJEXT) cv-dbgfmt.$(OBJEXT) cv-symline.$(OBJEXT) \ - cv-type.$(OBJEXT) dwarf2-dbgfmt.$(OBJEXT) \ - dwarf2-line.$(OBJEXT) dwarf2-aranges.$(OBJEXT) \ - dwarf2-info.$(OBJEXT) null-dbgfmt.$(OBJEXT) \ - stabs-dbgfmt.$(OBJEXT) dbg-objfmt.$(OBJEXT) \ - bin-objfmt.$(OBJEXT) elf.$(OBJEXT) elf-objfmt.$(OBJEXT) \ - elf-x86-x86.$(OBJEXT) elf-x86-amd64.$(OBJEXT) \ - 
coff-objfmt.$(OBJEXT) win64-except.$(OBJEXT) \ - macho-objfmt.$(OBJEXT) rdf-objfmt.$(OBJEXT) \ - xdf-objfmt.$(OBJEXT) assocdat.$(OBJEXT) bitvect.$(OBJEXT) \ - bc-align.$(OBJEXT) bc-data.$(OBJEXT) bc-incbin.$(OBJEXT) \ - bc-org.$(OBJEXT) bc-reserve.$(OBJEXT) bytecode.$(OBJEXT) \ - errwarn.$(OBJEXT) expr.$(OBJEXT) file.$(OBJEXT) \ - floatnum.$(OBJEXT) hamt.$(OBJEXT) insn.$(OBJEXT) \ - intnum.$(OBJEXT) inttree.$(OBJEXT) linemap.$(OBJEXT) \ - md5.$(OBJEXT) mergesort.$(OBJEXT) phash.$(OBJEXT) \ - section.$(OBJEXT) strcasecmp.$(OBJEXT) strsep.$(OBJEXT) \ - symrec.$(OBJEXT) valparam.$(OBJEXT) value.$(OBJEXT) \ - xmalloc.$(OBJEXT) xstrdup.$(OBJEXT) -nodist_libyasm_a_OBJECTS = x86cpu.$(OBJEXT) x86regtmod.$(OBJEXT) \ - lc3bid.$(OBJEXT) gas-token.$(OBJEXT) nasm-token.$(OBJEXT) \ - module.$(OBJEXT) -libyasm_a_OBJECTS = $(am_libyasm_a_OBJECTS) \ - $(nodist_libyasm_a_OBJECTS) -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) -am_bitvect_test_OBJECTS = bitvect_test.$(OBJEXT) -bitvect_test_OBJECTS = $(am_bitvect_test_OBJECTS) -am__DEPENDENCIES_1 = -bitvect_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_combpath_test_OBJECTS = combpath_test.$(OBJEXT) -combpath_test_OBJECTS = $(am_combpath_test_OBJECTS) -combpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_floatnum_test_OBJECTS = floatnum_test.$(OBJEXT) -floatnum_test_OBJECTS = $(am_floatnum_test_OBJECTS) -floatnum_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_genmacro_OBJECTS = -genmacro_OBJECTS = $(am_genmacro_OBJECTS) -genmacro_DEPENDENCIES = genmacro.$(OBJEXT) -am_genmodule_OBJECTS = -genmodule_OBJECTS = $(am_genmodule_OBJECTS) -genmodule_DEPENDENCIES = genmodule.$(OBJEXT) -am_genperf_OBJECTS = -genperf_OBJECTS = $(am_genperf_OBJECTS) -genperf_DEPENDENCIES = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -am_genstring_OBJECTS = -genstring_OBJECTS = $(am_genstring_OBJECTS) -genstring_DEPENDENCIES = 
genstring.$(OBJEXT) -am_genversion_OBJECTS = -genversion_OBJECTS = $(am_genversion_OBJECTS) -genversion_DEPENDENCIES = genversion.$(OBJEXT) -am_leb128_test_OBJECTS = leb128_test.$(OBJEXT) -leb128_test_OBJECTS = $(am_leb128_test_OBJECTS) -leb128_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_re2c_OBJECTS = -re2c_OBJECTS = $(am_re2c_OBJECTS) -re2c_DEPENDENCIES = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -am_splitpath_test_OBJECTS = splitpath_test.$(OBJEXT) -splitpath_test_OBJECTS = $(am_splitpath_test_OBJECTS) -splitpath_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_test_hd_OBJECTS = test_hd.$(OBJEXT) -test_hd_OBJECTS = $(am_test_hd_OBJECTS) -test_hd_LDADD = $(LDADD) -am_uncstring_test_OBJECTS = uncstring_test.$(OBJEXT) -uncstring_test_OBJECTS = $(am_uncstring_test_OBJECTS) -uncstring_test_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_yasm_OBJECTS = yasm.$(OBJEXT) yasm-options.$(OBJEXT) -yasm_OBJECTS = $(am_yasm_OBJECTS) -yasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -am_ytasm_OBJECTS = tasm.$(OBJEXT) tasm-options.$(OBJEXT) -ytasm_OBJECTS = $(am_ytasm_OBJECTS) -ytasm_DEPENDENCIES = libyasm.a $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-depcomp = $(SHELL) $(top_srcdir)/config/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -SOURCES = $(libyasm_a_SOURCES) $(nodist_libyasm_a_SOURCES) \ - $(bitvect_test_SOURCES) $(combpath_test_SOURCES) \ - $(floatnum_test_SOURCES) $(genmacro_SOURCES) \ - $(genmodule_SOURCES) $(genperf_SOURCES) $(genstring_SOURCES) \ - $(genversion_SOURCES) $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -DIST_SOURCES = $(libyasm_a_SOURCES) $(bitvect_test_SOURCES) \ - $(combpath_test_SOURCES) $(floatnum_test_SOURCES) \ - $(genmacro_SOURCES) $(genmodule_SOURCES) $(genperf_SOURCES) \ - $(genstring_SOURCES) $(genversion_SOURCES) \ - $(leb128_test_SOURCES) $(re2c_SOURCES) \ - $(splitpath_test_SOURCES) $(test_hd_SOURCES) \ - $(uncstring_test_SOURCES) $(yasm_SOURCES) $(ytasm_SOURCES) -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-dvi-recursive install-exec-recursive \ - install-html-recursive install-info-recursive \ - install-pdf-recursive install-ps-recursive install-recursive \ - installcheck-recursive installdirs-recursive pdf-recursive \ - ps-recursive uninstall-recursive -man1dir = $(mandir)/man1 -man7dir = $(mandir)/man7 -NROFF = nroff -MANS = $(dist_man_MANS) -includeHEADERS_INSTALL = $(INSTALL_HEADER) -modincludeHEADERS_INSTALL = $(INSTALL_HEADER) -nodist_includeHEADERS_INSTALL = $(INSTALL_HEADER) -HEADERS = $(include_HEADERS) $(modinclude_HEADERS) \ - $(nodist_include_HEADERS) $(noinst_HEADERS) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) 
$(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - { test ! -d $(distdir) \ - || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr $(distdir); }; } -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -distuninstallcheck_listfiles = find . -type f -print -distcleancheck_listfiles = find . -type f -print -ACLOCAL = ${SHELL} /tmp/yasm/config/missing --run aclocal-1.10 -AMTAR = ${SHELL} /tmp/yasm/config/missing --run tar -ARCH = x86 -AUTOCONF = ${SHELL} /tmp/yasm/config/missing --run autoconf -AUTOHEADER = ${SHELL} /tmp/yasm/config/missing --run autoheader -AUTOMAKE = ${SHELL} /tmp/yasm/config/missing --run automake-1.10 -AWK = gawk -CC = gcc -std=gnu99 -CCDEPMODE = depmode=gcc3 -CCLD_FOR_BUILD = gcc -std=gnu99 -CC_FOR_BUILD = gcc -std=gnu99 -CFLAGS = -g -O2 -CPP = gcc -E -CPPFLAGS = -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -ECHO_C = -ECHO_N = -n -ECHO_T = -EGREP = /bin/grep -E -EXEEXT = -GCC = yes -GMSGFMT = /usr/bin/msgfmt -GMSGFMT_015 = /usr/bin/msgfmt -GREP = /bin/grep -HOST_CC = gcc -std=gnu99 -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTLLIBS = -INTL_MACOSX_LIBS = -LDFLAGS = -LIBICONV = -liconv -LIBINTL = -LIBOBJS = -LIBS = -LN_S = ln -s -LTLIBICONV = -liconv -LTLIBINTL = -LTLIBOBJS = -MAINT = -MAKEINFO = ${SHELL} /tmp/yasm/config/missing --run makeinfo -MKDIR_P = /bin/mkdir -p -MORE_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter -MSGFMT = /usr/bin/msgfmt -MSGFMT_015 = /usr/bin/msgfmt -MSGMERGE = /usr/bin/msgmerge -OBJEXT = o -PACKAGE = yasm -PACKAGE_BUGREPORT = 
bug-yasm@tortall.net -PACKAGE_NAME = yasm -PACKAGE_STRING = yasm HEAD -PACKAGE_TARNAME = yasm -PACKAGE_VERSION = HEAD -PATH_SEPARATOR = : -POSUB = po -PYTHON = /usr/bin/python -PYTHON_EXEC_PREFIX = ${exec_prefix} -PYTHON_INCLUDES = -PYTHON_PLATFORM = linux2 -PYTHON_PREFIX = ${prefix} -PYTHON_VERSION = 2.5 -RANLIB = ranlib -SET_MAKE = -SHELL = /bin/sh -STRIP = -USE_NLS = yes -VERSION = HEAD -XGETTEXT = /usr/bin/xgettext -XGETTEXT_015 = /usr/bin/xgettext -XMLTO = xmlto -abs_builddir = /tmp/yasm -abs_srcdir = /tmp/yasm -abs_top_builddir = /tmp/yasm -abs_top_srcdir = /tmp/yasm -ac_ct_CC = gcc -am__include = include -am__leading_dot = . -am__quote = -am__tar = ${AMTAR} chof - "$$tardir" -am__untar = ${AMTAR} xf - -bindir = ${exec_prefix}/bin -build = x86_64-unknown-linux-gnu -build_alias = -build_cpu = x86_64 -build_os = linux-gnu -build_vendor = unknown -builddir = . -datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} -dvidir = ${docdir} -exec_prefix = ${prefix} -host = x86_64-unknown-linux-gnu -host_alias = -host_cpu = x86_64 -host_os = linux-gnu -host_vendor = unknown -htmldir = ${docdir} -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /tmp/yasm/config/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mkdir_p = /bin/mkdir -p -oldincludedir = /usr/include -pdfdir = ${docdir} -pkgpyexecdir = ${pyexecdir}/yasm -pkgpythondir = ${pythondir}/yasm -prefix = /usr/local -program_transform_name = s,x,x, -psdir = ${docdir} -pyexecdir = ${exec_prefix}/lib/python2.5/site-packages -pythondir = ${prefix}/lib/python2.5/site-packages -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = . -sysconfdir = ${prefix}/etc -target_alias = -top_builddir = . -top_srcdir = . -SUBDIRS = po . 
-AM_YFLAGS = -d -AM_CFLAGS = -ansi -pedantic -Wall -W -Waggregate-return -Wbad-function-cast -Wcast-align -Wcast-qual -Wchar-subscripts -Winline -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes -Wswitch -Wwrite-strings -Wno-undef -Wno-unused-parameter - -#!include modules/objfmts/omf/Makefile.inc -dist_man_MANS = yasm_arch.7 yasm_parsers.7 yasm_dbgfmts.7 \ - yasm_objfmts.7 yasm.1 -TESTS_ENVIRONMENT = $(am__append_2) -test_hd_SOURCES = test_hd.c -include_HEADERS = libyasm.h -nodist_include_HEADERS = libyasm-stdint.h -noinst_HEADERS = util.h -BUILT_SOURCES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - x86insn_nasm.c x86insn_gas.c gas-token.c nasm-token.c \ - nasm-macros.c nasm-version.c version.mac win64-nasm.c \ - win64-gas.c license.c -MAINTAINERCLEANFILES = x86insns.c x86insn_nasm.gperf x86insn_gas.gperf \ - $(am__append_4) - -# Until this gets fixed in automake -DISTCLEANFILES = libyasm/stamp-h libyasm/stamp-h[0-9]* - -# Suffix rule for genperf -SUFFIXES = .gperf - -# configure.lineno doesn't clean up after itself? -CLEANFILES = configure.lineno $(am__append_1) x86insn_nasm.c \ - x86insn_gas.c x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c nasm-macros.c nasm-version.c version.mac \ - win64-nasm.c win64-gas.c module.c license.c - -# automake doesn't distribute mkinstalldirs? 
-#!EXTRA_DIST += modules/objfmts/omf/Makefile.inc -EXTRA_DIST = config/config.rpath config/mkinstalldirs \ - tools/Makefile.inc libyasm/Makefile.inc modules/Makefile.inc \ - frontends/Makefile.inc tools/re2c/Makefile.inc \ - tools/genmacro/Makefile.inc tools/genperf/Makefile.inc \ - tools/python-yasm/Makefile.inc tools/re2c/main.c \ - tools/re2c/basics.h tools/re2c/globals.h tools/re2c/ins.h \ - tools/re2c/re.h tools/re2c/token.h tools/re2c/code.c \ - tools/re2c/dfa.h tools/re2c/dfa.c tools/re2c/parse.h \ - tools/re2c/parser.h tools/re2c/parser.c tools/re2c/actions.c \ - tools/re2c/scanner.h tools/re2c/scanner.c \ - tools/re2c/mbo_getopt.h tools/re2c/mbo_getopt.c \ - tools/re2c/substr.h tools/re2c/substr.c tools/re2c/translate.c \ - tools/re2c/CHANGELOG tools/re2c/NO_WARRANTY tools/re2c/README \ - tools/re2c/scanner.re tools/re2c/re2c.1 \ - tools/re2c/bootstrap/scanner.c tools/re2c/doc/loplas.ps.gz \ - tools/re2c/doc/sample.bib tools/re2c/examples/basemmap.c \ - tools/re2c/examples/c.re tools/re2c/examples/cmmap.re \ - tools/re2c/examples/cnokw.re tools/re2c/examples/cunroll.re \ - tools/re2c/examples/modula.re tools/re2c/examples/repeater.re \ - tools/re2c/examples/sample.re tools/re2c/examples/simple.re \ - tools/re2c/examples/rexx/README \ - tools/re2c/examples/rexx/rexx.l \ - tools/re2c/examples/rexx/scanio.c tools/genmacro/genmacro.c \ - tools/genperf/genperf.c tools/genperf/perfect.c \ - tools/genperf/perfect.h tools/genperf/standard.h \ - tools/python-yasm/pyxelator/cparse.py \ - tools/python-yasm/pyxelator/genpyx.py \ - tools/python-yasm/pyxelator/ir.py \ - tools/python-yasm/pyxelator/lexer.py \ - tools/python-yasm/pyxelator/node.py \ - tools/python-yasm/pyxelator/parse_core.py \ - tools/python-yasm/pyxelator/work_unit.py \ - tools/python-yasm/pyxelator/wrap_yasm.py \ - tools/python-yasm/setup.py tools/python-yasm/yasm.pyx \ - $(PYBINDING_DEPS) tools/python-yasm/tests/Makefile.inc \ - tools/python-yasm/tests/python_test.sh \ - 
tools/python-yasm/tests/__init__.py \ - tools/python-yasm/tests/test_bytecode.py \ - tools/python-yasm/tests/test_expr.py \ - tools/python-yasm/tests/test_intnum.py \ - tools/python-yasm/tests/test_symrec.py \ - modules/arch/Makefile.inc modules/listfmts/Makefile.inc \ - modules/parsers/Makefile.inc modules/preprocs/Makefile.inc \ - modules/objfmts/Makefile.inc modules/arch/x86/Makefile.inc \ - modules/arch/lc3b/Makefile.inc \ - modules/arch/x86/gen_x86_insn.py x86insns.c x86insn_nasm.gperf \ - x86insn_gas.gperf modules/arch/x86/x86cpu.gperf \ - modules/arch/x86/x86regtmod.gperf \ - modules/arch/x86/tests/Makefile.inc \ - modules/arch/x86/tests/x86_test.sh \ - modules/arch/x86/tests/gen-fma-test.py \ - modules/arch/x86/tests/addbyte.asm \ - modules/arch/x86/tests/addbyte.errwarn \ - modules/arch/x86/tests/addbyte.hex \ - modules/arch/x86/tests/addrop.asm \ - modules/arch/x86/tests/addrop.errwarn \ - modules/arch/x86/tests/addrop.hex \ - modules/arch/x86/tests/addrop-err.asm \ - modules/arch/x86/tests/addrop-err.errwarn \ - modules/arch/x86/tests/aes.asm modules/arch/x86/tests/aes.hex \ - modules/arch/x86/tests/amd200707.asm \ - modules/arch/x86/tests/amd200707.hex \ - modules/arch/x86/tests/arithsmall.asm \ - modules/arch/x86/tests/arithsmall.errwarn \ - modules/arch/x86/tests/arithsmall.hex \ - modules/arch/x86/tests/avx.asm modules/arch/x86/tests/avx.hex \ - modules/arch/x86/tests/avxcc.asm \ - modules/arch/x86/tests/avxcc.hex \ - modules/arch/x86/tests/bittest.asm \ - modules/arch/x86/tests/bittest.hex \ - modules/arch/x86/tests/bswap64.asm \ - modules/arch/x86/tests/bswap64.hex \ - modules/arch/x86/tests/clmul.asm \ - modules/arch/x86/tests/clmul.hex \ - modules/arch/x86/tests/cmpxchg.asm \ - modules/arch/x86/tests/cmpxchg.hex \ - modules/arch/x86/tests/cpubasic-err.asm \ - modules/arch/x86/tests/cpubasic-err.errwarn \ - modules/arch/x86/tests/cyrix.asm \ - modules/arch/x86/tests/cyrix.hex \ - modules/arch/x86/tests/div-err.asm \ - 
modules/arch/x86/tests/div-err.errwarn \ - modules/arch/x86/tests/ea-nonzero.asm \ - modules/arch/x86/tests/ea-nonzero.hex \ - modules/arch/x86/tests/ea-over.asm \ - modules/arch/x86/tests/ea-over.errwarn \ - modules/arch/x86/tests/ea-over.hex \ - modules/arch/x86/tests/ea-warn.asm \ - modules/arch/x86/tests/ea-warn.errwarn \ - modules/arch/x86/tests/ea-warn.hex \ - modules/arch/x86/tests/ebpindex.asm \ - modules/arch/x86/tests/ebpindex.hex \ - modules/arch/x86/tests/effaddr.asm \ - modules/arch/x86/tests/effaddr.hex \ - modules/arch/x86/tests/enter.asm \ - modules/arch/x86/tests/enter.errwarn \ - modules/arch/x86/tests/enter.hex \ - modules/arch/x86/tests/far64.asm \ - modules/arch/x86/tests/far64.hex \ - modules/arch/x86/tests/farbasic.asm \ - modules/arch/x86/tests/farbasic.hex \ - modules/arch/x86/tests/farithr.asm \ - modules/arch/x86/tests/farithr.hex \ - modules/arch/x86/tests/fcmov.asm \ - modules/arch/x86/tests/fcmov.hex \ - modules/arch/x86/tests/fma.asm modules/arch/x86/tests/fma.hex \ - modules/arch/x86/tests/fwdequ64.asm \ - modules/arch/x86/tests/fwdequ64.hex \ - modules/arch/x86/tests/genopcode.asm \ - modules/arch/x86/tests/genopcode.hex \ - modules/arch/x86/tests/imm64.asm \ - modules/arch/x86/tests/imm64.errwarn \ - modules/arch/x86/tests/imm64.hex \ - modules/arch/x86/tests/iret.asm \ - modules/arch/x86/tests/iret.hex \ - modules/arch/x86/tests/jmp64-1.asm \ - modules/arch/x86/tests/jmp64-1.hex \ - modules/arch/x86/tests/jmp64-2.asm \ - modules/arch/x86/tests/jmp64-2.hex \ - modules/arch/x86/tests/jmp64-3.asm \ - modules/arch/x86/tests/jmp64-3.hex \ - modules/arch/x86/tests/jmp64-4.asm \ - modules/arch/x86/tests/jmp64-4.hex \ - modules/arch/x86/tests/jmp64-5.asm \ - modules/arch/x86/tests/jmp64-5.hex \ - modules/arch/x86/tests/jmp64-6.asm \ - modules/arch/x86/tests/jmp64-6.hex \ - modules/arch/x86/tests/jmpfar.asm \ - modules/arch/x86/tests/jmpfar.hex \ - modules/arch/x86/tests/lds.asm modules/arch/x86/tests/lds.hex \ - 
modules/arch/x86/tests/loopadsz.asm \ - modules/arch/x86/tests/loopadsz.hex \ - modules/arch/x86/tests/lsahf.asm \ - modules/arch/x86/tests/lsahf.hex \ - modules/arch/x86/tests/mem64-err.asm \ - modules/arch/x86/tests/mem64-err.errwarn \ - modules/arch/x86/tests/mem64.asm \ - modules/arch/x86/tests/mem64.errwarn \ - modules/arch/x86/tests/mem64.hex \ - modules/arch/x86/tests/mem64hi32.asm \ - modules/arch/x86/tests/mem64hi32.hex \ - modules/arch/x86/tests/mem64rip.asm \ - modules/arch/x86/tests/mem64rip.hex \ - modules/arch/x86/tests/mixcase.asm \ - modules/arch/x86/tests/mixcase.hex \ - modules/arch/x86/tests/movbe.asm \ - modules/arch/x86/tests/movbe.hex \ - modules/arch/x86/tests/movdq32.asm \ - modules/arch/x86/tests/movdq32.hex \ - modules/arch/x86/tests/movdq64.asm \ - modules/arch/x86/tests/movdq64.hex \ - modules/arch/x86/tests/negequ.asm \ - modules/arch/x86/tests/negequ.hex \ - modules/arch/x86/tests/nomem64-err.asm \ - modules/arch/x86/tests/nomem64-err.errwarn \ - modules/arch/x86/tests/nomem64-err2.asm \ - modules/arch/x86/tests/nomem64-err2.errwarn \ - modules/arch/x86/tests/nomem64.asm \ - modules/arch/x86/tests/nomem64.errwarn \ - modules/arch/x86/tests/nomem64.hex \ - modules/arch/x86/tests/o64.asm modules/arch/x86/tests/o64.hex \ - modules/arch/x86/tests/o64loop.asm \ - modules/arch/x86/tests/o64loop.errwarn \ - modules/arch/x86/tests/o64loop.hex \ - modules/arch/x86/tests/opersize.asm \ - modules/arch/x86/tests/opersize.hex \ - modules/arch/x86/tests/opsize-err.asm \ - modules/arch/x86/tests/opsize-err.errwarn \ - modules/arch/x86/tests/overflow.asm \ - modules/arch/x86/tests/overflow.errwarn \ - modules/arch/x86/tests/overflow.hex \ - modules/arch/x86/tests/padlock.asm \ - modules/arch/x86/tests/padlock.hex \ - modules/arch/x86/tests/pshift.asm \ - modules/arch/x86/tests/pshift.hex \ - modules/arch/x86/tests/push64.asm \ - modules/arch/x86/tests/push64.errwarn \ - modules/arch/x86/tests/push64.hex \ - modules/arch/x86/tests/pushf.asm \ - 
modules/arch/x86/tests/pushf.hex \ - modules/arch/x86/tests/pushf-err.asm \ - modules/arch/x86/tests/pushf-err.errwarn \ - modules/arch/x86/tests/pushnosize.asm \ - modules/arch/x86/tests/pushnosize.errwarn \ - modules/arch/x86/tests/pushnosize.hex \ - modules/arch/x86/tests/rep.asm modules/arch/x86/tests/rep.hex \ - modules/arch/x86/tests/ret.asm modules/arch/x86/tests/ret.hex \ - modules/arch/x86/tests/riprel1.asm \ - modules/arch/x86/tests/riprel1.hex \ - modules/arch/x86/tests/riprel2.asm \ - modules/arch/x86/tests/riprel2.errwarn \ - modules/arch/x86/tests/riprel2.hex \ - modules/arch/x86/tests/ripseg.asm \ - modules/arch/x86/tests/ripseg.errwarn \ - modules/arch/x86/tests/ripseg.hex \ - modules/arch/x86/tests/segmov.asm \ - modules/arch/x86/tests/segmov.hex \ - modules/arch/x86/tests/segoff.asm \ - modules/arch/x86/tests/segoff.hex \ - modules/arch/x86/tests/segoff-err.asm \ - modules/arch/x86/tests/segoff-err.errwarn \ - modules/arch/x86/tests/shift.asm \ - modules/arch/x86/tests/shift.hex \ - modules/arch/x86/tests/simd-1.asm \ - modules/arch/x86/tests/simd-1.hex \ - modules/arch/x86/tests/simd-2.asm \ - modules/arch/x86/tests/simd-2.hex \ - modules/arch/x86/tests/simd64-1.asm \ - modules/arch/x86/tests/simd64-1.hex \ - modules/arch/x86/tests/simd64-2.asm \ - modules/arch/x86/tests/simd64-2.hex \ - modules/arch/x86/tests/sse-prefix.asm \ - modules/arch/x86/tests/sse-prefix.hex \ - modules/arch/x86/tests/sse3.asm \ - modules/arch/x86/tests/sse3.hex \ - modules/arch/x86/tests/sse4.asm \ - modules/arch/x86/tests/sse4.hex \ - modules/arch/x86/tests/sse4-err.asm \ - modules/arch/x86/tests/sse4-err.errwarn \ - modules/arch/x86/tests/sse5-all.asm \ - modules/arch/x86/tests/sse5-all.hex \ - modules/arch/x86/tests/sse5-basic.asm \ - modules/arch/x86/tests/sse5-basic.hex \ - modules/arch/x86/tests/sse5-cc.asm \ - modules/arch/x86/tests/sse5-cc.hex \ - modules/arch/x86/tests/sse5-err.asm \ - modules/arch/x86/tests/sse5-err.errwarn \ - 
modules/arch/x86/tests/ssewidth.asm \ - modules/arch/x86/tests/ssewidth.hex \ - modules/arch/x86/tests/ssse3.asm \ - modules/arch/x86/tests/ssse3.c \ - modules/arch/x86/tests/ssse3.hex \ - modules/arch/x86/tests/stos.asm \ - modules/arch/x86/tests/stos.hex modules/arch/x86/tests/str.asm \ - modules/arch/x86/tests/str.hex \ - modules/arch/x86/tests/strict.asm \ - modules/arch/x86/tests/strict.errwarn \ - modules/arch/x86/tests/strict.hex \ - modules/arch/x86/tests/strict-err.asm \ - modules/arch/x86/tests/strict-err.errwarn \ - modules/arch/x86/tests/stringseg.asm \ - modules/arch/x86/tests/stringseg.errwarn \ - modules/arch/x86/tests/stringseg.hex \ - modules/arch/x86/tests/svm.asm modules/arch/x86/tests/svm.hex \ - modules/arch/x86/tests/twobytemem.asm \ - modules/arch/x86/tests/twobytemem.errwarn \ - modules/arch/x86/tests/twobytemem.hex \ - modules/arch/x86/tests/vmx.asm modules/arch/x86/tests/vmx.hex \ - modules/arch/x86/tests/vmx-err.asm \ - modules/arch/x86/tests/vmx-err.errwarn \ - modules/arch/x86/tests/x86label.asm \ - modules/arch/x86/tests/x86label.hex \ - modules/arch/x86/tests/xchg64.asm \ - modules/arch/x86/tests/xchg64.hex \ - modules/arch/x86/tests/xmm64.asm \ - modules/arch/x86/tests/xmm64.hex \ - modules/arch/x86/tests/xsave.asm \ - modules/arch/x86/tests/xsave.hex \ - modules/arch/x86/tests/gas32/Makefile.inc \ - modules/arch/x86/tests/gas64/Makefile.inc \ - modules/arch/x86/tests/gas32/x86_gas32_test.sh \ - modules/arch/x86/tests/gas32/align32.asm \ - modules/arch/x86/tests/gas32/align32.hex \ - modules/arch/x86/tests/gas32/gas-farithr.asm \ - modules/arch/x86/tests/gas32/gas-farithr.hex \ - modules/arch/x86/tests/gas32/gas-fpmem.asm \ - modules/arch/x86/tests/gas32/gas-fpmem.hex \ - modules/arch/x86/tests/gas32/gas-movdq32.asm \ - modules/arch/x86/tests/gas32/gas-movdq32.hex \ - modules/arch/x86/tests/gas32/gas-movsd.asm \ - modules/arch/x86/tests/gas32/gas-movsd.hex \ - modules/arch/x86/tests/gas32/gas32-jmpcall.asm \ - 
modules/arch/x86/tests/gas32/gas32-jmpcall.hex \ - modules/arch/x86/tests/gas64/x86_gas64_test.sh \ - modules/arch/x86/tests/gas64/align64.asm \ - modules/arch/x86/tests/gas64/align64.hex \ - modules/arch/x86/tests/gas64/gas-cbw.asm \ - modules/arch/x86/tests/gas64/gas-cbw.hex \ - modules/arch/x86/tests/gas64/gas-fp.asm \ - modules/arch/x86/tests/gas64/gas-fp.hex \ - modules/arch/x86/tests/gas64/gas-inout.asm \ - modules/arch/x86/tests/gas64/gas-inout.hex \ - modules/arch/x86/tests/gas64/gas-moreinsn.asm \ - modules/arch/x86/tests/gas64/gas-moreinsn.hex \ - modules/arch/x86/tests/gas64/gas-movabs.asm \ - modules/arch/x86/tests/gas64/gas-movabs.hex \ - modules/arch/x86/tests/gas64/gas-movdq64.asm \ - modules/arch/x86/tests/gas64/gas-movdq64.hex \ - modules/arch/x86/tests/gas64/gas-movsxs.asm \ - modules/arch/x86/tests/gas64/gas-movsxs.hex \ - modules/arch/x86/tests/gas64/gas-muldiv.asm \ - modules/arch/x86/tests/gas64/gas-muldiv.hex \ - modules/arch/x86/tests/gas64/gas-prefix.asm \ - modules/arch/x86/tests/gas64/gas-prefix.errwarn \ - modules/arch/x86/tests/gas64/gas-prefix.hex \ - modules/arch/x86/tests/gas64/gas-retenter.asm \ - modules/arch/x86/tests/gas64/gas-retenter.hex \ - modules/arch/x86/tests/gas64/gas-shift.asm \ - modules/arch/x86/tests/gas64/gas-shift.hex \ - modules/arch/x86/tests/gas64/gas64-jmpcall.asm \ - modules/arch/x86/tests/gas64/gas64-jmpcall.hex \ - modules/arch/x86/tests/gas64/riprel.asm \ - modules/arch/x86/tests/gas64/riprel.hex \ - modules/arch/lc3b/tests/Makefile.inc \ - modules/arch/lc3b/lc3bid.re \ - modules/arch/lc3b/tests/lc3b_test.sh \ - modules/arch/lc3b/tests/lc3b-basic.asm \ - modules/arch/lc3b/tests/lc3b-basic.errwarn \ - modules/arch/lc3b/tests/lc3b-basic.hex \ - modules/arch/lc3b/tests/lc3b-br.asm \ - modules/arch/lc3b/tests/lc3b-br.hex \ - modules/arch/lc3b/tests/lc3b-ea-err.asm \ - modules/arch/lc3b/tests/lc3b-ea-err.errwarn \ - modules/arch/lc3b/tests/lc3b-mp22NC.asm \ - modules/arch/lc3b/tests/lc3b-mp22NC.hex \ - 
modules/arch/yasm_arch.xml modules/listfmts/nasm/Makefile.inc \ - modules/parsers/gas/Makefile.inc \ - modules/parsers/nasm/Makefile.inc \ - modules/parsers/gas/tests/Makefile.inc \ - modules/parsers/gas/gas-token.re \ - modules/parsers/gas/tests/gas_test.sh \ - modules/parsers/gas/tests/dataref-imm.asm \ - modules/parsers/gas/tests/dataref-imm.hex \ - modules/parsers/gas/tests/datavis.asm \ - modules/parsers/gas/tests/datavis.errwarn \ - modules/parsers/gas/tests/datavis.hex \ - modules/parsers/gas/tests/datavis2.asm \ - modules/parsers/gas/tests/datavis2.hex \ - modules/parsers/gas/tests/execsect.asm \ - modules/parsers/gas/tests/execsect.hex \ - modules/parsers/gas/tests/gas-fill.asm \ - modules/parsers/gas/tests/gas-fill.hex \ - modules/parsers/gas/tests/gas-float.asm \ - modules/parsers/gas/tests/gas-float.hex \ - modules/parsers/gas/tests/gas-instlabel.asm \ - modules/parsers/gas/tests/gas-instlabel.hex \ - modules/parsers/gas/tests/gas-line-err.asm \ - modules/parsers/gas/tests/gas-line-err.errwarn \ - modules/parsers/gas/tests/gas-line2-err.asm \ - modules/parsers/gas/tests/gas-line2-err.errwarn \ - modules/parsers/gas/tests/gas-push.asm \ - modules/parsers/gas/tests/gas-push.hex \ - modules/parsers/gas/tests/gas-segprefix.asm \ - modules/parsers/gas/tests/gas-segprefix.hex \ - modules/parsers/gas/tests/gas-semi.asm \ - modules/parsers/gas/tests/gas-semi.hex \ - modules/parsers/gas/tests/gassectalign.asm \ - modules/parsers/gas/tests/gassectalign.hex \ - modules/parsers/gas/tests/jmpcall.asm \ - modules/parsers/gas/tests/jmpcall.errwarn \ - modules/parsers/gas/tests/jmpcall.hex \ - modules/parsers/gas/tests/leb128.asm \ - modules/parsers/gas/tests/leb128.hex \ - modules/parsers/gas/tests/localcomm.asm \ - modules/parsers/gas/tests/localcomm.hex \ - modules/parsers/gas/tests/reggroup-err.asm \ - modules/parsers/gas/tests/reggroup-err.errwarn \ - modules/parsers/gas/tests/reggroup.asm \ - modules/parsers/gas/tests/reggroup.hex \ - 
modules/parsers/gas/tests/strzero.asm \ - modules/parsers/gas/tests/strzero.hex \ - modules/parsers/gas/tests/varinsn.asm \ - modules/parsers/gas/tests/varinsn.hex \ - modules/parsers/gas/tests/bin/Makefile.inc \ - modules/parsers/gas/tests/bin/gas_bin_test.sh \ - modules/parsers/gas/tests/bin/gas-comment.asm \ - modules/parsers/gas/tests/bin/gas-comment.errwarn \ - modules/parsers/gas/tests/bin/gas-comment.hex \ - modules/parsers/gas/tests/bin/gas-llabel.asm \ - modules/parsers/gas/tests/bin/gas-llabel.hex \ - modules/parsers/gas/tests/bin/gas-set.asm \ - modules/parsers/gas/tests/bin/gas-set.hex \ - modules/parsers/gas/tests/bin/rept-err.asm \ - modules/parsers/gas/tests/bin/rept-err.errwarn \ - modules/parsers/gas/tests/bin/reptempty.asm \ - modules/parsers/gas/tests/bin/reptempty.hex \ - modules/parsers/gas/tests/bin/reptlong.asm \ - modules/parsers/gas/tests/bin/reptlong.hex \ - modules/parsers/gas/tests/bin/reptnested-err.asm \ - modules/parsers/gas/tests/bin/reptnested-err.errwarn \ - modules/parsers/gas/tests/bin/reptsimple.asm \ - modules/parsers/gas/tests/bin/reptsimple.hex \ - modules/parsers/gas/tests/bin/reptwarn.asm \ - modules/parsers/gas/tests/bin/reptwarn.errwarn \ - modules/parsers/gas/tests/bin/reptwarn.hex \ - modules/parsers/gas/tests/bin/reptzero.asm \ - modules/parsers/gas/tests/bin/reptzero.hex \ - modules/parsers/nasm/nasm-token.re \ - modules/parsers/nasm/nasm-std.mac \ - modules/parsers/nasm/tests/Makefile.inc \ - modules/parsers/nasm/tests/nasm_test.sh \ - modules/parsers/nasm/tests/alignnop16.asm \ - modules/parsers/nasm/tests/alignnop16.hex \ - modules/parsers/nasm/tests/alignnop32.asm \ - modules/parsers/nasm/tests/alignnop32.hex \ - modules/parsers/nasm/tests/charconstmath.asm \ - modules/parsers/nasm/tests/charconstmath.hex \ - modules/parsers/nasm/tests/dy.asm \ - modules/parsers/nasm/tests/dy.hex \ - modules/parsers/nasm/tests/endcomma.asm \ - modules/parsers/nasm/tests/endcomma.hex \ - modules/parsers/nasm/tests/equcolon.asm \ - 
modules/parsers/nasm/tests/equcolon.hex \ - modules/parsers/nasm/tests/equlocal.asm \ - modules/parsers/nasm/tests/equlocal.hex \ - modules/parsers/nasm/tests/hexconst.asm \ - modules/parsers/nasm/tests/hexconst.hex \ - modules/parsers/nasm/tests/long.asm \ - modules/parsers/nasm/tests/long.hex \ - modules/parsers/nasm/tests/locallabel.asm \ - modules/parsers/nasm/tests/locallabel.hex \ - modules/parsers/nasm/tests/locallabel2.asm \ - modules/parsers/nasm/tests/locallabel2.hex \ - modules/parsers/nasm/tests/nasm-prefix.asm \ - modules/parsers/nasm/tests/nasm-prefix.hex \ - modules/parsers/nasm/tests/newsect.asm \ - modules/parsers/nasm/tests/newsect.hex \ - modules/parsers/nasm/tests/orphannowarn.asm \ - modules/parsers/nasm/tests/orphannowarn.hex \ - modules/parsers/nasm/tests/prevlocalwarn.asm \ - modules/parsers/nasm/tests/prevlocalwarn.errwarn \ - modules/parsers/nasm/tests/prevlocalwarn.hex \ - modules/parsers/nasm/tests/strucalign.asm \ - modules/parsers/nasm/tests/strucalign.hex \ - modules/parsers/nasm/tests/struczero.asm \ - modules/parsers/nasm/tests/struczero.hex \ - modules/parsers/nasm/tests/syntax-err.asm \ - modules/parsers/nasm/tests/syntax-err.errwarn \ - modules/parsers/nasm/tests/uscore.asm \ - modules/parsers/nasm/tests/uscore.hex \ - modules/parsers/nasm/tests/worphan/Makefile.inc \ - modules/parsers/nasm/tests/worphan/nasm_worphan_test.sh \ - modules/parsers/nasm/tests/worphan/orphanwarn.asm \ - modules/parsers/nasm/tests/worphan/orphanwarn.errwarn \ - modules/parsers/nasm/tests/worphan/orphanwarn.hex \ - modules/parsers/tasm/tests/Makefile.inc \ - modules/parsers/tasm/tests/tasm_test.sh \ - modules/parsers/tasm/tests/array.asm \ - modules/parsers/tasm/tests/array.hex \ - modules/parsers/tasm/tests/case.asm \ - modules/parsers/tasm/tests/case.hex \ - modules/parsers/tasm/tests/charstr.asm \ - modules/parsers/tasm/tests/charstr.hex \ - modules/parsers/tasm/tests/dup.asm \ - modules/parsers/tasm/tests/dup.hex \ - 
modules/parsers/tasm/tests/equal.asm \ - modules/parsers/tasm/tests/equal.hex \ - modules/parsers/tasm/tests/expr.asm \ - modules/parsers/tasm/tests/expr.hex \ - modules/parsers/tasm/tests/irp.asm \ - modules/parsers/tasm/tests/irp.hex \ - modules/parsers/tasm/tests/label.asm \ - modules/parsers/tasm/tests/label.hex \ - modules/parsers/tasm/tests/les.asm \ - modules/parsers/tasm/tests/les.hex \ - modules/parsers/tasm/tests/lidt.asm \ - modules/parsers/tasm/tests/lidt.hex \ - modules/parsers/tasm/tests/macro.asm \ - modules/parsers/tasm/tests/macro.hex \ - modules/parsers/tasm/tests/offset.asm \ - modules/parsers/tasm/tests/offset.hex \ - modules/parsers/tasm/tests/quote.asm \ - modules/parsers/tasm/tests/quote.hex \ - modules/parsers/tasm/tests/res.asm \ - modules/parsers/tasm/tests/res.errwarn \ - modules/parsers/tasm/tests/res.hex \ - modules/parsers/tasm/tests/segment.asm \ - modules/parsers/tasm/tests/segment.hex \ - modules/parsers/tasm/tests/size.asm \ - modules/parsers/tasm/tests/size.hex \ - modules/parsers/tasm/tests/struc.asm \ - modules/parsers/tasm/tests/struc.errwarn \ - modules/parsers/tasm/tests/struc.hex \ - modules/parsers/tasm/tests/exe/Makefile.inc \ - modules/parsers/tasm/tests/exe/tasm_exe_test.sh \ - modules/parsers/tasm/tests/exe/exe.asm \ - modules/parsers/tasm/tests/exe/exe.hex \ - modules/parsers/yasm_parsers.xml \ - modules/preprocs/nasm/Makefile.inc \ - modules/preprocs/raw/Makefile.inc \ - modules/preprocs/cpp/Makefile.inc \ - modules/preprocs/nasm/genversion.c \ - modules/preprocs/nasm/tests/Makefile.inc \ - modules/preprocs/nasm/tests/nasmpp_test.sh \ - modules/preprocs/nasm/tests/16args.asm \ - modules/preprocs/nasm/tests/16args.hex \ - modules/preprocs/nasm/tests/ifcritical-err.asm \ - modules/preprocs/nasm/tests/ifcritical-err.errwarn \ - modules/preprocs/nasm/tests/longline.asm \ - modules/preprocs/nasm/tests/longline.hex \ - modules/preprocs/nasm/tests/macroeof-err.asm \ - modules/preprocs/nasm/tests/macroeof-err.errwarn \ - 
modules/preprocs/nasm/tests/noinclude-err.asm \ - modules/preprocs/nasm/tests/noinclude-err.errwarn \ - modules/preprocs/nasm/tests/nasmpp-bigint.asm \ - modules/preprocs/nasm/tests/nasmpp-bigint.hex \ - modules/preprocs/nasm/tests/nasmpp-decimal.asm \ - modules/preprocs/nasm/tests/nasmpp-decimal.hex \ - modules/preprocs/nasm/tests/nasmpp-nested.asm \ - modules/preprocs/nasm/tests/nasmpp-nested.errwarn \ - modules/preprocs/nasm/tests/nasmpp-nested.hex \ - modules/preprocs/nasm/tests/orgsect.asm \ - modules/preprocs/nasm/tests/orgsect.hex \ - modules/preprocs/raw/tests/Makefile.inc \ - modules/preprocs/raw/tests/rawpp_test.sh \ - modules/preprocs/raw/tests/longline.asm \ - modules/preprocs/raw/tests/longline.hex \ - modules/dbgfmts/codeview/Makefile.inc \ - modules/dbgfmts/dwarf2/Makefile.inc \ - modules/dbgfmts/null/Makefile.inc \ - modules/dbgfmts/stabs/Makefile.inc \ - modules/dbgfmts/codeview/cv8.txt \ - modules/dbgfmts/dwarf2/tests/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf2_pass32_test.sh \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32-err.errwarn \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.asm \ - modules/dbgfmts/dwarf2/tests/pass32/dwarf32_testhd.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf2_pass64_test.sh \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_2loc.hex \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.asm \ - modules/dbgfmts/dwarf2/tests/pass64/dwarf64_leb128.hex \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarf2_passwin64_test.sh \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.asm \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.errwarn \ - modules/dbgfmts/dwarf2/tests/passwin64/dwarfwin64_testhd.hex \ - 
modules/dbgfmts/stabs/tests/Makefile.inc \ - modules/dbgfmts/stabs/tests/stabs_test.sh \ - modules/dbgfmts/stabs/tests/stabs-elf.asm \ - modules/dbgfmts/stabs/tests/stabs-elf.hex \ - modules/dbgfmts/yasm_dbgfmts.xml \ - modules/objfmts/dbg/Makefile.inc \ - modules/objfmts/bin/Makefile.inc \ - modules/objfmts/elf/Makefile.inc \ - modules/objfmts/coff/Makefile.inc \ - modules/objfmts/macho/Makefile.inc \ - modules/objfmts/rdf/Makefile.inc \ - modules/objfmts/win32/Makefile.inc \ - modules/objfmts/win64/Makefile.inc \ - modules/objfmts/xdf/Makefile.inc \ - modules/objfmts/bin/tests/Makefile.inc \ - modules/objfmts/bin/tests/bin_test.sh \ - modules/objfmts/bin/tests/abs.asm \ - modules/objfmts/bin/tests/abs.hex \ - modules/objfmts/bin/tests/bigorg.asm \ - modules/objfmts/bin/tests/bigorg.hex \ - modules/objfmts/bin/tests/bigorg.errwarn \ - modules/objfmts/bin/tests/bin-farabs.asm \ - modules/objfmts/bin/tests/bin-farabs.hex \ - modules/objfmts/bin/tests/bin-rip.asm \ - modules/objfmts/bin/tests/bin-rip.hex \ - modules/objfmts/bin/tests/bintest.asm \ - modules/objfmts/bin/tests/bintest.hex \ - modules/objfmts/bin/tests/float-err.asm \ - modules/objfmts/bin/tests/float-err.errwarn \ - modules/objfmts/bin/tests/float.asm \ - modules/objfmts/bin/tests/float.hex \ - modules/objfmts/bin/tests/integer-warn.asm \ - modules/objfmts/bin/tests/integer-warn.hex \ - modules/objfmts/bin/tests/integer-warn.errwarn \ - modules/objfmts/bin/tests/integer.asm \ - modules/objfmts/bin/tests/integer.hex \ - modules/objfmts/bin/tests/levelop.asm \ - modules/objfmts/bin/tests/levelop.hex \ - modules/objfmts/bin/tests/reserve.asm \ - modules/objfmts/bin/tests/reserve.hex \ - modules/objfmts/bin/tests/reserve.errwarn \ - modules/objfmts/bin/tests/shr.asm \ - modules/objfmts/bin/tests/shr.hex \ - modules/objfmts/bin/tests/multisect/Makefile.inc \ - modules/objfmts/bin/tests/multisect/bin_multi_test.sh \ - modules/objfmts/bin/tests/multisect/bin-align.asm \ - 
modules/objfmts/bin/tests/multisect/bin-align.errwarn \ - modules/objfmts/bin/tests/multisect/bin-align.hex \ - modules/objfmts/bin/tests/multisect/bin-align.map \ - modules/objfmts/bin/tests/multisect/bin-ssym.asm \ - modules/objfmts/bin/tests/multisect/bin-ssym.hex \ - modules/objfmts/bin/tests/multisect/bin-ssym.map \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/follows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/follows-notfound-err.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.asm \ - modules/objfmts/bin/tests/multisect/initbss.errwarn \ - modules/objfmts/bin/tests/multisect/initbss.hex \ - modules/objfmts/bin/tests/multisect/initbss.map \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.asm \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.hex \ - modules/objfmts/bin/tests/multisect/ldlinux-sects.map \ - modules/objfmts/bin/tests/multisect/multisect1.asm \ - modules/objfmts/bin/tests/multisect/multisect1.hex \ - modules/objfmts/bin/tests/multisect/multisect1.map \ - modules/objfmts/bin/tests/multisect/multisect2.asm \ - modules/objfmts/bin/tests/multisect/multisect2.hex \ - modules/objfmts/bin/tests/multisect/multisect2.map \ - modules/objfmts/bin/tests/multisect/multisect3.asm \ - modules/objfmts/bin/tests/multisect/multisect3.hex \ - modules/objfmts/bin/tests/multisect/multisect3.map \ - modules/objfmts/bin/tests/multisect/multisect4.asm \ - modules/objfmts/bin/tests/multisect/multisect4.hex \ - modules/objfmts/bin/tests/multisect/multisect4.map \ - modules/objfmts/bin/tests/multisect/multisect5.asm \ - modules/objfmts/bin/tests/multisect/multisect5.hex \ - modules/objfmts/bin/tests/multisect/multisect5.map \ - modules/objfmts/bin/tests/multisect/nomultisect1.asm \ - 
modules/objfmts/bin/tests/multisect/nomultisect1.hex \ - modules/objfmts/bin/tests/multisect/nomultisect1.map \ - modules/objfmts/bin/tests/multisect/nomultisect2.asm \ - modules/objfmts/bin/tests/multisect/nomultisect2.hex \ - modules/objfmts/bin/tests/multisect/nomultisect2.map \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop1-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-loop2-err.errwarn \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.asm \ - modules/objfmts/bin/tests/multisect/vfollows-notfound-err.errwarn \ - modules/objfmts/elf/tests/Makefile.inc \ - modules/objfmts/elf/tests/elf_test.sh \ - modules/objfmts/elf/tests/curpos.asm \ - modules/objfmts/elf/tests/curpos.hex \ - modules/objfmts/elf/tests/curpos-err.asm \ - modules/objfmts/elf/tests/curpos-err.errwarn \ - modules/objfmts/elf/tests/elf-overdef.asm \ - modules/objfmts/elf/tests/elf-overdef.hex \ - modules/objfmts/elf/tests/elf-x86id.asm \ - modules/objfmts/elf/tests/elf-x86id.hex \ - modules/objfmts/elf/tests/elfabssect.asm \ - modules/objfmts/elf/tests/elfabssect.hex \ - modules/objfmts/elf/tests/elfcond.asm \ - modules/objfmts/elf/tests/elfcond.hex \ - modules/objfmts/elf/tests/elfequabs.asm \ - modules/objfmts/elf/tests/elfequabs.hex \ - modules/objfmts/elf/tests/elfglobal.asm \ - modules/objfmts/elf/tests/elfglobal.hex \ - modules/objfmts/elf/tests/elfglobext.asm \ - modules/objfmts/elf/tests/elfglobext.hex \ - modules/objfmts/elf/tests/elfglobext2.asm \ - modules/objfmts/elf/tests/elfglobext2.hex \ - modules/objfmts/elf/tests/elfmanysym.asm \ - modules/objfmts/elf/tests/elfmanysym.hex \ - modules/objfmts/elf/tests/elfreloc.asm \ - modules/objfmts/elf/tests/elfreloc.hex \ - modules/objfmts/elf/tests/elfreloc-ext.asm \ - modules/objfmts/elf/tests/elfreloc-ext.hex \ - modules/objfmts/elf/tests/elfsectalign.asm \ - 
modules/objfmts/elf/tests/elfsectalign.hex \ - modules/objfmts/elf/tests/elfso.asm \ - modules/objfmts/elf/tests/elfso.hex \ - modules/objfmts/elf/tests/elftest.c \ - modules/objfmts/elf/tests/elftest.asm \ - modules/objfmts/elf/tests/elftest.hex \ - modules/objfmts/elf/tests/elftimes.asm \ - modules/objfmts/elf/tests/elftimes.hex \ - modules/objfmts/elf/tests/elftypesize.asm \ - modules/objfmts/elf/tests/elftypesize.hex \ - modules/objfmts/elf/tests/elfvisibility.asm \ - modules/objfmts/elf/tests/elfvisibility.errwarn \ - modules/objfmts/elf/tests/elfvisibility.hex \ - modules/objfmts/elf/tests/nasm-sectname.asm \ - modules/objfmts/elf/tests/nasm-sectname.hex \ - modules/objfmts/elf/tests/nasm-forceident.asm \ - modules/objfmts/elf/tests/nasm-forceident.hex \ - modules/objfmts/elf/tests/amd64/Makefile.inc \ - modules/objfmts/elf/tests/gas32/Makefile.inc \ - modules/objfmts/elf/tests/gas64/Makefile.inc \ - modules/objfmts/elf/tests/amd64/elf_amd64_test.sh \ - modules/objfmts/elf/tests/amd64/elf-rip.asm \ - modules/objfmts/elf/tests/amd64/elf-rip.hex \ - modules/objfmts/elf/tests/amd64/elfso64.asm \ - modules/objfmts/elf/tests/amd64/elfso64.hex \ - modules/objfmts/elf/tests/amd64/gotpcrel.asm \ - modules/objfmts/elf/tests/amd64/gotpcrel.hex \ - modules/objfmts/elf/tests/gas32/elf_gas32_test.sh \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.asm \ - modules/objfmts/elf/tests/gas32/elf_gas32_ssym.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_test.sh \ - modules/objfmts/elf/tests/gas64/crosssect.asm \ - modules/objfmts/elf/tests/gas64/crosssect.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_curpos.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_reloc.hex \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.asm \ - modules/objfmts/elf/tests/gas64/elf_gas64_ssym.hex \ - modules/objfmts/coff/win64-nasm.mac \ - modules/objfmts/coff/win64-gas.mac \ - 
modules/objfmts/coff/tests/Makefile.inc \ - modules/objfmts/coff/tests/coff_test.sh \ - modules/objfmts/coff/tests/cofftest.c \ - modules/objfmts/coff/tests/cofftest.asm \ - modules/objfmts/coff/tests/cofftest.hex \ - modules/objfmts/coff/tests/cofftimes.asm \ - modules/objfmts/coff/tests/cofftimes.hex \ - modules/objfmts/coff/tests/x86id.asm \ - modules/objfmts/coff/tests/x86id.hex \ - modules/objfmts/coff/tests/x86id.errwarn \ - modules/objfmts/macho/tests/Makefile.inc \ - modules/objfmts/macho/tests/gas32/Makefile.inc \ - modules/objfmts/macho/tests/gas64/Makefile.inc \ - modules/objfmts/macho/tests/nasm32/Makefile.inc \ - modules/objfmts/macho/tests/nasm64/Makefile.inc \ - modules/objfmts/macho/tests/gas32/gas_macho32_test.sh \ - modules/objfmts/macho/tests/gas32/gas-macho32.asm \ - modules/objfmts/macho/tests/gas32/gas-macho32.hex \ - modules/objfmts/macho/tests/gas64/gas_macho64_test.sh \ - modules/objfmts/macho/tests/gas64/gas-macho64.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64.hex \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.asm \ - modules/objfmts/macho/tests/gas64/gas-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm32/macho32_test.sh \ - modules/objfmts/macho/tests/nasm32/machotest.c \ - modules/objfmts/macho/tests/nasm32/machotest.asm \ - modules/objfmts/macho/tests/nasm32/machotest.hex \ - modules/objfmts/macho/tests/nasm32/macho-reloc.asm \ - modules/objfmts/macho/tests/nasm32/macho-reloc.hex \ - modules/objfmts/macho/tests/nasm32/macho32-sect.asm \ - modules/objfmts/macho/tests/nasm32/macho32-sect.errwarn \ - modules/objfmts/macho/tests/nasm32/macho32-sect.hex \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.asm \ - modules/objfmts/macho/tests/nasm64/nasm-macho64-pic.hex \ - modules/objfmts/macho/tests/nasm64/macho64_test.sh \ - modules/objfmts/macho/tests/nasm64/machotest64.c \ - modules/objfmts/macho/tests/nasm64/machotest64.asm \ - modules/objfmts/macho/tests/nasm64/machotest64.hex \ - 
modules/objfmts/macho/tests/nasm64/macho-reloc64-err.asm \ - modules/objfmts/macho/tests/nasm64/macho-reloc64-err.errwarn \ - modules/objfmts/rdf/tests/Makefile.inc \ - modules/objfmts/rdf/tests/rdf_test.sh \ - modules/objfmts/rdf/tests/rdfabs.asm \ - modules/objfmts/rdf/tests/rdfabs.errwarn \ - modules/objfmts/rdf/tests/rdfabs.hex \ - modules/objfmts/rdf/tests/rdfext.asm \ - modules/objfmts/rdf/tests/rdfext.hex \ - modules/objfmts/rdf/tests/rdfseg.asm \ - modules/objfmts/rdf/tests/rdfseg.hex \ - modules/objfmts/rdf/tests/rdfseg2.asm \ - modules/objfmts/rdf/tests/rdfseg2.hex \ - modules/objfmts/rdf/tests/rdftest1.asm \ - modules/objfmts/rdf/tests/rdftest1.hex \ - modules/objfmts/rdf/tests/rdftest2.asm \ - modules/objfmts/rdf/tests/rdftest2.hex \ - modules/objfmts/rdf/tests/rdtlib.asm \ - modules/objfmts/rdf/tests/rdtlib.hex \ - modules/objfmts/rdf/tests/rdtmain.asm \ - modules/objfmts/rdf/tests/rdtmain.hex \ - modules/objfmts/rdf/tests/testlib.asm \ - modules/objfmts/rdf/tests/testlib.hex \ - modules/objfmts/win32/tests/Makefile.inc \ - modules/objfmts/win32/tests/export.asm \ - modules/objfmts/win32/tests/export.hex \ - modules/objfmts/win32/tests/win32_test.sh \ - modules/objfmts/win32/tests/win32-curpos.asm \ - modules/objfmts/win32/tests/win32-curpos.hex \ - modules/objfmts/win32/tests/win32-overdef.asm \ - modules/objfmts/win32/tests/win32-overdef.hex \ - modules/objfmts/win32/tests/win32-safeseh.asm \ - modules/objfmts/win32/tests/win32-safeseh.hex \ - modules/objfmts/win32/tests/win32-safeseh.masm \ - modules/objfmts/win32/tests/win32-segof.asm \ - modules/objfmts/win32/tests/win32-segof.hex \ - modules/objfmts/win32/tests/win32test.c \ - modules/objfmts/win32/tests/win32test.asm \ - modules/objfmts/win32/tests/win32test.hex \ - modules/objfmts/win32/tests/gas/Makefile.inc \ - modules/objfmts/win32/tests/gas/win32_gas_test.sh \ - modules/objfmts/win32/tests/gas/win32at.asm \ - modules/objfmts/win32/tests/gas/win32at.hex \ - 
modules/objfmts/win64/tests/Makefile.inc \ - modules/objfmts/win64/tests/win64_test.sh \ - modules/objfmts/win64/tests/sce1.asm \ - modules/objfmts/win64/tests/sce1.hex \ - modules/objfmts/win64/tests/sce1-err.asm \ - modules/objfmts/win64/tests/sce1-err.errwarn \ - modules/objfmts/win64/tests/sce2.asm \ - modules/objfmts/win64/tests/sce2.hex \ - modules/objfmts/win64/tests/sce2-err.asm \ - modules/objfmts/win64/tests/sce2-err.errwarn \ - modules/objfmts/win64/tests/sce3.asm \ - modules/objfmts/win64/tests/sce3.hex \ - modules/objfmts/win64/tests/sce3.masm \ - modules/objfmts/win64/tests/sce4.asm \ - modules/objfmts/win64/tests/sce4.hex \ - modules/objfmts/win64/tests/sce4.masm \ - modules/objfmts/win64/tests/sce4-err.asm \ - modules/objfmts/win64/tests/sce4-err.errwarn \ - modules/objfmts/win64/tests/win64-abs.asm \ - modules/objfmts/win64/tests/win64-abs.hex \ - modules/objfmts/win64/tests/win64-curpos.asm \ - modules/objfmts/win64/tests/win64-curpos.hex \ - modules/objfmts/win64/tests/win64-dataref.asm \ - modules/objfmts/win64/tests/win64-dataref.hex \ - modules/objfmts/win64/tests/win64-dataref.masm \ - modules/objfmts/win64/tests/win64-dataref2.asm \ - modules/objfmts/win64/tests/win64-dataref2.hex \ - modules/objfmts/win64/tests/win64-dataref2.masm \ - modules/objfmts/win64/tests/gas/Makefile.inc \ - modules/objfmts/win64/tests/gas/win64_gas_test.sh \ - modules/objfmts/win64/tests/gas/win64-gas-sce.asm \ - modules/objfmts/win64/tests/gas/win64-gas-sce.hex \ - modules/objfmts/xdf/tests/Makefile.inc \ - modules/objfmts/xdf/tests/xdf_test.sh \ - modules/objfmts/xdf/tests/xdf-overdef.asm \ - modules/objfmts/xdf/tests/xdf-overdef.hex \ - modules/objfmts/xdf/tests/xdflong.asm \ - modules/objfmts/xdf/tests/xdflong.hex \ - modules/objfmts/xdf/tests/xdflong.errwarn \ - modules/objfmts/xdf/tests/xdfother.asm \ - modules/objfmts/xdf/tests/xdfother.hex \ - modules/objfmts/xdf/tests/xdfprotect.asm \ - modules/objfmts/xdf/tests/xdfprotect.hex \ - 
modules/objfmts/xdf/tests/xdfsect.asm \ - modules/objfmts/xdf/tests/xdfsect.hex \ - modules/objfmts/xdf/tests/xdfsect-err.asm \ - modules/objfmts/xdf/tests/xdfsect-err.errwarn \ - modules/objfmts/xdf/tests/xdfvirtual.asm \ - modules/objfmts/xdf/tests/xdfvirtual.hex \ - modules/objfmts/yasm_objfmts.xml libyasm/genmodule.c \ - libyasm/module.in libyasm/tests/Makefile.inc \ - libyasm/tests/libyasm_test.sh libyasm/tests/1shl0.asm \ - libyasm/tests/1shl0.hex libyasm/tests/absloop-err.asm \ - libyasm/tests/absloop-err.errwarn \ - libyasm/tests/charconst64.asm libyasm/tests/charconst64.hex \ - libyasm/tests/data-rawvalue.asm \ - libyasm/tests/data-rawvalue.hex libyasm/tests/duplabel-err.asm \ - libyasm/tests/duplabel-err.errwarn libyasm/tests/emptydata.asm \ - libyasm/tests/emptydata.hex libyasm/tests/equ-expand.asm \ - libyasm/tests/equ-expand.hex libyasm/tests/expr-fold-level.asm \ - libyasm/tests/expr-fold-level.hex \ - libyasm/tests/expr-wide-ident.asm \ - libyasm/tests/expr-wide-ident.hex libyasm/tests/externdef.asm \ - libyasm/tests/externdef.errwarn libyasm/tests/externdef.hex \ - libyasm/tests/incbin.asm libyasm/tests/incbin.hex \ - libyasm/tests/jmpsize1.asm libyasm/tests/jmpsize1.hex \ - libyasm/tests/jmpsize1-err.asm \ - libyasm/tests/jmpsize1-err.errwarn \ - libyasm/tests/opt-align1.asm libyasm/tests/opt-align1.hex \ - libyasm/tests/opt-align2.asm libyasm/tests/opt-align2.hex \ - libyasm/tests/opt-align3.asm libyasm/tests/opt-align3.hex \ - libyasm/tests/opt-circular1-err.asm \ - libyasm/tests/opt-circular1-err.errwarn \ - libyasm/tests/opt-circular2-err.asm \ - libyasm/tests/opt-circular2-err.errwarn \ - libyasm/tests/opt-circular3-err.asm \ - libyasm/tests/opt-circular3-err.errwarn \ - libyasm/tests/opt-gvmat64.asm libyasm/tests/opt-gvmat64.hex \ - libyasm/tests/opt-immexpand.asm \ - libyasm/tests/opt-immexpand.hex \ - libyasm/tests/opt-immnoexpand.asm \ - libyasm/tests/opt-immnoexpand.hex \ - libyasm/tests/opt-oldalign.asm libyasm/tests/opt-oldalign.hex \ - 
libyasm/tests/opt-struc.asm libyasm/tests/opt-struc.hex \ - libyasm/tests/reserve-err1.asm \ - libyasm/tests/reserve-err1.errwarn \ - libyasm/tests/reserve-err2.asm \ - libyasm/tests/reserve-err2.errwarn libyasm/tests/strucsize.asm \ - libyasm/tests/strucsize.hex libyasm/tests/times0.asm \ - libyasm/tests/times0.hex libyasm/tests/timesover-err.asm \ - libyasm/tests/timesover-err.errwarn \ - libyasm/tests/timesunder.asm libyasm/tests/timesunder.hex \ - libyasm/tests/times-res.asm libyasm/tests/times-res.errwarn \ - libyasm/tests/times-res.hex libyasm/tests/unary.asm \ - libyasm/tests/unary.hex libyasm/tests/value-err.asm \ - libyasm/tests/value-err.errwarn \ - libyasm/tests/value-samesym.asm \ - libyasm/tests/value-samesym.errwarn \ - libyasm/tests/value-samesym.hex libyasm/tests/value-mask.asm \ - libyasm/tests/value-mask.errwarn libyasm/tests/value-mask.hex \ - frontends/yasm/Makefile.inc frontends/tasm/Makefile.inc \ - frontends/yasm/yasm.xml m4/intmax.m4 m4/longdouble.m4 \ - m4/nls.m4 m4/po.m4 m4/printf-posix.m4 m4/signed.m4 \ - m4/size_max.m4 m4/ulonglong.m4 m4/wchar_t.m4 m4/wint_t.m4 \ - m4/xsize.m4 m4/codeset.m4 m4/gettext.m4 m4/glibc21.m4 \ - m4/iconv.m4 m4/intdiv0.m4 m4/inttypes.m4 m4/inttypes_h.m4 \ - m4/inttypes-pri.m4 m4/isc-posix.m4 m4/lcmessage.m4 \ - m4/lib-ld.m4 m4/lib-link.m4 m4/lib-prefix.m4 m4/longlong.m4 \ - m4/progtest.m4 m4/stdint_h.m4 m4/uintmax_t.m4 m4/pythonhead.m4 \ - m4/pyrex.m4 out_test.sh Artistic.txt BSD.txt GNU_GPL-2.0 \ - GNU_LGPL-2.0 splint.sh Mkfiles/Makefile.flat \ - Mkfiles/Makefile.dj Mkfiles/dj/config.h \ - Mkfiles/dj/libyasm-stdint.h \ - Mkfiles/vc9/crt_secure_no_deprecate.vsprops \ - Mkfiles/vc9/yasm.sln Mkfiles/vc9/yasm.vcproj \ - Mkfiles/vc9/ytasm.vcproj Mkfiles/vc9/config.h \ - Mkfiles/vc9/libyasm-stdint.h Mkfiles/vc9/readme.vc9.txt \ - Mkfiles/vc9/yasm.rules Mkfiles/vc9/vc98_swap.py \ - Mkfiles/vc9/genmacro/genmacro.vcproj \ - Mkfiles/vc9/genmacro/run.bat \ - Mkfiles/vc9/genmodule/genmodule.vcproj \ - 
Mkfiles/vc9/genmodule/run.bat \ - Mkfiles/vc9/genstring/genstring.vcproj \ - Mkfiles/vc9/genstring/run.bat \ - Mkfiles/vc9/genversion/genversion.vcproj \ - Mkfiles/vc9/genversion/run.bat \ - Mkfiles/vc9/libyasm/libyasm.vcproj \ - Mkfiles/vc9/modules/modules.vcproj \ - Mkfiles/vc9/re2c/re2c.vcproj Mkfiles/vc9/re2c/run.bat \ - Mkfiles/vc9/genperf/genperf.vcproj Mkfiles/vc9/genperf/run.bat \ - genstring.c - -# libyasm-stdint.h doesn't clean up after itself? -CONFIG_CLEAN_FILES = libyasm-stdint.h -re2c_SOURCES = -re2c_LDADD = re2c-main.$(OBJEXT) re2c-code.$(OBJEXT) \ - re2c-dfa.$(OBJEXT) re2c-parser.$(OBJEXT) \ - re2c-actions.$(OBJEXT) re2c-scanner.$(OBJEXT) \ - re2c-mbo_getopt.$(OBJEXT) re2c-substr.$(OBJEXT) \ - re2c-translate.$(OBJEXT) -re2c_LINK = $(CCLD_FOR_BUILD) -o $@ -genmacro_SOURCES = -genmacro_LDADD = genmacro.$(OBJEXT) -genmacro_LINK = $(CCLD_FOR_BUILD) -o $@ -genperf_SOURCES = -genperf_LDADD = genperf.$(OBJEXT) gp-perfect.$(OBJEXT) \ - gp-phash.$(OBJEXT) gp-xmalloc.$(OBJEXT) gp-xstrdup.$(OBJEXT) -genperf_LINK = $(CCLD_FOR_BUILD) -o $@ -PYBINDING_DEPS = tools/python-yasm/bytecode.pxi \ - tools/python-yasm/errwarn.pxi tools/python-yasm/expr.pxi \ - tools/python-yasm/floatnum.pxi tools/python-yasm/intnum.pxi \ - tools/python-yasm/symrec.pxi tools/python-yasm/value.pxi -YASM_MODULES = arch_x86 arch_lc3b listfmt_nasm parser_gas parser_gnu \ - parser_nasm parser_tasm preproc_nasm preproc_tasm preproc_raw \ - preproc_cpp dbgfmt_cv8 dbgfmt_dwarf2 dbgfmt_null dbgfmt_stabs \ - objfmt_dbg objfmt_bin objfmt_dosexe objfmt_elf objfmt_elf32 \ - objfmt_elf64 objfmt_coff objfmt_macho objfmt_macho32 \ - objfmt_macho64 objfmt_rdf objfmt_win32 objfmt_win64 objfmt_x64 \ - objfmt_xdf -lib_LIBRARIES = libyasm.a -libyasm_a_SOURCES = modules/arch/x86/x86arch.c \ - modules/arch/x86/x86arch.h modules/arch/x86/x86bc.c \ - modules/arch/x86/x86expr.c modules/arch/x86/x86id.c \ - modules/arch/lc3b/lc3barch.c modules/arch/lc3b/lc3barch.h \ - modules/arch/lc3b/lc3bbc.c \ - 
modules/listfmts/nasm/nasm-listfmt.c \ - modules/parsers/gas/gas-parser.c \ - modules/parsers/gas/gas-parser.h \ - modules/parsers/gas/gas-parse.c \ - modules/parsers/nasm/nasm-parser.c \ - modules/parsers/nasm/nasm-parser.h \ - modules/parsers/nasm/nasm-parse.c \ - modules/preprocs/nasm/nasm-preproc.c \ - modules/preprocs/nasm/nasm-pp.h \ - modules/preprocs/nasm/nasm-pp.c modules/preprocs/nasm/nasm.h \ - modules/preprocs/nasm/nasmlib.h \ - modules/preprocs/nasm/nasmlib.c \ - modules/preprocs/nasm/nasm-eval.h \ - modules/preprocs/nasm/nasm-eval.c \ - modules/preprocs/raw/raw-preproc.c \ - modules/preprocs/cpp/cpp-preproc.c \ - modules/dbgfmts/codeview/cv-dbgfmt.h \ - modules/dbgfmts/codeview/cv-dbgfmt.c \ - modules/dbgfmts/codeview/cv-symline.c \ - modules/dbgfmts/codeview/cv-type.c \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.h \ - modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c \ - modules/dbgfmts/dwarf2/dwarf2-line.c \ - modules/dbgfmts/dwarf2/dwarf2-aranges.c \ - modules/dbgfmts/dwarf2/dwarf2-info.c \ - modules/dbgfmts/null/null-dbgfmt.c \ - modules/dbgfmts/stabs/stabs-dbgfmt.c \ - modules/objfmts/dbg/dbg-objfmt.c \ - modules/objfmts/bin/bin-objfmt.c modules/objfmts/elf/elf.c \ - modules/objfmts/elf/elf.h modules/objfmts/elf/elf-objfmt.c \ - modules/objfmts/elf/elf-machine.h \ - modules/objfmts/elf/elf-x86-x86.c \ - modules/objfmts/elf/elf-x86-amd64.c \ - modules/objfmts/coff/coff-objfmt.c \ - modules/objfmts/coff/coff-objfmt.h \ - modules/objfmts/coff/win64-except.c \ - modules/objfmts/macho/macho-objfmt.c \ - modules/objfmts/rdf/rdf-objfmt.c \ - modules/objfmts/xdf/xdf-objfmt.c libyasm/assocdat.c \ - libyasm/bitvect.c libyasm/bc-align.c libyasm/bc-data.c \ - libyasm/bc-incbin.c libyasm/bc-org.c libyasm/bc-reserve.c \ - libyasm/bytecode.c libyasm/errwarn.c libyasm/expr.c \ - libyasm/file.c libyasm/floatnum.c libyasm/hamt.c \ - libyasm/insn.c libyasm/intnum.c libyasm/inttree.c \ - libyasm/linemap.c libyasm/md5.c libyasm/mergesort.c \ - libyasm/phash.c libyasm/section.c 
libyasm/strcasecmp.c \ - libyasm/strsep.c libyasm/symrec.c libyasm/valparam.c \ - libyasm/value.c libyasm/xmalloc.c libyasm/xstrdup.c -nodist_libyasm_a_SOURCES = x86cpu.c x86regtmod.c lc3bid.c gas-token.c \ - nasm-token.c module.c -genversion_SOURCES = -genversion_LDADD = genversion.$(OBJEXT) -genversion_LINK = $(CCLD_FOR_BUILD) -o $@ -genmodule_SOURCES = -genmodule_LDADD = genmodule.$(OBJEXT) -genmodule_LINK = $(CCLD_FOR_BUILD) -o $@ -modincludedir = $(includedir)/libyasm -modinclude_HEADERS = libyasm/arch.h libyasm/assocdat.h \ - libyasm/bitvect.h libyasm/bytecode.h libyasm/compat-queue.h \ - libyasm/coretype.h libyasm/dbgfmt.h libyasm/errwarn.h \ - libyasm/expr.h libyasm/file.h libyasm/floatnum.h \ - libyasm/hamt.h libyasm/insn.h libyasm/intnum.h \ - libyasm/inttree.h libyasm/linemap.h libyasm/listfmt.h \ - libyasm/md5.h libyasm/module.h libyasm/objfmt.h \ - libyasm/parser.h libyasm/phash.h libyasm/preproc.h \ - libyasm/section.h libyasm/symrec.h libyasm/valparam.h \ - libyasm/value.h -bitvect_test_SOURCES = libyasm/tests/bitvect_test.c -bitvect_test_LDADD = libyasm.a $(INTLLIBS) -floatnum_test_SOURCES = libyasm/tests/floatnum_test.c -floatnum_test_LDADD = libyasm.a $(INTLLIBS) -leb128_test_SOURCES = libyasm/tests/leb128_test.c -leb128_test_LDADD = libyasm.a $(INTLLIBS) -splitpath_test_SOURCES = libyasm/tests/splitpath_test.c -splitpath_test_LDADD = libyasm.a $(INTLLIBS) -combpath_test_SOURCES = libyasm/tests/combpath_test.c -combpath_test_LDADD = libyasm.a $(INTLLIBS) -uncstring_test_SOURCES = libyasm/tests/uncstring_test.c -uncstring_test_LDADD = libyasm.a $(INTLLIBS) -yasm_SOURCES = frontends/yasm/yasm.c frontends/yasm/yasm-options.c \ - frontends/yasm/yasm-options.h -yasm_LDADD = libyasm.a $(INTLLIBS) -ytasm_SOURCES = frontends/tasm/tasm.c frontends/tasm/tasm-options.c \ - frontends/tasm/tasm-options.h -ytasm_LDADD = libyasm.a $(INTLLIBS) -ACLOCAL_AMFLAGS = -I m4 - -# genstring build -genstring_SOURCES = -genstring_LDADD = genstring.$(OBJEXT) -genstring_LINK 
= $(CCLD_FOR_BUILD) -o $@ -all: $(BUILT_SOURCES) config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .gperf .c .o .obj -am--refresh: - @: -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/tools/Makefile.inc $(srcdir)/tools/re2c/Makefile.inc $(srcdir)/tools/genmacro/Makefile.inc $(srcdir)/tools/genperf/Makefile.inc $(srcdir)/tools/python-yasm/Makefile.inc $(srcdir)/tools/python-yasm/tests/Makefile.inc $(srcdir)/modules/Makefile.inc $(srcdir)/modules/arch/Makefile.inc $(srcdir)/modules/arch/x86/Makefile.inc $(srcdir)/modules/arch/x86/tests/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas32/Makefile.inc $(srcdir)/modules/arch/x86/tests/gas64/Makefile.inc $(srcdir)/modules/arch/lc3b/Makefile.inc $(srcdir)/modules/arch/lc3b/tests/Makefile.inc $(srcdir)/modules/listfmts/Makefile.inc $(srcdir)/modules/listfmts/nasm/Makefile.inc $(srcdir)/modules/parsers/Makefile.inc $(srcdir)/modules/parsers/gas/Makefile.inc $(srcdir)/modules/parsers/gas/tests/Makefile.inc $(srcdir)/modules/parsers/gas/tests/bin/Makefile.inc $(srcdir)/modules/parsers/nasm/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/Makefile.inc $(srcdir)/modules/parsers/nasm/tests/worphan/Makefile.inc $(srcdir)/modules/parsers/tasm/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/Makefile.inc $(srcdir)/modules/parsers/tasm/tests/exe/Makefile.inc $(srcdir)/modules/preprocs/Makefile.inc $(srcdir)/modules/preprocs/nasm/Makefile.inc $(srcdir)/modules/preprocs/nasm/tests/Makefile.inc $(srcdir)/modules/preprocs/raw/Makefile.inc $(srcdir)/modules/preprocs/raw/tests/Makefile.inc $(srcdir)/modules/preprocs/cpp/Makefile.inc $(srcdir)/modules/dbgfmts/Makefile.inc $(srcdir)/modules/dbgfmts/codeview/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass32/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/pass64/Makefile.inc $(srcdir)/modules/dbgfmts/dwarf2/tests/passwin64/Makefile.inc 
$(srcdir)/modules/dbgfmts/null/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/Makefile.inc $(srcdir)/modules/dbgfmts/stabs/tests/Makefile.inc $(srcdir)/modules/objfmts/Makefile.inc $(srcdir)/modules/objfmts/dbg/Makefile.inc $(srcdir)/modules/objfmts/bin/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/Makefile.inc $(srcdir)/modules/objfmts/bin/tests/multisect/Makefile.inc $(srcdir)/modules/objfmts/elf/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/amd64/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/elf/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/coff/Makefile.inc $(srcdir)/modules/objfmts/coff/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/gas64/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm32/Makefile.inc $(srcdir)/modules/objfmts/macho/tests/nasm64/Makefile.inc $(srcdir)/modules/objfmts/rdf/Makefile.inc $(srcdir)/modules/objfmts/rdf/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/Makefile.inc $(srcdir)/modules/objfmts/win32/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/win64/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/Makefile.inc $(srcdir)/modules/objfmts/win64/tests/gas/Makefile.inc $(srcdir)/modules/objfmts/xdf/Makefile.inc $(srcdir)/modules/objfmts/xdf/tests/Makefile.inc $(srcdir)/libyasm/Makefile.inc $(srcdir)/libyasm/tests/Makefile.inc $(srcdir)/frontends/Makefile.inc $(srcdir)/frontends/yasm/Makefile.inc $(srcdir)/frontends/tasm/Makefile.inc $(srcdir)/m4/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ - cd $(srcdir) && $(AUTOMAKE) --gnu \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && 
$(AUTOMAKE) --gnu Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) - -config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: $(am__configure_deps) - cd $(top_srcdir) && $(AUTOHEADER) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -install-libLIBRARIES: $(lib_LIBRARIES) - @$(NORMAL_INSTALL) - test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - f=$(am__strip_dir) \ - echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ - else :; fi; \ - done - @$(POST_INSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - if test -f $$p; then \ - p=$(am__strip_dir) \ - echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \ - $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \ - else :; fi; \ - done - -uninstall-libLIBRARIES: - @$(NORMAL_UNINSTALL) - @list='$(lib_LIBRARIES)'; for p in $$list; do \ - p=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - rm -f "$(DESTDIR)$(libdir)/$$p"; \ - done - -clean-libLIBRARIES: - 
-test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES) -libyasm.a: $(libyasm_a_OBJECTS) $(libyasm_a_DEPENDENCIES) - -rm -f libyasm.a - $(libyasm_a_AR) libyasm.a $(libyasm_a_OBJECTS) $(libyasm_a_LIBADD) - $(RANLIB) libyasm.a -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) - -clean-checkPROGRAMS: - -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS) - -clean-noinstPROGRAMS: - -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) -bitvect_test$(EXEEXT): $(bitvect_test_OBJECTS) $(bitvect_test_DEPENDENCIES) - @rm -f bitvect_test$(EXEEXT) - $(LINK) $(bitvect_test_OBJECTS) $(bitvect_test_LDADD) $(LIBS) -combpath_test$(EXEEXT): $(combpath_test_OBJECTS) $(combpath_test_DEPENDENCIES) - @rm -f combpath_test$(EXEEXT) - $(LINK) $(combpath_test_OBJECTS) $(combpath_test_LDADD) $(LIBS) -floatnum_test$(EXEEXT): $(floatnum_test_OBJECTS) $(floatnum_test_DEPENDENCIES) - @rm -f floatnum_test$(EXEEXT) - $(LINK) $(floatnum_test_OBJECTS) $(floatnum_test_LDADD) $(LIBS) -genmacro$(EXEEXT): $(genmacro_OBJECTS) $(genmacro_DEPENDENCIES) - @rm -f genmacro$(EXEEXT) - $(genmacro_LINK) $(genmacro_OBJECTS) $(genmacro_LDADD) $(LIBS) -genmodule$(EXEEXT): $(genmodule_OBJECTS) $(genmodule_DEPENDENCIES) 
- @rm -f genmodule$(EXEEXT) - $(genmodule_LINK) $(genmodule_OBJECTS) $(genmodule_LDADD) $(LIBS) -genperf$(EXEEXT): $(genperf_OBJECTS) $(genperf_DEPENDENCIES) - @rm -f genperf$(EXEEXT) - $(genperf_LINK) $(genperf_OBJECTS) $(genperf_LDADD) $(LIBS) -genstring$(EXEEXT): $(genstring_OBJECTS) $(genstring_DEPENDENCIES) - @rm -f genstring$(EXEEXT) - $(genstring_LINK) $(genstring_OBJECTS) $(genstring_LDADD) $(LIBS) -genversion$(EXEEXT): $(genversion_OBJECTS) $(genversion_DEPENDENCIES) - @rm -f genversion$(EXEEXT) - $(genversion_LINK) $(genversion_OBJECTS) $(genversion_LDADD) $(LIBS) -leb128_test$(EXEEXT): $(leb128_test_OBJECTS) $(leb128_test_DEPENDENCIES) - @rm -f leb128_test$(EXEEXT) - $(LINK) $(leb128_test_OBJECTS) $(leb128_test_LDADD) $(LIBS) -re2c$(EXEEXT): $(re2c_OBJECTS) $(re2c_DEPENDENCIES) - @rm -f re2c$(EXEEXT) - $(re2c_LINK) $(re2c_OBJECTS) $(re2c_LDADD) $(LIBS) -splitpath_test$(EXEEXT): $(splitpath_test_OBJECTS) $(splitpath_test_DEPENDENCIES) - @rm -f splitpath_test$(EXEEXT) - $(LINK) $(splitpath_test_OBJECTS) $(splitpath_test_LDADD) $(LIBS) -test_hd$(EXEEXT): $(test_hd_OBJECTS) $(test_hd_DEPENDENCIES) - @rm -f test_hd$(EXEEXT) - $(LINK) $(test_hd_OBJECTS) $(test_hd_LDADD) $(LIBS) -uncstring_test$(EXEEXT): $(uncstring_test_OBJECTS) $(uncstring_test_DEPENDENCIES) - @rm -f uncstring_test$(EXEEXT) - $(LINK) $(uncstring_test_OBJECTS) $(uncstring_test_LDADD) $(LIBS) -yasm$(EXEEXT): $(yasm_OBJECTS) $(yasm_DEPENDENCIES) - @rm -f yasm$(EXEEXT) - $(LINK) $(yasm_OBJECTS) $(yasm_LDADD) $(LIBS) -ytasm$(EXEEXT): $(ytasm_OBJECTS) $(ytasm_DEPENDENCIES) - @rm -f ytasm$(EXEEXT) - $(LINK) $(ytasm_OBJECTS) $(ytasm_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/assocdat.Po -include ./$(DEPDIR)/bc-align.Po -include ./$(DEPDIR)/bc-data.Po -include ./$(DEPDIR)/bc-incbin.Po -include ./$(DEPDIR)/bc-org.Po -include ./$(DEPDIR)/bc-reserve.Po -include ./$(DEPDIR)/bin-objfmt.Po -include ./$(DEPDIR)/bitvect.Po -include 
./$(DEPDIR)/bitvect_test.Po -include ./$(DEPDIR)/bytecode.Po -include ./$(DEPDIR)/coff-objfmt.Po -include ./$(DEPDIR)/combpath_test.Po -include ./$(DEPDIR)/cpp-preproc.Po -include ./$(DEPDIR)/cv-dbgfmt.Po -include ./$(DEPDIR)/cv-symline.Po -include ./$(DEPDIR)/cv-type.Po -include ./$(DEPDIR)/dbg-objfmt.Po -include ./$(DEPDIR)/dwarf2-aranges.Po -include ./$(DEPDIR)/dwarf2-dbgfmt.Po -include ./$(DEPDIR)/dwarf2-info.Po -include ./$(DEPDIR)/dwarf2-line.Po -include ./$(DEPDIR)/elf-objfmt.Po -include ./$(DEPDIR)/elf-x86-amd64.Po -include ./$(DEPDIR)/elf-x86-x86.Po -include ./$(DEPDIR)/elf.Po -include ./$(DEPDIR)/errwarn.Po -include ./$(DEPDIR)/expr.Po -include ./$(DEPDIR)/file.Po -include ./$(DEPDIR)/floatnum.Po -include ./$(DEPDIR)/floatnum_test.Po -include ./$(DEPDIR)/gas-parse.Po -include ./$(DEPDIR)/gas-parser.Po -include ./$(DEPDIR)/gas-token.Po -include ./$(DEPDIR)/hamt.Po -include ./$(DEPDIR)/insn.Po -include ./$(DEPDIR)/intnum.Po -include ./$(DEPDIR)/inttree.Po -include ./$(DEPDIR)/lc3barch.Po -include ./$(DEPDIR)/lc3bbc.Po -include ./$(DEPDIR)/lc3bid.Po -include ./$(DEPDIR)/leb128_test.Po -include ./$(DEPDIR)/linemap.Po -include ./$(DEPDIR)/macho-objfmt.Po -include ./$(DEPDIR)/md5.Po -include ./$(DEPDIR)/mergesort.Po -include ./$(DEPDIR)/module.Po -include ./$(DEPDIR)/nasm-eval.Po -include ./$(DEPDIR)/nasm-listfmt.Po -include ./$(DEPDIR)/nasm-parse.Po -include ./$(DEPDIR)/nasm-parser.Po -include ./$(DEPDIR)/nasm-pp.Po -include ./$(DEPDIR)/nasm-preproc.Po -include ./$(DEPDIR)/nasm-token.Po -include ./$(DEPDIR)/nasmlib.Po -include ./$(DEPDIR)/null-dbgfmt.Po -include ./$(DEPDIR)/phash.Po -include ./$(DEPDIR)/raw-preproc.Po -include ./$(DEPDIR)/rdf-objfmt.Po -include ./$(DEPDIR)/section.Po -include ./$(DEPDIR)/splitpath_test.Po -include ./$(DEPDIR)/stabs-dbgfmt.Po -include ./$(DEPDIR)/strcasecmp.Po -include ./$(DEPDIR)/strsep.Po -include ./$(DEPDIR)/symrec.Po -include ./$(DEPDIR)/tasm-options.Po -include ./$(DEPDIR)/tasm.Po -include ./$(DEPDIR)/test_hd.Po -include 
./$(DEPDIR)/uncstring_test.Po -include ./$(DEPDIR)/valparam.Po -include ./$(DEPDIR)/value.Po -include ./$(DEPDIR)/win64-except.Po -include ./$(DEPDIR)/x86arch.Po -include ./$(DEPDIR)/x86bc.Po -include ./$(DEPDIR)/x86cpu.Po -include ./$(DEPDIR)/x86expr.Po -include ./$(DEPDIR)/x86id.Po -include ./$(DEPDIR)/x86regtmod.Po -include ./$(DEPDIR)/xdf-objfmt.Po -include ./$(DEPDIR)/xmalloc.Po -include ./$(DEPDIR)/xstrdup.Po -include ./$(DEPDIR)/yasm-options.Po -include ./$(DEPDIR)/yasm.Po - -.c.o: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -x86arch.o: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.o -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - mv -f $(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.o `test -f 'modules/arch/x86/x86arch.c' || echo '$(srcdir)/'`modules/arch/x86/x86arch.c - -x86arch.obj: modules/arch/x86/x86arch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86arch.obj -MD -MP -MF $(DEPDIR)/x86arch.Tpo -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - mv -f 
$(DEPDIR)/x86arch.Tpo $(DEPDIR)/x86arch.Po -# source='modules/arch/x86/x86arch.c' object='x86arch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86arch.obj `if test -f 'modules/arch/x86/x86arch.c'; then $(CYGPATH_W) 'modules/arch/x86/x86arch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86arch.c'; fi` - -x86bc.o: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.o -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.o `test -f 'modules/arch/x86/x86bc.c' || echo '$(srcdir)/'`modules/arch/x86/x86bc.c - -x86bc.obj: modules/arch/x86/x86bc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86bc.obj -MD -MP -MF $(DEPDIR)/x86bc.Tpo -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - mv -f $(DEPDIR)/x86bc.Tpo $(DEPDIR)/x86bc.Po -# source='modules/arch/x86/x86bc.c' object='x86bc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86bc.obj `if test -f 'modules/arch/x86/x86bc.c'; then $(CYGPATH_W) 'modules/arch/x86/x86bc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86bc.c'; fi` - -x86expr.o: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.o -MD -MP -MF 
$(DEPDIR)/x86expr.Tpo -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.o `test -f 'modules/arch/x86/x86expr.c' || echo '$(srcdir)/'`modules/arch/x86/x86expr.c - -x86expr.obj: modules/arch/x86/x86expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86expr.obj -MD -MP -MF $(DEPDIR)/x86expr.Tpo -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - mv -f $(DEPDIR)/x86expr.Tpo $(DEPDIR)/x86expr.Po -# source='modules/arch/x86/x86expr.c' object='x86expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86expr.obj `if test -f 'modules/arch/x86/x86expr.c'; then $(CYGPATH_W) 'modules/arch/x86/x86expr.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86expr.c'; fi` - -x86id.o: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.o -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.o `test -f 'modules/arch/x86/x86id.c' || echo '$(srcdir)/'`modules/arch/x86/x86id.c - -x86id.obj: modules/arch/x86/x86id.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86id.obj -MD -MP -MF $(DEPDIR)/x86id.Tpo -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - mv -f $(DEPDIR)/x86id.Tpo $(DEPDIR)/x86id.Po -# source='modules/arch/x86/x86id.c' object='x86id.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o x86id.obj `if test -f 'modules/arch/x86/x86id.c'; then $(CYGPATH_W) 'modules/arch/x86/x86id.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/x86/x86id.c'; fi` - -lc3barch.o: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.o -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.o `test -f 'modules/arch/lc3b/lc3barch.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3barch.c - -lc3barch.obj: modules/arch/lc3b/lc3barch.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3barch.obj -MD -MP -MF $(DEPDIR)/lc3barch.Tpo -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - mv -f $(DEPDIR)/lc3barch.Tpo $(DEPDIR)/lc3barch.Po -# source='modules/arch/lc3b/lc3barch.c' object='lc3barch.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o lc3barch.obj `if test -f 'modules/arch/lc3b/lc3barch.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3barch.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3barch.c'; fi` - -lc3bbc.o: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.o -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.o `test -f 'modules/arch/lc3b/lc3bbc.c' || echo '$(srcdir)/'`modules/arch/lc3b/lc3bbc.c - -lc3bbc.obj: modules/arch/lc3b/lc3bbc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lc3bbc.obj -MD -MP -MF $(DEPDIR)/lc3bbc.Tpo -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - mv -f $(DEPDIR)/lc3bbc.Tpo $(DEPDIR)/lc3bbc.Po -# source='modules/arch/lc3b/lc3bbc.c' object='lc3bbc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lc3bbc.obj `if test -f 'modules/arch/lc3b/lc3bbc.c'; then $(CYGPATH_W) 'modules/arch/lc3b/lc3bbc.c'; else $(CYGPATH_W) '$(srcdir)/modules/arch/lc3b/lc3bbc.c'; fi` - -nasm-listfmt.o: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.o -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - mv -f 
$(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.o `test -f 'modules/listfmts/nasm/nasm-listfmt.c' || echo '$(srcdir)/'`modules/listfmts/nasm/nasm-listfmt.c - -nasm-listfmt.obj: modules/listfmts/nasm/nasm-listfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-listfmt.obj -MD -MP -MF $(DEPDIR)/nasm-listfmt.Tpo -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - mv -f $(DEPDIR)/nasm-listfmt.Tpo $(DEPDIR)/nasm-listfmt.Po -# source='modules/listfmts/nasm/nasm-listfmt.c' object='nasm-listfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-listfmt.obj `if test -f 'modules/listfmts/nasm/nasm-listfmt.c'; then $(CYGPATH_W) 'modules/listfmts/nasm/nasm-listfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/listfmts/nasm/nasm-listfmt.c'; fi` - -gas-parser.o: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.o -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.o `test -f 'modules/parsers/gas/gas-parser.c' || 
echo '$(srcdir)/'`modules/parsers/gas/gas-parser.c - -gas-parser.obj: modules/parsers/gas/gas-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parser.obj -MD -MP -MF $(DEPDIR)/gas-parser.Tpo -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - mv -f $(DEPDIR)/gas-parser.Tpo $(DEPDIR)/gas-parser.Po -# source='modules/parsers/gas/gas-parser.c' object='gas-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parser.obj `if test -f 'modules/parsers/gas/gas-parser.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parser.c'; fi` - -gas-parse.o: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.o -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.o `test -f 'modules/parsers/gas/gas-parse.c' || echo '$(srcdir)/'`modules/parsers/gas/gas-parse.c - -gas-parse.obj: modules/parsers/gas/gas-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT gas-parse.obj -MD -MP -MF $(DEPDIR)/gas-parse.Tpo -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) 
'$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - mv -f $(DEPDIR)/gas-parse.Tpo $(DEPDIR)/gas-parse.Po -# source='modules/parsers/gas/gas-parse.c' object='gas-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o gas-parse.obj `if test -f 'modules/parsers/gas/gas-parse.c'; then $(CYGPATH_W) 'modules/parsers/gas/gas-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/gas/gas-parse.c'; fi` - -nasm-parser.o: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.o -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.o `test -f 'modules/parsers/nasm/nasm-parser.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parser.c - -nasm-parser.obj: modules/parsers/nasm/nasm-parser.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parser.obj -MD -MP -MF $(DEPDIR)/nasm-parser.Tpo -c -o nasm-parser.obj `if test -f 'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - mv -f $(DEPDIR)/nasm-parser.Tpo $(DEPDIR)/nasm-parser.Po -# source='modules/parsers/nasm/nasm-parser.c' object='nasm-parser.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parser.obj `if test -f 
'modules/parsers/nasm/nasm-parser.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parser.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parser.c'; fi` - -nasm-parse.o: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.o -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.o `test -f 'modules/parsers/nasm/nasm-parse.c' || echo '$(srcdir)/'`modules/parsers/nasm/nasm-parse.c - -nasm-parse.obj: modules/parsers/nasm/nasm-parse.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-parse.obj -MD -MP -MF $(DEPDIR)/nasm-parse.Tpo -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - mv -f $(DEPDIR)/nasm-parse.Tpo $(DEPDIR)/nasm-parse.Po -# source='modules/parsers/nasm/nasm-parse.c' object='nasm-parse.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-parse.obj `if test -f 'modules/parsers/nasm/nasm-parse.c'; then $(CYGPATH_W) 'modules/parsers/nasm/nasm-parse.c'; else $(CYGPATH_W) '$(srcdir)/modules/parsers/nasm/nasm-parse.c'; fi` - -nasm-preproc.o: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.o -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o 
nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.o `test -f 'modules/preprocs/nasm/nasm-preproc.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-preproc.c - -nasm-preproc.obj: modules/preprocs/nasm/nasm-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-preproc.obj -MD -MP -MF $(DEPDIR)/nasm-preproc.Tpo -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - mv -f $(DEPDIR)/nasm-preproc.Tpo $(DEPDIR)/nasm-preproc.Po -# source='modules/preprocs/nasm/nasm-preproc.c' object='nasm-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-preproc.obj `if test -f 'modules/preprocs/nasm/nasm-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-preproc.c'; fi` - -nasm-pp.o: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.o -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.o `test -f 'modules/preprocs/nasm/nasm-pp.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-pp.c - -nasm-pp.obj: modules/preprocs/nasm/nasm-pp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-pp.obj -MD -MP -MF $(DEPDIR)/nasm-pp.Tpo -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - mv -f $(DEPDIR)/nasm-pp.Tpo $(DEPDIR)/nasm-pp.Po -# source='modules/preprocs/nasm/nasm-pp.c' object='nasm-pp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-pp.obj `if test -f 'modules/preprocs/nasm/nasm-pp.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-pp.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-pp.c'; fi` - -nasmlib.o: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.o -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.o `test -f 'modules/preprocs/nasm/nasmlib.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasmlib.c - -nasmlib.obj: modules/preprocs/nasm/nasmlib.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasmlib.obj -MD -MP -MF $(DEPDIR)/nasmlib.Tpo -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 
'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - mv -f $(DEPDIR)/nasmlib.Tpo $(DEPDIR)/nasmlib.Po -# source='modules/preprocs/nasm/nasmlib.c' object='nasmlib.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasmlib.obj `if test -f 'modules/preprocs/nasm/nasmlib.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasmlib.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasmlib.c'; fi` - -nasm-eval.o: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.o -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.o `test -f 'modules/preprocs/nasm/nasm-eval.c' || echo '$(srcdir)/'`modules/preprocs/nasm/nasm-eval.c - -nasm-eval.obj: modules/preprocs/nasm/nasm-eval.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT nasm-eval.obj -MD -MP -MF $(DEPDIR)/nasm-eval.Tpo -c -o nasm-eval.obj `if test -f 'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - mv -f $(DEPDIR)/nasm-eval.Tpo $(DEPDIR)/nasm-eval.Po -# source='modules/preprocs/nasm/nasm-eval.c' object='nasm-eval.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o nasm-eval.obj `if test -f 
'modules/preprocs/nasm/nasm-eval.c'; then $(CYGPATH_W) 'modules/preprocs/nasm/nasm-eval.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/nasm/nasm-eval.c'; fi` - -raw-preproc.o: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.o -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.o `test -f 'modules/preprocs/raw/raw-preproc.c' || echo '$(srcdir)/'`modules/preprocs/raw/raw-preproc.c - -raw-preproc.obj: modules/preprocs/raw/raw-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT raw-preproc.obj -MD -MP -MF $(DEPDIR)/raw-preproc.Tpo -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - mv -f $(DEPDIR)/raw-preproc.Tpo $(DEPDIR)/raw-preproc.Po -# source='modules/preprocs/raw/raw-preproc.c' object='raw-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o raw-preproc.obj `if test -f 'modules/preprocs/raw/raw-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/raw/raw-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/raw/raw-preproc.c'; fi` - -cpp-preproc.o: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.o -MD -MP -MF 
$(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.o `test -f 'modules/preprocs/cpp/cpp-preproc.c' || echo '$(srcdir)/'`modules/preprocs/cpp/cpp-preproc.c - -cpp-preproc.obj: modules/preprocs/cpp/cpp-preproc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cpp-preproc.obj -MD -MP -MF $(DEPDIR)/cpp-preproc.Tpo -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - mv -f $(DEPDIR)/cpp-preproc.Tpo $(DEPDIR)/cpp-preproc.Po -# source='modules/preprocs/cpp/cpp-preproc.c' object='cpp-preproc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cpp-preproc.obj `if test -f 'modules/preprocs/cpp/cpp-preproc.c'; then $(CYGPATH_W) 'modules/preprocs/cpp/cpp-preproc.c'; else $(CYGPATH_W) '$(srcdir)/modules/preprocs/cpp/cpp-preproc.c'; fi` - -cv-dbgfmt.o: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.o -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.o `test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-dbgfmt.c - -cv-dbgfmt.obj: modules/dbgfmts/codeview/cv-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-dbgfmt.obj -MD -MP -MF $(DEPDIR)/cv-dbgfmt.Tpo -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - mv -f $(DEPDIR)/cv-dbgfmt.Tpo $(DEPDIR)/cv-dbgfmt.Po -# source='modules/dbgfmts/codeview/cv-dbgfmt.c' object='cv-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-dbgfmt.obj `if test -f 'modules/dbgfmts/codeview/cv-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-dbgfmt.c'; fi` - -cv-symline.o: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-symline.o -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.o `test -f 'modules/dbgfmts/codeview/cv-symline.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-symline.c - -cv-symline.obj: modules/dbgfmts/codeview/cv-symline.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -MT cv-symline.obj -MD -MP -MF $(DEPDIR)/cv-symline.Tpo -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - mv -f $(DEPDIR)/cv-symline.Tpo $(DEPDIR)/cv-symline.Po -# source='modules/dbgfmts/codeview/cv-symline.c' object='cv-symline.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-symline.obj `if test -f 'modules/dbgfmts/codeview/cv-symline.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-symline.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-symline.c'; fi` - -cv-type.o: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.o -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.o `test -f 'modules/dbgfmts/codeview/cv-type.c' || echo '$(srcdir)/'`modules/dbgfmts/codeview/cv-type.c - -cv-type.obj: modules/dbgfmts/codeview/cv-type.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cv-type.obj -MD -MP -MF $(DEPDIR)/cv-type.Tpo -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - mv -f $(DEPDIR)/cv-type.Tpo $(DEPDIR)/cv-type.Po -# source='modules/dbgfmts/codeview/cv-type.c' object='cv-type.obj' libtool=no \ 
-# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cv-type.obj `if test -f 'modules/dbgfmts/codeview/cv-type.c'; then $(CYGPATH_W) 'modules/dbgfmts/codeview/cv-type.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/codeview/cv-type.c'; fi` - -dwarf2-dbgfmt.o: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.o -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - -dwarf2-dbgfmt.obj: modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-dbgfmt.obj -MD -MP -MF $(DEPDIR)/dwarf2-dbgfmt.Tpo -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - mv -f $(DEPDIR)/dwarf2-dbgfmt.Tpo $(DEPDIR)/dwarf2-dbgfmt.Po -# source='modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c' object='dwarf2-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-dbgfmt.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; else 
$(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c'; fi` - -dwarf2-line.o: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.o -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-line.c - -dwarf2-line.obj: modules/dbgfmts/dwarf2/dwarf2-line.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-line.obj -MD -MP -MF $(DEPDIR)/dwarf2-line.Tpo -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - mv -f $(DEPDIR)/dwarf2-line.Tpo $(DEPDIR)/dwarf2-line.Po -# source='modules/dbgfmts/dwarf2/dwarf2-line.c' object='dwarf2-line.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-line.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-line.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-line.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-line.c'; fi` - -dwarf2-aranges.o: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.o -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.o `test -f 
'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-aranges.c - -dwarf2-aranges.obj: modules/dbgfmts/dwarf2/dwarf2-aranges.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-aranges.obj -MD -MP -MF $(DEPDIR)/dwarf2-aranges.Tpo -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - mv -f $(DEPDIR)/dwarf2-aranges.Tpo $(DEPDIR)/dwarf2-aranges.Po -# source='modules/dbgfmts/dwarf2/dwarf2-aranges.c' object='dwarf2-aranges.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-aranges.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-aranges.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-aranges.c'; fi` - -dwarf2-info.o: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.o -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.o' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.o `test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c' || echo '$(srcdir)/'`modules/dbgfmts/dwarf2/dwarf2-info.c - -dwarf2-info.obj: modules/dbgfmts/dwarf2/dwarf2-info.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dwarf2-info.obj -MD -MP -MF $(DEPDIR)/dwarf2-info.Tpo -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - mv -f $(DEPDIR)/dwarf2-info.Tpo $(DEPDIR)/dwarf2-info.Po -# source='modules/dbgfmts/dwarf2/dwarf2-info.c' object='dwarf2-info.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dwarf2-info.obj `if test -f 'modules/dbgfmts/dwarf2/dwarf2-info.c'; then $(CYGPATH_W) 'modules/dbgfmts/dwarf2/dwarf2-info.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/dwarf2/dwarf2-info.c'; fi` - -null-dbgfmt.o: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.o -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.o `test -f 'modules/dbgfmts/null/null-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/null/null-dbgfmt.c - -null-dbgfmt.obj: modules/dbgfmts/null/null-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT null-dbgfmt.obj -MD -MP -MF $(DEPDIR)/null-dbgfmt.Tpo -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - mv -f $(DEPDIR)/null-dbgfmt.Tpo $(DEPDIR)/null-dbgfmt.Po -# source='modules/dbgfmts/null/null-dbgfmt.c' object='null-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o null-dbgfmt.obj `if test -f 'modules/dbgfmts/null/null-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/null/null-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/null/null-dbgfmt.c'; fi` - -stabs-dbgfmt.o: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.o -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - mv -f $(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.o `test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c' || echo '$(srcdir)/'`modules/dbgfmts/stabs/stabs-dbgfmt.c - -stabs-dbgfmt.obj: modules/dbgfmts/stabs/stabs-dbgfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT stabs-dbgfmt.obj -MD -MP -MF $(DEPDIR)/stabs-dbgfmt.Tpo -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - mv -f 
$(DEPDIR)/stabs-dbgfmt.Tpo $(DEPDIR)/stabs-dbgfmt.Po -# source='modules/dbgfmts/stabs/stabs-dbgfmt.c' object='stabs-dbgfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o stabs-dbgfmt.obj `if test -f 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; then $(CYGPATH_W) 'modules/dbgfmts/stabs/stabs-dbgfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/dbgfmts/stabs/stabs-dbgfmt.c'; fi` - -dbg-objfmt.o: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.o -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.o `test -f 'modules/objfmts/dbg/dbg-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/dbg/dbg-objfmt.c - -dbg-objfmt.obj: modules/objfmts/dbg/dbg-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT dbg-objfmt.obj -MD -MP -MF $(DEPDIR)/dbg-objfmt.Tpo -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - mv -f $(DEPDIR)/dbg-objfmt.Tpo $(DEPDIR)/dbg-objfmt.Po -# source='modules/objfmts/dbg/dbg-objfmt.c' object='dbg-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o dbg-objfmt.obj `if test -f 'modules/objfmts/dbg/dbg-objfmt.c'; then $(CYGPATH_W) 
'modules/objfmts/dbg/dbg-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/dbg/dbg-objfmt.c'; fi` - -bin-objfmt.o: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.o -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.o `test -f 'modules/objfmts/bin/bin-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/bin/bin-objfmt.c - -bin-objfmt.obj: modules/objfmts/bin/bin-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bin-objfmt.obj -MD -MP -MF $(DEPDIR)/bin-objfmt.Tpo -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - mv -f $(DEPDIR)/bin-objfmt.Tpo $(DEPDIR)/bin-objfmt.Po -# source='modules/objfmts/bin/bin-objfmt.c' object='bin-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bin-objfmt.obj `if test -f 'modules/objfmts/bin/bin-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/bin/bin-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/bin/bin-objfmt.c'; fi` - -elf.o: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.o -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - mv -f $(DEPDIR)/elf.Tpo 
$(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.o `test -f 'modules/objfmts/elf/elf.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf.c - -elf.obj: modules/objfmts/elf/elf.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf.obj -MD -MP -MF $(DEPDIR)/elf.Tpo -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - mv -f $(DEPDIR)/elf.Tpo $(DEPDIR)/elf.Po -# source='modules/objfmts/elf/elf.c' object='elf.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf.obj `if test -f 'modules/objfmts/elf/elf.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf.c'; fi` - -elf-objfmt.o: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.o -MD -MP -MF $(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.o `test -f 'modules/objfmts/elf/elf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-objfmt.c - -elf-objfmt.obj: modules/objfmts/elf/elf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-objfmt.obj -MD -MP -MF 
$(DEPDIR)/elf-objfmt.Tpo -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - mv -f $(DEPDIR)/elf-objfmt.Tpo $(DEPDIR)/elf-objfmt.Po -# source='modules/objfmts/elf/elf-objfmt.c' object='elf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-objfmt.obj `if test -f 'modules/objfmts/elf/elf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-objfmt.c'; fi` - -elf-x86-x86.o: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.o -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.o `test -f 'modules/objfmts/elf/elf-x86-x86.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-x86.c - -elf-x86-x86.obj: modules/objfmts/elf/elf-x86-x86.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-x86.obj -MD -MP -MF $(DEPDIR)/elf-x86-x86.Tpo -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - mv -f $(DEPDIR)/elf-x86-x86.Tpo $(DEPDIR)/elf-x86-x86.Po -# source='modules/objfmts/elf/elf-x86-x86.c' object='elf-x86-x86.obj' libtool=no \ -# DEPDIR=$(DEPDIR) 
$(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-x86.obj `if test -f 'modules/objfmts/elf/elf-x86-x86.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-x86.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-x86.c'; fi` - -elf-x86-amd64.o: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.o -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.o `test -f 'modules/objfmts/elf/elf-x86-amd64.c' || echo '$(srcdir)/'`modules/objfmts/elf/elf-x86-amd64.c - -elf-x86-amd64.obj: modules/objfmts/elf/elf-x86-amd64.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT elf-x86-amd64.obj -MD -MP -MF $(DEPDIR)/elf-x86-amd64.Tpo -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` - mv -f $(DEPDIR)/elf-x86-amd64.Tpo $(DEPDIR)/elf-x86-amd64.Po -# source='modules/objfmts/elf/elf-x86-amd64.c' object='elf-x86-amd64.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o elf-x86-amd64.obj `if test -f 'modules/objfmts/elf/elf-x86-amd64.c'; then $(CYGPATH_W) 'modules/objfmts/elf/elf-x86-amd64.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/elf/elf-x86-amd64.c'; fi` 
- -coff-objfmt.o: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.o -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.o `test -f 'modules/objfmts/coff/coff-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/coff/coff-objfmt.c - -coff-objfmt.obj: modules/objfmts/coff/coff-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT coff-objfmt.obj -MD -MP -MF $(DEPDIR)/coff-objfmt.Tpo -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - mv -f $(DEPDIR)/coff-objfmt.Tpo $(DEPDIR)/coff-objfmt.Po -# source='modules/objfmts/coff/coff-objfmt.c' object='coff-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o coff-objfmt.obj `if test -f 'modules/objfmts/coff/coff-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/coff/coff-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/coff-objfmt.c'; fi` - -win64-except.o: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.o -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - mv -f 
$(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.o `test -f 'modules/objfmts/coff/win64-except.c' || echo '$(srcdir)/'`modules/objfmts/coff/win64-except.c - -win64-except.obj: modules/objfmts/coff/win64-except.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT win64-except.obj -MD -MP -MF $(DEPDIR)/win64-except.Tpo -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - mv -f $(DEPDIR)/win64-except.Tpo $(DEPDIR)/win64-except.Po -# source='modules/objfmts/coff/win64-except.c' object='win64-except.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o win64-except.obj `if test -f 'modules/objfmts/coff/win64-except.c'; then $(CYGPATH_W) 'modules/objfmts/coff/win64-except.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/coff/win64-except.c'; fi` - -macho-objfmt.o: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.o -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.o `test -f 'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.o `test -f 
'modules/objfmts/macho/macho-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/macho/macho-objfmt.c - -macho-objfmt.obj: modules/objfmts/macho/macho-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT macho-objfmt.obj -MD -MP -MF $(DEPDIR)/macho-objfmt.Tpo -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - mv -f $(DEPDIR)/macho-objfmt.Tpo $(DEPDIR)/macho-objfmt.Po -# source='modules/objfmts/macho/macho-objfmt.c' object='macho-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o macho-objfmt.obj `if test -f 'modules/objfmts/macho/macho-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/macho/macho-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/macho/macho-objfmt.c'; fi` - -rdf-objfmt.o: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.o -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.o `test -f 'modules/objfmts/rdf/rdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/rdf/rdf-objfmt.c - -rdf-objfmt.obj: modules/objfmts/rdf/rdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT rdf-objfmt.obj -MD -MP -MF $(DEPDIR)/rdf-objfmt.Tpo -c -o rdf-objfmt.obj `if test -f 
'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - mv -f $(DEPDIR)/rdf-objfmt.Tpo $(DEPDIR)/rdf-objfmt.Po -# source='modules/objfmts/rdf/rdf-objfmt.c' object='rdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o rdf-objfmt.obj `if test -f 'modules/objfmts/rdf/rdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/rdf/rdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/rdf/rdf-objfmt.c'; fi` - -xdf-objfmt.o: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.o -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.o `test -f 'modules/objfmts/xdf/xdf-objfmt.c' || echo '$(srcdir)/'`modules/objfmts/xdf/xdf-objfmt.c - -xdf-objfmt.obj: modules/objfmts/xdf/xdf-objfmt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xdf-objfmt.obj -MD -MP -MF $(DEPDIR)/xdf-objfmt.Tpo -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - mv -f $(DEPDIR)/xdf-objfmt.Tpo $(DEPDIR)/xdf-objfmt.Po -# source='modules/objfmts/xdf/xdf-objfmt.c' object='xdf-objfmt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) 
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xdf-objfmt.obj `if test -f 'modules/objfmts/xdf/xdf-objfmt.c'; then $(CYGPATH_W) 'modules/objfmts/xdf/xdf-objfmt.c'; else $(CYGPATH_W) '$(srcdir)/modules/objfmts/xdf/xdf-objfmt.c'; fi` - -assocdat.o: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.o -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.o `test -f 'libyasm/assocdat.c' || echo '$(srcdir)/'`libyasm/assocdat.c - -assocdat.obj: libyasm/assocdat.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT assocdat.obj -MD -MP -MF $(DEPDIR)/assocdat.Tpo -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - mv -f $(DEPDIR)/assocdat.Tpo $(DEPDIR)/assocdat.Po -# source='libyasm/assocdat.c' object='assocdat.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o assocdat.obj `if test -f 'libyasm/assocdat.c'; then $(CYGPATH_W) 'libyasm/assocdat.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/assocdat.c'; fi` - -bitvect.o: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.o -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.o `test -f 'libyasm/bitvect.c' || echo '$(srcdir)/'`libyasm/bitvect.c - -bitvect.obj: libyasm/bitvect.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect.obj -MD -MP -MF $(DEPDIR)/bitvect.Tpo -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - mv -f $(DEPDIR)/bitvect.Tpo $(DEPDIR)/bitvect.Po -# source='libyasm/bitvect.c' object='bitvect.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect.obj `if test -f 'libyasm/bitvect.c'; then $(CYGPATH_W) 'libyasm/bitvect.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bitvect.c'; fi` - -bc-align.o: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.o -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.o `test -f 'libyasm/bc-align.c' || echo '$(srcdir)/'`libyasm/bc-align.c - -bc-align.obj: libyasm/bc-align.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-align.obj -MD -MP -MF $(DEPDIR)/bc-align.Tpo -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - mv -f $(DEPDIR)/bc-align.Tpo $(DEPDIR)/bc-align.Po -# source='libyasm/bc-align.c' object='bc-align.obj' libtool=no \ -# 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-align.obj `if test -f 'libyasm/bc-align.c'; then $(CYGPATH_W) 'libyasm/bc-align.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-align.c'; fi` - -bc-data.o: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.o -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.o `test -f 'libyasm/bc-data.c' || echo '$(srcdir)/'`libyasm/bc-data.c - -bc-data.obj: libyasm/bc-data.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-data.obj -MD -MP -MF $(DEPDIR)/bc-data.Tpo -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - mv -f $(DEPDIR)/bc-data.Tpo $(DEPDIR)/bc-data.Po -# source='libyasm/bc-data.c' object='bc-data.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-data.obj `if test -f 'libyasm/bc-data.c'; then $(CYGPATH_W) 'libyasm/bc-data.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-data.c'; fi` - -bc-incbin.o: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.o -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' 
object='bc-incbin.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.o `test -f 'libyasm/bc-incbin.c' || echo '$(srcdir)/'`libyasm/bc-incbin.c - -bc-incbin.obj: libyasm/bc-incbin.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-incbin.obj -MD -MP -MF $(DEPDIR)/bc-incbin.Tpo -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - mv -f $(DEPDIR)/bc-incbin.Tpo $(DEPDIR)/bc-incbin.Po -# source='libyasm/bc-incbin.c' object='bc-incbin.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-incbin.obj `if test -f 'libyasm/bc-incbin.c'; then $(CYGPATH_W) 'libyasm/bc-incbin.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-incbin.c'; fi` - -bc-org.o: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.o -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# source='libyasm/bc-org.c' object='bc-org.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.o `test -f 'libyasm/bc-org.c' || echo '$(srcdir)/'`libyasm/bc-org.c - -bc-org.obj: libyasm/bc-org.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-org.obj -MD -MP -MF $(DEPDIR)/bc-org.Tpo -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - mv -f $(DEPDIR)/bc-org.Tpo $(DEPDIR)/bc-org.Po -# 
source='libyasm/bc-org.c' object='bc-org.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-org.obj `if test -f 'libyasm/bc-org.c'; then $(CYGPATH_W) 'libyasm/bc-org.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-org.c'; fi` - -bc-reserve.o: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.o -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.o `test -f 'libyasm/bc-reserve.c' || echo '$(srcdir)/'`libyasm/bc-reserve.c - -bc-reserve.obj: libyasm/bc-reserve.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bc-reserve.obj -MD -MP -MF $(DEPDIR)/bc-reserve.Tpo -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - mv -f $(DEPDIR)/bc-reserve.Tpo $(DEPDIR)/bc-reserve.Po -# source='libyasm/bc-reserve.c' object='bc-reserve.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bc-reserve.obj `if test -f 'libyasm/bc-reserve.c'; then $(CYGPATH_W) 'libyasm/bc-reserve.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bc-reserve.c'; fi` - -bytecode.o: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.o -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.o `test -f 'libyasm/bytecode.c' || 
echo '$(srcdir)/'`libyasm/bytecode.c - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.o `test -f 'libyasm/bytecode.c' || echo '$(srcdir)/'`libyasm/bytecode.c - -bytecode.obj: libyasm/bytecode.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bytecode.obj -MD -MP -MF $(DEPDIR)/bytecode.Tpo -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - mv -f $(DEPDIR)/bytecode.Tpo $(DEPDIR)/bytecode.Po -# source='libyasm/bytecode.c' object='bytecode.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bytecode.obj `if test -f 'libyasm/bytecode.c'; then $(CYGPATH_W) 'libyasm/bytecode.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/bytecode.c'; fi` - -errwarn.o: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.o -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.o `test -f 'libyasm/errwarn.c' || echo '$(srcdir)/'`libyasm/errwarn.c - -errwarn.obj: libyasm/errwarn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT errwarn.obj -MD -MP -MF $(DEPDIR)/errwarn.Tpo -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 
'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - mv -f $(DEPDIR)/errwarn.Tpo $(DEPDIR)/errwarn.Po -# source='libyasm/errwarn.c' object='errwarn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o errwarn.obj `if test -f 'libyasm/errwarn.c'; then $(CYGPATH_W) 'libyasm/errwarn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/errwarn.c'; fi` - -expr.o: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.o -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.o `test -f 'libyasm/expr.c' || echo '$(srcdir)/'`libyasm/expr.c - -expr.obj: libyasm/expr.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT expr.obj -MD -MP -MF $(DEPDIR)/expr.Tpo -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - mv -f $(DEPDIR)/expr.Tpo $(DEPDIR)/expr.Po -# source='libyasm/expr.c' object='expr.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o expr.obj `if test -f 'libyasm/expr.c'; then $(CYGPATH_W) 'libyasm/expr.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/expr.c'; fi` - -file.o: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.o -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - mv -f $(DEPDIR)/file.Tpo 
$(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.o `test -f 'libyasm/file.c' || echo '$(srcdir)/'`libyasm/file.c - -file.obj: libyasm/file.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT file.obj -MD -MP -MF $(DEPDIR)/file.Tpo -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - mv -f $(DEPDIR)/file.Tpo $(DEPDIR)/file.Po -# source='libyasm/file.c' object='file.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o file.obj `if test -f 'libyasm/file.c'; then $(CYGPATH_W) 'libyasm/file.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/file.c'; fi` - -floatnum.o: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.o -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# source='libyasm/floatnum.c' object='floatnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.o `test -f 'libyasm/floatnum.c' || echo '$(srcdir)/'`libyasm/floatnum.c - -floatnum.obj: libyasm/floatnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum.obj -MD -MP -MF $(DEPDIR)/floatnum.Tpo -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - mv -f $(DEPDIR)/floatnum.Tpo $(DEPDIR)/floatnum.Po -# 
source='libyasm/floatnum.c' object='floatnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum.obj `if test -f 'libyasm/floatnum.c'; then $(CYGPATH_W) 'libyasm/floatnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/floatnum.c'; fi` - -hamt.o: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.o -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.o `test -f 'libyasm/hamt.c' || echo '$(srcdir)/'`libyasm/hamt.c - -hamt.obj: libyasm/hamt.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT hamt.obj -MD -MP -MF $(DEPDIR)/hamt.Tpo -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - mv -f $(DEPDIR)/hamt.Tpo $(DEPDIR)/hamt.Po -# source='libyasm/hamt.c' object='hamt.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o hamt.obj `if test -f 'libyasm/hamt.c'; then $(CYGPATH_W) 'libyasm/hamt.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/hamt.c'; fi` - -insn.o: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.o -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.o `test -f 'libyasm/insn.c' || echo '$(srcdir)/'`libyasm/insn.c - -insn.obj: libyasm/insn.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT insn.obj -MD -MP -MF $(DEPDIR)/insn.Tpo -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - mv -f $(DEPDIR)/insn.Tpo $(DEPDIR)/insn.Po -# source='libyasm/insn.c' object='insn.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o insn.obj `if test -f 'libyasm/insn.c'; then $(CYGPATH_W) 'libyasm/insn.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/insn.c'; fi` - -intnum.o: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.o -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o intnum.o `test -f 'libyasm/intnum.c' || echo '$(srcdir)/'`libyasm/intnum.c - -intnum.obj: libyasm/intnum.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT intnum.obj -MD -MP -MF $(DEPDIR)/intnum.Tpo -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - mv -f $(DEPDIR)/intnum.Tpo $(DEPDIR)/intnum.Po -# source='libyasm/intnum.c' object='intnum.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o intnum.obj `if test -f 'libyasm/intnum.c'; then $(CYGPATH_W) 'libyasm/intnum.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/intnum.c'; fi` - -inttree.o: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.o -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.o `test -f 'libyasm/inttree.c' || echo '$(srcdir)/'`libyasm/inttree.c - -inttree.obj: libyasm/inttree.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT inttree.obj -MD -MP -MF $(DEPDIR)/inttree.Tpo -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - mv -f $(DEPDIR)/inttree.Tpo $(DEPDIR)/inttree.Po -# source='libyasm/inttree.c' object='inttree.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o inttree.obj `if test -f 'libyasm/inttree.c'; then $(CYGPATH_W) 'libyasm/inttree.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/inttree.c'; fi` - -linemap.o: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.o -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o linemap.o `test -f 'libyasm/linemap.c' || echo '$(srcdir)/'`libyasm/linemap.c - -linemap.obj: libyasm/linemap.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT linemap.obj -MD -MP -MF $(DEPDIR)/linemap.Tpo -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - mv -f $(DEPDIR)/linemap.Tpo $(DEPDIR)/linemap.Po -# source='libyasm/linemap.c' object='linemap.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o linemap.obj `if test -f 'libyasm/linemap.c'; then $(CYGPATH_W) 'libyasm/linemap.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/linemap.c'; fi` - -md5.o: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.o -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.o `test -f 'libyasm/md5.c' || echo '$(srcdir)/'`libyasm/md5.c - -md5.obj: libyasm/md5.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT md5.obj -MD -MP -MF $(DEPDIR)/md5.Tpo -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - mv -f $(DEPDIR)/md5.Tpo $(DEPDIR)/md5.Po -# source='libyasm/md5.c' object='md5.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o md5.obj `if test -f 'libyasm/md5.c'; then $(CYGPATH_W) 'libyasm/md5.c'; else 
$(CYGPATH_W) '$(srcdir)/libyasm/md5.c'; fi` - -mergesort.o: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.o -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.o `test -f 'libyasm/mergesort.c' || echo '$(srcdir)/'`libyasm/mergesort.c - -mergesort.obj: libyasm/mergesort.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mergesort.obj -MD -MP -MF $(DEPDIR)/mergesort.Tpo -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - mv -f $(DEPDIR)/mergesort.Tpo $(DEPDIR)/mergesort.Po -# source='libyasm/mergesort.c' object='mergesort.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mergesort.obj `if test -f 'libyasm/mergesort.c'; then $(CYGPATH_W) 'libyasm/mergesort.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/mergesort.c'; fi` - -phash.o: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.o -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.o `test -f 'libyasm/phash.c' || echo '$(srcdir)/'`libyasm/phash.c - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.o `test -f 'libyasm/phash.c' || echo 
'$(srcdir)/'`libyasm/phash.c - -phash.obj: libyasm/phash.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT phash.obj -MD -MP -MF $(DEPDIR)/phash.Tpo -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - mv -f $(DEPDIR)/phash.Tpo $(DEPDIR)/phash.Po -# source='libyasm/phash.c' object='phash.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o phash.obj `if test -f 'libyasm/phash.c'; then $(CYGPATH_W) 'libyasm/phash.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/phash.c'; fi` - -section.o: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.o -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.o `test -f 'libyasm/section.c' || echo '$(srcdir)/'`libyasm/section.c - -section.obj: libyasm/section.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT section.obj -MD -MP -MF $(DEPDIR)/section.Tpo -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - mv -f $(DEPDIR)/section.Tpo $(DEPDIR)/section.Po -# source='libyasm/section.c' object='section.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o section.obj `if test -f 'libyasm/section.c'; then $(CYGPATH_W) 
'libyasm/section.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/section.c'; fi` - -strcasecmp.o: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.o -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.o `test -f 'libyasm/strcasecmp.c' || echo '$(srcdir)/'`libyasm/strcasecmp.c - -strcasecmp.obj: libyasm/strcasecmp.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strcasecmp.obj -MD -MP -MF $(DEPDIR)/strcasecmp.Tpo -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - mv -f $(DEPDIR)/strcasecmp.Tpo $(DEPDIR)/strcasecmp.Po -# source='libyasm/strcasecmp.c' object='strcasecmp.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strcasecmp.obj `if test -f 'libyasm/strcasecmp.c'; then $(CYGPATH_W) 'libyasm/strcasecmp.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strcasecmp.c'; fi` - -strsep.o: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.o -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) -c -o strsep.o `test -f 'libyasm/strsep.c' || echo '$(srcdir)/'`libyasm/strsep.c - -strsep.obj: libyasm/strsep.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT strsep.obj -MD -MP -MF $(DEPDIR)/strsep.Tpo -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - mv -f $(DEPDIR)/strsep.Tpo $(DEPDIR)/strsep.Po -# source='libyasm/strsep.c' object='strsep.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o strsep.obj `if test -f 'libyasm/strsep.c'; then $(CYGPATH_W) 'libyasm/strsep.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/strsep.c'; fi` - -symrec.o: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.o -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.o `test -f 'libyasm/symrec.c' || echo '$(srcdir)/'`libyasm/symrec.c - -symrec.obj: libyasm/symrec.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT symrec.obj -MD -MP -MF $(DEPDIR)/symrec.Tpo -c -o symrec.obj `if test -f 'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - mv -f $(DEPDIR)/symrec.Tpo $(DEPDIR)/symrec.Po -# source='libyasm/symrec.c' object='symrec.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o symrec.obj `if test -f 
'libyasm/symrec.c'; then $(CYGPATH_W) 'libyasm/symrec.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/symrec.c'; fi` - -valparam.o: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.o -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.o `test -f 'libyasm/valparam.c' || echo '$(srcdir)/'`libyasm/valparam.c - -valparam.obj: libyasm/valparam.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT valparam.obj -MD -MP -MF $(DEPDIR)/valparam.Tpo -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - mv -f $(DEPDIR)/valparam.Tpo $(DEPDIR)/valparam.Po -# source='libyasm/valparam.c' object='valparam.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o valparam.obj `if test -f 'libyasm/valparam.c'; then $(CYGPATH_W) 'libyasm/valparam.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/valparam.c'; fi` - -value.o: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.o -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.o `test -f 'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.o `test -f 
'libyasm/value.c' || echo '$(srcdir)/'`libyasm/value.c - -value.obj: libyasm/value.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT value.obj -MD -MP -MF $(DEPDIR)/value.Tpo -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - mv -f $(DEPDIR)/value.Tpo $(DEPDIR)/value.Po -# source='libyasm/value.c' object='value.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o value.obj `if test -f 'libyasm/value.c'; then $(CYGPATH_W) 'libyasm/value.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/value.c'; fi` - -xmalloc.o: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.o -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.o `test -f 'libyasm/xmalloc.c' || echo '$(srcdir)/'`libyasm/xmalloc.c - -xmalloc.obj: libyasm/xmalloc.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xmalloc.obj -MD -MP -MF $(DEPDIR)/xmalloc.Tpo -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then $(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - mv -f $(DEPDIR)/xmalloc.Tpo $(DEPDIR)/xmalloc.Po -# source='libyasm/xmalloc.c' object='xmalloc.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xmalloc.obj `if test -f 'libyasm/xmalloc.c'; then 
$(CYGPATH_W) 'libyasm/xmalloc.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xmalloc.c'; fi` - -xstrdup.o: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.o -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.o `test -f 'libyasm/xstrdup.c' || echo '$(srcdir)/'`libyasm/xstrdup.c - -xstrdup.obj: libyasm/xstrdup.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xstrdup.obj -MD -MP -MF $(DEPDIR)/xstrdup.Tpo -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - mv -f $(DEPDIR)/xstrdup.Tpo $(DEPDIR)/xstrdup.Po -# source='libyasm/xstrdup.c' object='xstrdup.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xstrdup.obj `if test -f 'libyasm/xstrdup.c'; then $(CYGPATH_W) 'libyasm/xstrdup.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/xstrdup.c'; fi` - -bitvect_test.o: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.o -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.o `test -f 'libyasm/tests/bitvect_test.c' || echo '$(srcdir)/'`libyasm/tests/bitvect_test.c - -bitvect_test.obj: libyasm/tests/bitvect_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bitvect_test.obj -MD -MP -MF $(DEPDIR)/bitvect_test.Tpo -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - mv -f $(DEPDIR)/bitvect_test.Tpo $(DEPDIR)/bitvect_test.Po -# source='libyasm/tests/bitvect_test.c' object='bitvect_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bitvect_test.obj `if test -f 'libyasm/tests/bitvect_test.c'; then $(CYGPATH_W) 'libyasm/tests/bitvect_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/bitvect_test.c'; fi` - -combpath_test.o: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.o -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.o `test -f 'libyasm/tests/combpath_test.c' || echo '$(srcdir)/'`libyasm/tests/combpath_test.c - -combpath_test.obj: libyasm/tests/combpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT combpath_test.obj -MD -MP -MF $(DEPDIR)/combpath_test.Tpo -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then 
$(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - mv -f $(DEPDIR)/combpath_test.Tpo $(DEPDIR)/combpath_test.Po -# source='libyasm/tests/combpath_test.c' object='combpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o combpath_test.obj `if test -f 'libyasm/tests/combpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/combpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/combpath_test.c'; fi` - -floatnum_test.o: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.o -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o floatnum_test.o `test -f 'libyasm/tests/floatnum_test.c' || echo '$(srcdir)/'`libyasm/tests/floatnum_test.c - -floatnum_test.obj: libyasm/tests/floatnum_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT floatnum_test.obj -MD -MP -MF $(DEPDIR)/floatnum_test.Tpo -c -o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - mv -f $(DEPDIR)/floatnum_test.Tpo $(DEPDIR)/floatnum_test.Po -# source='libyasm/tests/floatnum_test.c' object='floatnum_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c 
-o floatnum_test.obj `if test -f 'libyasm/tests/floatnum_test.c'; then $(CYGPATH_W) 'libyasm/tests/floatnum_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/floatnum_test.c'; fi` - -leb128_test.o: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.o -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.o `test -f 'libyasm/tests/leb128_test.c' || echo '$(srcdir)/'`libyasm/tests/leb128_test.c - -leb128_test.obj: libyasm/tests/leb128_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT leb128_test.obj -MD -MP -MF $(DEPDIR)/leb128_test.Tpo -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - mv -f $(DEPDIR)/leb128_test.Tpo $(DEPDIR)/leb128_test.Po -# source='libyasm/tests/leb128_test.c' object='leb128_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o leb128_test.obj `if test -f 'libyasm/tests/leb128_test.c'; then $(CYGPATH_W) 'libyasm/tests/leb128_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/leb128_test.c'; fi` - -splitpath_test.o: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.o -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || 
echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.o `test -f 'libyasm/tests/splitpath_test.c' || echo '$(srcdir)/'`libyasm/tests/splitpath_test.c - -splitpath_test.obj: libyasm/tests/splitpath_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT splitpath_test.obj -MD -MP -MF $(DEPDIR)/splitpath_test.Tpo -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - mv -f $(DEPDIR)/splitpath_test.Tpo $(DEPDIR)/splitpath_test.Po -# source='libyasm/tests/splitpath_test.c' object='splitpath_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o splitpath_test.obj `if test -f 'libyasm/tests/splitpath_test.c'; then $(CYGPATH_W) 'libyasm/tests/splitpath_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/splitpath_test.c'; fi` - -uncstring_test.o: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.o -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.o `test -f 'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.o `test -f 
'libyasm/tests/uncstring_test.c' || echo '$(srcdir)/'`libyasm/tests/uncstring_test.c - -uncstring_test.obj: libyasm/tests/uncstring_test.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT uncstring_test.obj -MD -MP -MF $(DEPDIR)/uncstring_test.Tpo -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - mv -f $(DEPDIR)/uncstring_test.Tpo $(DEPDIR)/uncstring_test.Po -# source='libyasm/tests/uncstring_test.c' object='uncstring_test.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o uncstring_test.obj `if test -f 'libyasm/tests/uncstring_test.c'; then $(CYGPATH_W) 'libyasm/tests/uncstring_test.c'; else $(CYGPATH_W) '$(srcdir)/libyasm/tests/uncstring_test.c'; fi` - -yasm.o: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.o -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' object='yasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.o `test -f 'frontends/yasm/yasm.c' || echo '$(srcdir)/'`frontends/yasm/yasm.c - -yasm.obj: frontends/yasm/yasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm.obj -MD -MP -MF $(DEPDIR)/yasm.Tpo -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - mv -f $(DEPDIR)/yasm.Tpo $(DEPDIR)/yasm.Po -# source='frontends/yasm/yasm.c' 
object='yasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm.obj `if test -f 'frontends/yasm/yasm.c'; then $(CYGPATH_W) 'frontends/yasm/yasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm.c'; fi` - -yasm-options.o: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.o -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.o `test -f 'frontends/yasm/yasm-options.c' || echo '$(srcdir)/'`frontends/yasm/yasm-options.c - -yasm-options.obj: frontends/yasm/yasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT yasm-options.obj -MD -MP -MF $(DEPDIR)/yasm-options.Tpo -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - mv -f $(DEPDIR)/yasm-options.Tpo $(DEPDIR)/yasm-options.Po -# source='frontends/yasm/yasm-options.c' object='yasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o yasm-options.obj `if test -f 'frontends/yasm/yasm-options.c'; then $(CYGPATH_W) 'frontends/yasm/yasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/yasm/yasm-options.c'; fi` - -tasm.o: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.o -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.o `test -f 'frontends/tasm/tasm.c' || echo '$(srcdir)/'`frontends/tasm/tasm.c - -tasm.obj: frontends/tasm/tasm.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm.obj -MD -MP -MF $(DEPDIR)/tasm.Tpo -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - mv -f $(DEPDIR)/tasm.Tpo $(DEPDIR)/tasm.Po -# source='frontends/tasm/tasm.c' object='tasm.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm.obj `if test -f 'frontends/tasm/tasm.c'; then $(CYGPATH_W) 'frontends/tasm/tasm.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm.c'; fi` - -tasm-options.o: frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.o -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.o' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.o `test -f 'frontends/tasm/tasm-options.c' || echo '$(srcdir)/'`frontends/tasm/tasm-options.c - -tasm-options.obj: 
frontends/tasm/tasm-options.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tasm-options.obj -MD -MP -MF $(DEPDIR)/tasm-options.Tpo -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` - mv -f $(DEPDIR)/tasm-options.Tpo $(DEPDIR)/tasm-options.Po -# source='frontends/tasm/tasm-options.c' object='tasm-options.obj' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tasm-options.obj `if test -f 'frontends/tasm/tasm-options.c'; then $(CYGPATH_W) 'frontends/tasm/tasm-options.c'; else $(CYGPATH_W) '$(srcdir)/frontends/tasm/tasm-options.c'; fi` -install-man1: $(man1_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)" - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \ - done -uninstall-man1: - @$(NORMAL_UNINSTALL) - @list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.1*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in 
\ - 1*) ;; \ - *) ext='1' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man1dir)/$$inst"; \ - done -install-man7: $(man7_MANS) $(man_MANS) - @$(NORMAL_INSTALL) - test -z "$(man7dir)" || $(MKDIR_P) "$(DESTDIR)$(man7dir)" - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \ - else file=$$i; fi; \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ - $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst"; \ - done -uninstall-man7: - @$(NORMAL_UNINSTALL) - @list='$(man7_MANS) $(dist_man7_MANS) $(nodist_man7_MANS)'; \ - l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \ - for i in $$l2; do \ - case "$$i" in \ - *.7*) list="$$list $$i" ;; \ - esac; \ - done; \ - for i in $$list; do \ - ext=`echo $$i | sed -e 's/^.*\\.//'`; \ - case "$$ext" in \ - 7*) ;; \ - *) ext='7' ;; \ - esac; \ - inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \ - inst=`echo $$inst | sed -e 's/^.*\///'`; \ - inst=`echo $$inst | sed '$(transform)'`.$$ext; \ - echo " rm -f '$(DESTDIR)$(man7dir)/$$inst'"; \ - rm -f "$(DESTDIR)$(man7dir)/$$inst"; \ - done -install-includeHEADERS: $(include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " 
$(includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done -install-modincludeHEADERS: $(modinclude_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(modincludedir)" || $(MKDIR_P) "$(DESTDIR)$(modincludedir)" - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(modincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(modincludedir)/$$f'"; \ - $(modincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(modincludedir)/$$f"; \ - done - -uninstall-modincludeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(modinclude_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(modincludedir)/$$f'"; \ - rm -f "$(DESTDIR)$(modincludedir)/$$f"; \ - done -install-nodist_includeHEADERS: $(nodist_include_HEADERS) - @$(NORMAL_INSTALL) - test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)" - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(nodist_includeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(includedir)/$$f'"; \ - $(nodist_includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \ - done - -uninstall-nodist_includeHEADERS: - @$(NORMAL_UNINSTALL) - @list='$(nodist_include_HEADERS)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \ - rm -f "$(DESTDIR)$(includedir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -$(RECURSIVE_CLEAN_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd 
$(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; ws='[ ]'; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - echo "XPASS: $$tst"; \ - ;; \ - *) \ - echo "PASS: $$tst"; \ - ;; \ - esac; \ - elif test $$? -ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *$$ws$$tst$$ws*) \ - xfail=`expr $$xfail + 1`; \ - echo "XFAIL: $$tst"; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - echo "FAIL: $$tst"; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - echo "SKIP: $$tst"; \ - fi; \ - done; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="All $$all tests passed"; \ - else \ - banner="All $$all tests behaved as expected ($$xfail expected failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all tests failed"; \ - else \ - banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - skipped="($$skip tests were not run)"; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - echo "$$dashes"; \ - echo "$$banner"; \ - test -z "$$skipped" || echo 
"$$skipped"; \ - test -z "$$report" || echo "$$report"; \ - echo "$$dashes"; \ - test "$$failed" -eq 0; \ - else :; fi - -distdir: $(DISTFILES) - $(am__remove_distdir) - test -d $(distdir) || mkdir $(distdir) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - distdir) \ - || exit 1; \ - fi; \ - done - -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ - ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ - ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ - ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ - || chmod -R a+r $(distdir) -dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 - $(am__remove_distdir) - -dist-lzma: distdir - tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma - $(am__remove_distdir) - -dist-tarZ: distdir - tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z - $(am__remove_distdir) - -dist-shar: distdir - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz - $(am__remove_distdir) - -dist-zip: distdir - -rm -f $(distdir).zip - zip -rq $(distdir).zip $(distdir) - $(am__remove_distdir) - -dist dist-all: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz - $(am__remove_distdir) - -# This target untars the dist file and tries a VPATH configuration. Then -# it guarantees that the distribution is self-contained by making another -# tarfile. -distcheck: dist - case '$(DIST_ARCHIVES)' in \ - *.tar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ - *.tar.bz2*) \ - bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ - *.tar.lzma*) \ - unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ - *.tar.Z*) \ - uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ - *.shar.gz*) \ - GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ - *.zip*) \ - unzip $(distdir).zip ;;\ - esac - chmod -R a-w $(distdir); chmod a+w $(distdir) - mkdir $(distdir)/_build - mkdir $(distdir)/_inst - chmod a-w $(distdir) - dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ - && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ - && cd $(distdir)/_build \ - && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ - $(DISTCHECK_CONFIGURE_FLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ - && $(MAKE) $(AM_MAKEFLAGS) check \ - && $(MAKE) $(AM_MAKEFLAGS) install \ - && $(MAKE) $(AM_MAKEFLAGS) installcheck \ - && $(MAKE) $(AM_MAKEFLAGS) uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ - distuninstallcheck \ - && chmod -R a-w "$$dc_install_base" \ - && ({ \ - (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ - && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ - distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ - } || { rm -rf "$$dc_destdir"; exit 1; }) \ - && rm -rf "$$dc_destdir" \ - && $(MAKE) $(AM_MAKEFLAGS) dist \ - && rm -rf $(DIST_ARCHIVES) \ - && $(MAKE) $(AM_MAKEFLAGS) distcleancheck - $(am__remove_distdir) - @(echo "$(distdir) archives ready for distribution: "; \ - list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ - sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' -distuninstallcheck: - @cd $(distuninstallcheck_dir) \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ - || { echo "ERROR: files left after uninstall:" ; \ - if test -n "$(DESTDIR)"; then \ - echo " (check DESTDIR support)"; \ - fi ; \ - $(distuninstallcheck_listfiles) ; \ - exit 1; } >&2 -distcleancheck: distclean - @if test '$(srcdir)' = . 
; then \ - echo "ERROR: distcleancheck can only run from a VPATH build" ; \ - exit 1 ; \ - fi - @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ - || { echo "ERROR: files left in build directory after distclean:" ; \ - $(distcleancheck_listfiles) ; \ - exit 1; } >&2 -check-am: all-am - $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) check-recursive -all-am: Makefile $(LIBRARIES) $(PROGRAMS) $(MANS) $(HEADERS) config.h \ - all-local -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(modincludedir)" "$(DESTDIR)$(includedir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: $(BUILT_SOURCES) - $(MAKE) $(AM_MAKEFLAGS) install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-local distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-includeHEADERS install-man \ - install-modincludeHEADERS install-nodist_includeHEADERS - -install-dvi: install-dvi-recursive - -install-exec-am: install-binPROGRAMS install-libLIBRARIES - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) install-exec-hook - -install-html: install-html-recursive - -install-info: install-info-recursive - -install-man: install-man1 install-man7 - -install-pdf: install-pdf-recursive - -install-ps: install-ps-recursive - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-includeHEADERS \ - uninstall-libLIBRARIES uninstall-man \ - uninstall-modincludeHEADERS uninstall-nodist_includeHEADERS - @$(NORMAL_INSTALL) - $(MAKE) $(AM_MAKEFLAGS) uninstall-hook - -uninstall-man: uninstall-man1 uninstall-man7 - -.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ - install-exec-am install-strip uninstall-am - -.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ - all all-am all-local am--refresh check check-TESTS check-am 
\ - clean clean-binPROGRAMS clean-checkPROGRAMS clean-generic \ - clean-libLIBRARIES clean-noinstPROGRAMS ctags ctags-recursive \ - dist dist-all dist-bzip2 dist-gzip dist-lzma dist-shar \ - dist-tarZ dist-zip distcheck distclean distclean-compile \ - distclean-generic distclean-hdr distclean-local distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-exec-hook install-html \ - install-html-am install-includeHEADERS install-info \ - install-info-am install-libLIBRARIES install-man install-man1 \ - install-man7 install-modincludeHEADERS \ - install-nodist_includeHEADERS install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-binPROGRAMS uninstall-hook \ - uninstall-includeHEADERS uninstall-libLIBRARIES uninstall-man \ - uninstall-man1 uninstall-man7 uninstall-modincludeHEADERS \ - uninstall-nodist_includeHEADERS - - -re2c-main.$(OBJEXT): tools/re2c/main.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/main.c || echo '$(srcdir)/'`tools/re2c/main.c - -re2c-code.$(OBJEXT): tools/re2c/code.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/code.c || echo '$(srcdir)/'`tools/re2c/code.c - -re2c-dfa.$(OBJEXT): tools/re2c/dfa.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/dfa.c || echo '$(srcdir)/'`tools/re2c/dfa.c - -re2c-parser.$(OBJEXT): tools/re2c/parser.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/parser.c || echo '$(srcdir)/'`tools/re2c/parser.c - -re2c-actions.$(OBJEXT): tools/re2c/actions.c - 
$(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/actions.c || echo '$(srcdir)/'`tools/re2c/actions.c - -re2c-scanner.$(OBJEXT): tools/re2c/scanner.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/scanner.c || echo '$(srcdir)/'`tools/re2c/scanner.c - -re2c-mbo_getopt.$(OBJEXT): tools/re2c/mbo_getopt.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/mbo_getopt.c || echo '$(srcdir)/'`tools/re2c/mbo_getopt.c - -re2c-substr.$(OBJEXT): tools/re2c/substr.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/substr.c || echo '$(srcdir)/'`tools/re2c/substr.c - -re2c-translate.$(OBJEXT): tools/re2c/translate.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/re2c/translate.c || echo '$(srcdir)/'`tools/re2c/translate.c - -genmacro.$(OBJEXT): tools/genmacro/genmacro.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genmacro/genmacro.c || echo '$(srcdir)/'`tools/genmacro/genmacro.c -.gperf.c: genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $< $@ - -genperf.$(OBJEXT): tools/genperf/genperf.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/genperf.c || echo '$(srcdir)/'`tools/genperf/genperf.c - -gp-perfect.$(OBJEXT): tools/genperf/perfect.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f tools/genperf/perfect.c || echo '$(srcdir)/'`tools/genperf/perfect.c - -gp-phash.$(OBJEXT): libyasm/phash.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/phash.c || echo '$(srcdir)/'`libyasm/phash.c - -gp-xmalloc.$(OBJEXT): libyasm/xmalloc.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xmalloc.c || echo '$(srcdir)/'`libyasm/xmalloc.c - -gp-xstrdup.$(OBJEXT): libyasm/xstrdup.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/xstrdup.c || echo '$(srcdir)/'`libyasm/xstrdup.c - -# Use 
Pyxelator to generate Pyrex function headers. -#_yasm.pxi: ${HEADERS} -# @rm -rf .tmp -# @mkdir .tmp -# $(PYTHON) $(srcdir)/tools/python-yasm/pyxelator/wrap_yasm.py \ -# "YASM_DIR=${srcdir}" "CPP=${CPP}" "CPPFLAGS=${CPPFLAGS}" -# @rm -rf .tmp - -# Need to build a local copy of the main Pyrex input file to include _yasm.pxi -# from the build directory. Also need to fixup the other .pxi include paths. -#yasm.pyx: $(srcdir)/tools/python-yasm/yasm.pyx -# sed -e 's,^include "\([^_]\),include "${srcdir}/tools/python-yasm/\1,' \ -# $(srcdir)/tools/python-yasm/yasm.pyx > $@ - -# Actually run Pyrex -#yasm_python.c: yasm.pyx _yasm.pxi $(PYBINDING_DEPS) -# $(PYTHON) -c "from Pyrex.Compiler.Main import main; main(command_line=1)" \ -# -o $@ yasm.pyx - -# Now the Python build magic... -#python-setup.txt: Makefile -# echo "includes=${DEFS} ${DEFAULT_INCLUDES} ${INCLUDES} ${AM_CPPFLAGS} ${CPPFLAGS}" > python-setup.txt -# echo "sources=${libyasm_a_SOURCES}" >> python-setup.txt -# echo "srcdir=${srcdir}" >> python-setup.txt -# echo "gcc=${GCC}" >> python-setup.txt - -#.python-build: python-setup.txt yasm_python.c ${libyasm_a_SOURCES} -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py build -# touch .python-build -#python-build: .python-build - -#python-install: .python-build -# $(PYTHON) `test -f tools/python-yasm/setup.py || echo '$(srcdir)/'`tools/python-yasm/setup.py install "--install-lib=$(DESTDIR)$(pythondir)" - -#python-uninstall: -# rm -f `$(PYTHON) -c "import sys;sys.path.insert(0, '${DESTDIR}${pythondir}'); import yasm; print yasm.__file__"` - -python-build: -python-install: -python-uninstall: - -modules/arch/x86/x86id.c: x86insn_nasm.c x86insn_gas.c x86insns.c - -x86insn_nasm.gperf x86insn_gas.gperf x86insns.c: $(srcdir)/modules/arch/x86/gen_x86_insn.py - $(PYTHON) $(srcdir)/modules/arch/x86/gen_x86_insn.py -#x86insn_nasm.gperf: $(srcdir)/x86insn_nasm.gperf -# @echo Python must be installed to regenerate x86 instructions 
files -# cp $(srcdir)/x86insn_nasm.gperf $@ -#x86insn_gas.gperf: $(srcdir)/x86insn_gas.gperf -# @echo Python must be installed to regenerate x86 instructions files -# cp $(srcdir)/x86insn_gas.gperf $@ - -# Use suffix rules for gperf files -x86insn_nasm.c: x86insn_nasm.gperf genperf$(EXEEXT) -x86insn_gas.c: x86insn_gas.gperf genperf$(EXEEXT) -x86cpu.c: $(srcdir)/modules/arch/x86/x86cpu.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86cpu.gperf $@ -x86regtmod.c: $(srcdir)/modules/arch/x86/x86regtmod.gperf genperf$(EXEEXT) - $(top_builddir)/genperf$(EXEEXT) $(srcdir)/modules/arch/x86/x86regtmod.gperf $@ - -lc3bid.c: $(srcdir)/modules/arch/lc3b/lc3bid.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -s -o $@ $(srcdir)/modules/arch/lc3b/lc3bid.re - -yasm_arch.7: modules/arch/yasm_arch.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/arch/yasm_arch.xml - -#EXTRA_DIST += modules/listfmts/nasm/tests/Makefile.inc - -#include modules/listfmts/nasm/tests/Makefile.inc - -gas-token.c: $(srcdir)/modules/parsers/gas/gas-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/gas/gas-token.re - -nasm-token.c: $(srcdir)/modules/parsers/nasm/nasm-token.re re2c$(EXEEXT) - $(top_builddir)/re2c$(EXEEXT) -b -o $@ $(srcdir)/modules/parsers/nasm/nasm-token.re - -$(top_srcdir)/modules/parsers/nasm/nasm-parser.c: nasm-macros.c - -nasm-macros.c: $(srcdir)/modules/parsers/nasm/nasm-std.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_standard_mac $(srcdir)/modules/parsers/nasm/nasm-std.mac - -yasm_parsers.7: modules/parsers/yasm_parsers.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/parsers/yasm_parsers.xml - -$(top_srcdir)/modules/preprocs/nasm/nasm-preproc.c: nasm-version.c - -nasm-version.c: version.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ nasm_version_mac version.mac - -version.mac: genversion$(EXEEXT) - $(top_builddir)/genversion$(EXEEXT) $@ - 
-genversion.$(OBJEXT): modules/preprocs/nasm/genversion.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f modules/preprocs/nasm/genversion.c || echo '$(srcdir)/'`modules/preprocs/nasm/genversion.c - -#EXTRA_DIST += modules/dbgfmts/codeview/tests/Makefile.inc -#include modules/dbgfmts/codeview/tests/Makefile.inc - -yasm_dbgfmts.7: modules/dbgfmts/yasm_dbgfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/dbgfmts/yasm_dbgfmts.xml - -$(top_srcdir)/modules/objfmts/coff/coff-objfmt.c: win64-nasm.c win64-gas.c - -win64-nasm.c: $(srcdir)/modules/objfmts/coff/win64-nasm.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_nasm_stdmac $(srcdir)/modules/objfmts/coff/win64-nasm.mac - -win64-gas.c: $(srcdir)/modules/objfmts/coff/win64-gas.mac genmacro$(EXEEXT) - $(top_builddir)/genmacro$(EXEEXT) $@ win64_gas_stdmac $(srcdir)/modules/objfmts/coff/win64-gas.mac - -yasm_objfmts.7: modules/objfmts/yasm_objfmts.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/modules/objfmts/yasm_objfmts.xml - -module.c: $(top_srcdir)/libyasm/module.in genmodule$(EXEEXT) Makefile - $(top_builddir)/genmodule$(EXEEXT) $(top_srcdir)/libyasm/module.in Makefile - -genmodule.$(OBJEXT): libyasm/genmodule.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f libyasm/genmodule.c || echo '$(srcdir)/'`libyasm/genmodule.c - -yasm.1: frontends/yasm/yasm.xml - $(XMLTO) -o $(top_builddir) man $(srcdir)/frontends/yasm/yasm.xml - -$(srcdir)/frontends/yasm/yasm.c: license.c - -license.c: $(srcdir)/COPYING genstring$(EXEEXT) - $(top_builddir)/genstring$(EXEEXT) license_msg $@ $(srcdir)/COPYING - -distclean-local: - -rm -rf results - -rm -rf build - -all-local: python-build -install-exec-hook: python-install -uninstall-hook: python-uninstall - -genstring.$(OBJEXT): genstring.c - $(CC_FOR_BUILD) $(DEFAULT_INCLUDES) $(INCLUDES) -c -o $@ `test -f genstring.c || echo '$(srcdir)/'`genstring.c -# Tell versions [3.59,3.63) of GNU make to not export all variables. 
-# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/packager/third_party/yasm/source/config/win/config.h b/packager/third_party/yasm/source/config/win/config.h deleted file mode 100644 index 73ef9270e1..0000000000 --- a/packager/third_party/yasm/source/config/win/config.h +++ /dev/null @@ -1,173 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Command name to run C preprocessor */ -#define CPP_PROG "gcc -E" - -/* */ -/* #undef ENABLE_NLS */ - -/* Define to 1 if you have the `abort' function. */ -#define HAVE_ABORT 1 - -/* */ -/* #undef HAVE_CATGETS */ - -/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the - CoreFoundation framework. */ -/* #undef HAVE_CFLOCALECOPYCURRENT */ - -/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in - the CoreFoundation framework. */ -/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */ - -/* Define if the GNU dcgettext() function is already present or preinstalled. - */ -#define HAVE_DCGETTEXT 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_DIRECT_H 1 - -/* Define to 1 if you have the `ftruncate' function. */ -/* #undef HAVE_FTRUNCATE */ - -/* Define to 1 if you have the `getcwd' function. */ -#define HAVE_GETCWD 1 - -/* */ -#define HAVE_GETTEXT 1 - -/* Define to 1 if you have the GNU C Library */ -#define HAVE_GNU_C_LIBRARY 0 - -/* Define if you have the iconv() function and it works. */ -/* #undef HAVE_ICONV */ - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* */ -/* #undef HAVE_LC_MESSAGES */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LIBGEN_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mergesort' function. */ -/* #undef HAVE_MERGESORT */ - -/* Define to 1 if you have the `popen' function. 
*/ -/* #undef HAVE_POPEN */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* */ -/* #undef HAVE_STPCPY */ - -/* Define to 1 if you have the `strcasecmp' function. */ -/* #undef HAVE_STRCASECMP */ - -/* Define to 1 if you have the `strcmpi' function. */ -/* #undef HAVE_STRCMPI */ - -/* Define to 1 if you have the `stricmp' function. */ -/* #undef HAVE_STRICMP */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_STRINGS_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strncasecmp' function. */ -#define HAVE_STRNCASECMP 1 - -/* Define to 1 if you have the `strsep' function. */ -/* #undef HAVE_STRSEP */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the `toascii' function. */ -#define HAVE_TOASCII 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_UNISTD_H */ - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to 1 if you have the `_stricmp' function. */ -/* #undef HAVE__STRICMP */ - -/* Name of package */ -#define PACKAGE "yasm" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "bug-yasm@tortall.net" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "yasm" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "yasm 1.2.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "yasm" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2.0" - -/* Define to 1 if the C compiler supports function prototypes. 
*/ -#define PROTOTYPES 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void*', as computed by sizeof. */ -/* #undef SIZEOF_VOIDP */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Version number of package */ -#define VERSION "1.2.0" - -/* Define if using the dmalloc debugging malloc package */ -/* #undef WITH_DMALLOC */ - -/* Define like PROTOTYPES; this can be used by system headers. */ -#define __PROTOTYPES 1 - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -/* #undef inline */ -#endif - -/* Define to `unsigned int' if does not define. */ -/* #undef size_t */ diff --git a/packager/third_party/yasm/source/config/win/libyasm-stdint.h b/packager/third_party/yasm/source/config/win/libyasm-stdint.h deleted file mode 100644 index 357610e1c1..0000000000 --- a/packager/third_party/yasm/source/config/win/libyasm-stdint.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _YASM_LIBYASM_STDINT_H -#define _YASM_LIBYASM_STDINT_H 1 -#ifndef _GENERATED_STDINT_H -#define _GENERATED_STDINT_H "yasm HEAD" -/* generated using gcc -std=gnu99 */ -#define _STDINT_HAVE_STDINT_H 1 -#include -#endif -#endif diff --git a/packager/third_party/yasm/source/config/win/stdint.h b/packager/third_party/yasm/source/config/win/stdint.h deleted file mode 100644 index d77df92638..0000000000 --- a/packager/third_party/yasm/source/config/win/stdint.h +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2011 The Chromium Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// YASM is written in C99 and requires and . - -#ifndef THIRD_PARTY_YASM_SOURCE_CONFIG_WIN_STDINT_H_ -#define THIRD_PARTY_YASM_SOURCE_CONFIG_WIN_STDINT_H_ - -#if !defined(_MSC_VER) -#error This file should only be included when compiling with MSVC. -#endif - -// Define C99 equivalent types. -typedef signed char int8_t; -typedef signed short int16_t; -typedef signed int int32_t; -typedef signed long long int64_t; -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned long long uint64_t; - -// Define the C99 INT64_C macro that is used for declaring 64-bit literals. -// Technically, these should only be definied when __STDC_CONSTANT_MACROS -// is defined. -#define INT64_C(value) value##LL -#define UINT64_C(value) value##ULL - -#endif // THIRD_PARTY_YASM_SOURCE_CONFIG_WIN_STDINT_H_ diff --git a/packager/third_party/yasm/yasm.gyp b/packager/third_party/yasm/yasm.gyp deleted file mode 100644 index 8ad8c7a02b..0000000000 --- a/packager/third_party/yasm/yasm.gyp +++ /dev/null @@ -1,581 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# The yasm build process creates a slew of small C subprograms that -# dynamically generate files at various point in the build process. This makes -# the build integration moderately complex. -# -# There are three classes of dynamically generated files: -# 1) C source files that should be included in the build (eg., lc3bid.c) -# 2) C source files that are #included by static C sources (eg., license.c) -# 3) Intermediate files that are used as input by other subprograms to -# further generate files in category #1 or #2. 
(eg., version.mac) -# -# This structure is represented with the following targets: -# 1) yasm -- Sources, flags for the main yasm executable. Also has most of -# of the actions and rules that invoke the subprograms. -# 2) config_sources -- Checked in version of files generated by manually -# running configure that are used by all binaries. -# 3) generate_files -- Actions and rules for files of type #3. -# 4) genperf_libs -- Object files shared between yasm and the genperf -# subprogram. -# 5) genmacro, genmodule, etc. -- One executable target for each subprogram. -# -# You will notice that a lot of the action targets seem very similar -- -# especially for genmacro invocations. This makes it seem like they should -# be a rule. The problem is that the correct invocation cannot be inferred -# purely from the file name, or extension. Nor is it obvious whether the -# output should be processed as a source or not. Thus, we are left with a -# large amount of repetitive code. - -{ - 'variables': { - 'yasm_include_dirs': [ - 'source/config/<(OS)', - 'source/patched-yasm', - ], - - # The cflags used by any target that will be directly linked into yasm. - # These are specifically not used when building the subprograms. While - # it would probably be safe to use these flags there as well, the - # ./configure based build does not use the same flags between the main - # yasm executable, and its subprograms. - 'yasm_defines': ['HAVE_CONFIG_H'], - 'yasm_cflags': [ - '-std=gnu99', - '-ansi', - '-pedantic', - ], - - # Locations for various generated artifacts. - 'shared_generated_dir': '<(SHARED_INTERMEDIATE_DIR)/third_party/yasm', - 'generated_dir': '<(INTERMEDIATE_DIR)/third_party/yasm', - - # Various files referenced by multiple targets. - 'version_file': 'version.mac', # Generated by genversion. - 'genmodule_source': 'genmodule_outfile.c', - }, - 'target_defaults': { - # Silence warnings in libc++ builds (C code doesn't need this flag). 
- 'ldflags!': [ '-stdlib=libc++', ], - }, - 'targets': [ - { - 'target_name': 'yasm', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ - 'config_sources', - 'genmacro', - 'genmodule', - 'genperf', - 'genperf_libs', - 'generate_files', # Needed to generate gperf and instruction files. - 'genstring', - 're2c', - ], - 'variables': { - 'clang_warning_flags': [ - # yasm passes a `const elf_machine_sym*` through `void*`. - '-Wno-incompatible-pointer-types', - ], - }, - 'conditions': [ - ['OS=="win"', { - # As of VS 2013 Update 3, building this project with /analyze hits an - # internal compiler error on elf-x86-amd64.c in release builds with - # the amd64_x86 compiler. This halts the build and prevents subsequent - # analysis. Therefore, /analyze is disabled for this project. See this - # bug for details: - # https://connect.microsoft.com/VisualStudio/feedback/details/1014799/internal-compiler-error-when-using-analyze - 'msvs_settings': { - 'VCCLCompilerTool': { - 'AdditionalOptions!': [ '/analyze' ] - }, - }, - }], - ], - 'sources': [ - 'source/patched-yasm/frontends/yasm/yasm-options.c', - 'source/patched-yasm/frontends/yasm/yasm.c', - 'source/patched-yasm/libyasm/assocdat.c', - 'source/patched-yasm/libyasm/bc-align.c', - 'source/patched-yasm/libyasm/bc-data.c', - 'source/patched-yasm/libyasm/bc-incbin.c', - 'source/patched-yasm/libyasm/bc-org.c', - 'source/patched-yasm/libyasm/bc-reserve.c', - 'source/patched-yasm/libyasm/bitvect.c', - 'source/patched-yasm/libyasm/bytecode.c', - 'source/patched-yasm/libyasm/errwarn.c', - 'source/patched-yasm/libyasm/expr.c', - 'source/patched-yasm/libyasm/file.c', - 'source/patched-yasm/libyasm/floatnum.c', - 'source/patched-yasm/libyasm/hamt.c', - 'source/patched-yasm/libyasm/insn.c', - 'source/patched-yasm/libyasm/intnum.c', - 'source/patched-yasm/libyasm/inttree.c', - 'source/patched-yasm/libyasm/linemap.c', - 'source/patched-yasm/libyasm/md5.c', - 'source/patched-yasm/libyasm/mergesort.c', - 
'source/patched-yasm/libyasm/section.c', - 'source/patched-yasm/libyasm/strcasecmp.c', - 'source/patched-yasm/libyasm/strsep.c', - 'source/patched-yasm/libyasm/symrec.c', - 'source/patched-yasm/libyasm/valparam.c', - 'source/patched-yasm/libyasm/value.c', - 'source/patched-yasm/modules/arch/lc3b/lc3barch.c', - 'source/patched-yasm/modules/arch/lc3b/lc3bbc.c', - 'source/patched-yasm/modules/arch/x86/x86arch.c', - 'source/patched-yasm/modules/arch/x86/x86bc.c', - 'source/patched-yasm/modules/arch/x86/x86expr.c', - 'source/patched-yasm/modules/arch/x86/x86id.c', - 'source/patched-yasm/modules/dbgfmts/codeview/cv-dbgfmt.c', - 'source/patched-yasm/modules/dbgfmts/codeview/cv-symline.c', - 'source/patched-yasm/modules/dbgfmts/codeview/cv-type.c', - 'source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-aranges.c', - 'source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-dbgfmt.c', - 'source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-info.c', - 'source/patched-yasm/modules/dbgfmts/dwarf2/dwarf2-line.c', - 'source/patched-yasm/modules/dbgfmts/null/null-dbgfmt.c', - 'source/patched-yasm/modules/dbgfmts/stabs/stabs-dbgfmt.c', - 'source/patched-yasm/modules/listfmts/nasm/nasm-listfmt.c', - 'source/patched-yasm/modules/objfmts/bin/bin-objfmt.c', - 'source/patched-yasm/modules/objfmts/coff/coff-objfmt.c', - 'source/patched-yasm/modules/objfmts/coff/win64-except.c', - 'source/patched-yasm/modules/objfmts/dbg/dbg-objfmt.c', - 'source/patched-yasm/modules/objfmts/elf/elf-objfmt.c', - 'source/patched-yasm/modules/objfmts/elf/elf-x86-amd64.c', - 'source/patched-yasm/modules/objfmts/elf/elf-x86-x86.c', - 'source/patched-yasm/modules/objfmts/elf/elf.c', - 'source/patched-yasm/modules/objfmts/macho/macho-objfmt.c', - 'source/patched-yasm/modules/objfmts/rdf/rdf-objfmt.c', - 'source/patched-yasm/modules/objfmts/xdf/xdf-objfmt.c', - 'source/patched-yasm/modules/parsers/gas/gas-parse.c', - 'source/patched-yasm/modules/parsers/gas/gas-parse-intel.c', - 
'source/patched-yasm/modules/parsers/gas/gas-parser.c', - 'source/patched-yasm/modules/parsers/nasm/nasm-parse.c', - 'source/patched-yasm/modules/parsers/nasm/nasm-parser.c', - 'source/patched-yasm/modules/preprocs/cpp/cpp-preproc.c', - 'source/patched-yasm/modules/preprocs/nasm/nasm-eval.c', - 'source/patched-yasm/modules/preprocs/nasm/nasm-pp.c', - 'source/patched-yasm/modules/preprocs/nasm/nasm-preproc.c', - 'source/patched-yasm/modules/preprocs/nasm/nasmlib.c', - 'source/patched-yasm/modules/preprocs/raw/raw-preproc.c', - - # Sources needed by re2c. - 'source/patched-yasm/modules/parsers/gas/gas-token.re', - 'source/patched-yasm/modules/parsers/nasm/nasm-token.re', - - # Sources needed by genperf. Make sure the generated gperf files - # (the ones in shared_generated_dir) are synced with the outputs - # for the related generate_*_insn actions in the generate_files - # target below. - '<(shared_generated_dir)/x86insn_nasm.gperf', - '<(shared_generated_dir)/x86insn_gas.gperf', - '<(shared_generated_dir)/x86cpu.c', - '<(shared_generated_dir)/x86regtmod.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - '<(shared_generated_dir)', - '<(generated_dir)', - ], - 'defines': [ '<@(yasm_defines)' ], - 'cflags': [ '<@(yasm_cflags)', ], - 'msvs_disabled_warnings': [ 4267 ], - 'rules': [ - { - 'rule_name': 'generate_gperf', - 'extension': 'gperf', - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genperf<(EXECUTABLE_SUFFIX)' ], - 'outputs': [ - '<(generated_dir)/<(RULE_INPUT_ROOT).c', - ], - 'action': ['<(PRODUCT_DIR)/genperf', - '<(RULE_INPUT_PATH)', - '<(generated_dir)/<(RULE_INPUT_ROOT).c', - ], - # These files are #included, so do not treat them as sources. 
- 'process_outputs_as_sources': 0, - 'message': 'yasm gperf for <(RULE_INPUT_PATH)', - }, - { - 'rule_name': 'generate_re2c', - 'extension': 're', - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)re2c<(EXECUTABLE_SUFFIX)' ], - 'outputs': [ '<(generated_dir)/<(RULE_INPUT_ROOT).c', ], - 'action': [ - '<(PRODUCT_DIR)/re2c', - '-b', - '-o', - '<(generated_dir)/<(RULE_INPUT_ROOT).c', - '<(RULE_INPUT_PATH)', - ], - 'process_outputs_as_sources': 1, - 'message': 'yasm re2c for <(RULE_INPUT_PATH)', - }, - ], - 'actions': [ - ### - ### genmacro calls. - ### - { - 'action_name': 'generate_nasm_macros', - 'variables': { - 'infile': 'source/patched-yasm/modules/parsers/nasm/nasm-std.mac', - 'varname': 'nasm_standard_mac', - 'outfile': '<(generated_dir)/nasm-macros.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genmacro<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': ['<(PRODUCT_DIR)/genmacro', - '<(outfile)', '<(varname)', '<(infile)', ], - # Not a direct source because this is #included by - # source/patched-yasm/modules/parsers/nasm/nasm-parser.c - 'process_outputs_as_sources': 1, - 'message': 'yasm genmacro for <(infile)', - }, - { - 'action_name': 'generate_nasm_version', - 'variables': { - 'infile': '<(shared_generated_dir)/<(version_file)', - 'varname': 'nasm_version_mac', - 'outfile': '<(generated_dir)/nasm-version.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genmacro<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': ['<(PRODUCT_DIR)/genmacro', - '<(outfile)', '<(varname)', '<(infile)', - ], - # Not a direct source because this is #included by - # source/patched-yasm/modules/preprocs/nasm/nasm-preproc.c - 'process_outputs_as_sources': 0, - 'message': 'yasm genmacro for <(infile)', - }, - { - 'action_name': 'generate_win64_gas', - 'variables': { - 'infile': 'source/patched-yasm/modules/objfmts/coff/win64-gas.mac', - 'varname': 'win64_gas_stdmac', - 'outfile': 
'<(generated_dir)/win64-gas.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genmacro<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': ['<(PRODUCT_DIR)/genmacro', - '<(outfile)', '<(varname)', '<(infile)', - ], - # Not a direct source because this is #included by - # source/patched-yasm/modules/objfmts/coff/coff-objfmt.c - 'process_outputs_as_sources': 0, - 'message': 'yasm genmacro for <(infile)', - }, - { - 'action_name': 'generate_win64_nasm', - 'variables': { - 'infile': 'source/patched-yasm/modules/objfmts/coff/win64-nasm.mac', - 'varname': 'win64_nasm_stdmac', - 'outfile': '<(generated_dir)/win64-nasm.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genmacro<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': ['<(PRODUCT_DIR)/genmacro', - '<(outfile)', - '<(varname)', - '<(infile)', - ], - # Not a direct source because this is #included by - # source/patched-yasm/modules/objfmts/coff/coff-objfmt.c - 'process_outputs_as_sources': 0, - 'message': 'yasm genmacro for <(infile)', - }, - - ### - ### genstring call. - ### - { - 'action_name': 'generate_license', - 'variables': { - 'infile': 'source/patched-yasm/COPYING', - 'varname': 'license_msg', - 'outfile': '<(generated_dir)/license.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genstring<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': ['<(PRODUCT_DIR)/genstring', - '<(varname)', - '<(outfile)', - '<(infile)', - ], - # Not a direct source because this is #included by - # source/patched-yasm/frontends/yasm/yasm.c - 'process_outputs_as_sources': 0, - 'message': 'Generating yasm embeddable license', - }, - - ### - ### A re2c call that doesn't fit into the rule below. - ### - { - 'action_name': 'generate_lc3b_token', - 'variables': { - 'infile': 'source/patched-yasm/modules/arch/lc3b/lc3bid.re', - # The license file is #included by yasm.c. 
- 'outfile': '<(generated_dir)/lc3bid.c', - }, - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)re2c<(EXECUTABLE_SUFFIX)', - '<(infile)', ], - 'outputs': [ '<(outfile)', ], - 'action': [ - '<(PRODUCT_DIR)/re2c', - '-s', - '-o', '<(outfile)', - '<(infile)' - ], - 'process_outputs_as_sources': 1, - 'message': 'Generating yasm tokens for lc3b', - }, - - ### - ### genmodule call. - ### - { - 'action_name': 'generate_module', - 'variables': { - 'makefile': 'source/config/<(OS)/Makefile', - 'module_in': 'source/patched-yasm/libyasm/module.in', - 'outfile': '<(generated_dir)/module.c', - }, - 'inputs': [ - '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)genmodule<(EXECUTABLE_SUFFIX)', - '<(module_in)', - '<(makefile)' - ], - 'outputs': [ '<(generated_dir)/module.c' ], - 'action': [ - '<(PRODUCT_DIR)/genmodule', - '<(module_in)', - '<(makefile)', - '<(outfile)' - ], - 'process_outputs_as_sources': 1, - 'message': 'Generating yasm module information', - }, - ], - }, - { - 'target_name': 'config_sources', - 'type': 'none', - 'toolsets': ['host'], - 'sources': [ - 'source/config/<(OS)/Makefile', - 'source/config/<(OS)/config.h', - 'source/config/<(OS)/libyasm-stdint.h', - ], - }, - { - 'target_name': 'generate_files', - 'type': 'none', - 'toolsets': ['host'], - 'dependencies': [ - 'genperf', - 'genversion', - ], - 'sources': [ - 'source/patched-yasm/modules/arch/x86/x86cpu.gperf', - 'source/patched-yasm/modules/arch/x86/x86regtmod.gperf', - ], - 'rules': [ - { - 'rule_name': 'generate_gperf', - 'extension': 'gperf', - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genperf<(EXECUTABLE_SUFFIX)' ], - 'outputs': [ '<(shared_generated_dir)/<(RULE_INPUT_ROOT).c', ], - 'action': [ - '<(PRODUCT_DIR)/genperf', - '<(RULE_INPUT_PATH)', - '<(shared_generated_dir)/<(RULE_INPUT_ROOT).c', - ], - 'process_outputs_as_sources': 0, - 'message': 'yasm genperf for <(RULE_INPUT_PATH)', - }, - ], - 'actions': [ - { - 'action_name': 'generate_x86_insn', - 'variables': { - 'gen_insn_path': - 
'source/patched-yasm/modules/arch/x86/gen_x86_insn.py', - }, - 'inputs': [ '<(gen_insn_path)', ], - 'outputs': [ - '<(shared_generated_dir)/x86insns.c', - '<(shared_generated_dir)/x86insn_gas.gperf', - '<(shared_generated_dir)/x86insn_nasm.gperf', - ], - 'action': [ - 'python3', - '<(gen_insn_path)', - '<(shared_generated_dir)', - ], - 'message': 'Running <(gen_insn_path)', - 'process_outputs_as_sources': 0, - }, - { - 'action_name': 'generate_version', - 'inputs': [ '<(PRODUCT_DIR)/' - '<(EXECUTABLE_PREFIX)genversion<(EXECUTABLE_SUFFIX)' ], - 'outputs': [ '<(shared_generated_dir)/<(version_file)', ], - 'action': [ - '<(PRODUCT_DIR)/genversion', - '<(shared_generated_dir)/<(version_file)' - ], - 'message': 'Generating yasm version file: ' - '<(shared_generated_dir)/<(version_file)', - 'process_outputs_as_sources': 0, - }, - ], - }, - { - 'target_name': 'genperf_libs', - 'type': 'static_library', - 'toolsets': ['host'], - 'dependencies': [ 'config_sources', ], - 'sources': [ - 'source/patched-yasm/libyasm/phash.c', - 'source/patched-yasm/libyasm/xmalloc.c', - 'source/patched-yasm/libyasm/xstrdup.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'defines': [ '<@(yasm_defines)' ], - 'cflags': [ - '<@(yasm_cflags)', - ], - }, - { - 'target_name': 'genstring', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ 'config_sources', ], - 'sources': [ - 'source/patched-yasm/genstring.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'cflags': [ - '-std=gnu99', - ], - }, - { - 'target_name': 'genperf', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ - 'genperf_libs', - ], - 'sources': [ - 'source/patched-yasm/tools/genperf/genperf.c', - 'source/patched-yasm/tools/genperf/perfect.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'cflags': [ - '-std=gnu99', - ], - }, - { - 'target_name': 'genmacro', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ 'config_sources', ], - 'sources': [ - 
'source/patched-yasm/tools/genmacro/genmacro.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'cflags': [ - '-std=gnu99', - ], - }, - { - 'target_name': 'genversion', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ 'config_sources', ], - 'sources': [ - 'source/patched-yasm/modules/preprocs/nasm/genversion.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'cflags': [ - '-std=gnu99', - ], - }, - { - 'target_name': 're2c', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ 'config_sources', ], - 'sources': [ - 'source/patched-yasm/tools/re2c/main.c', - 'source/patched-yasm/tools/re2c/code.c', - 'source/patched-yasm/tools/re2c/dfa.c', - 'source/patched-yasm/tools/re2c/parser.c', - 'source/patched-yasm/tools/re2c/actions.c', - 'source/patched-yasm/tools/re2c/scanner.c', - 'source/patched-yasm/tools/re2c/mbo_getopt.c', - 'source/patched-yasm/tools/re2c/substr.c', - 'source/patched-yasm/tools/re2c/translate.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - ], - 'cflags': [ - '-std=gnu99', - ], - 'variables': { - # re2c is missing CLOSEVOP from one switch. - 'clang_warning_flags': [ '-Wno-switch' ], - }, - 'msvs_disabled_warnings': [ 4267 ], - }, - { - 'target_name': 'genmodule', - 'type': 'executable', - 'toolsets': ['host'], - 'dependencies': [ - 'config_sources', - ], - 'sources': [ - 'source/patched-yasm/libyasm/genmodule.c', - ], - 'include_dirs': [ - '<@(yasm_include_dirs)', - - ], - 'cflags': [ - '-std=gnu99', - ], - }, - ], -} diff --git a/packager/third_party/yasm/yasm_assemble.gni b/packager/third_party/yasm/yasm_assemble.gni deleted file mode 100644 index 1a84d5149d..0000000000 --- a/packager/third_party/yasm/yasm_assemble.gni +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -# This provides the yasm_assemble() template which uses YASM to assemble -# assembly files. -# -# Files to be assembled with YASM should have an extension of .asm. -# -# Parameters -# -# yasm_flags (optional) -# [list of strings] Pass additional flags into YASM. These are appended -# to the command line. Note that the target machine type and system is -# already set up based on the current toolchain so you don't need to -# specify these things (see below). -# -# Example: yasm_flags = [ "--force-strict" ] -# -# include_dirs (optional) -# [list of dir names] List of additional include dirs. Note that the -# source root and the root generated file dir is always added, just like -# our C++ build sets up. -# -# Example: include_dirs = [ "//some/other/path", target_gen_dir ] -# -# defines (optional) -# [list of strings] List of defines, as with the native code defines. -# -# Example: defines = [ "FOO", "BAR=1" ] -# -# inputs, deps, visibility (optional) -# These have the same meaning as in an action. -# -# Example -# -# yasm_assemble("my_yasm_target") { -# sources = [ -# "ultra_optimized_awesome.asm", -# ] -# include_dirs = [ "assembly_include" ] -# } - -if (is_mac || is_ios) { - if (current_cpu == "x86") { - _yasm_flags = [ - "-fmacho32", - "-m", - "x86", - ] - } else if (current_cpu == "x64") { - _yasm_flags = [ - "-fmacho64", - "-m", - "amd64", - ] - } -} else if (is_posix) { - if (current_cpu == "x86") { - _yasm_flags = [ - "-felf32", - "-m", - "x86", - ] - } else if (current_cpu == "x64") { - _yasm_flags = [ - "-DPIC", - "-felf64", - "-m", - "amd64", - ] - } -} else if (is_win) { - if (current_cpu == "x86") { - _yasm_flags = [ - "-DPREFIX", - "-fwin32", - "-m", - "x86", - ] - } else if (current_cpu == "x64") { - _yasm_flags = [ - "-fwin64", - "-m", - "amd64", - ] - } -} - -if (is_win) { - asm_obj_extension = "obj" -} else { - asm_obj_extension = "o" -} - -template("yasm_assemble") { - # TODO(ajwong): Support use_system_yasm. 
- assert(defined(invoker.sources), "Need sources defined for $target_name") - - # Only depend on YASM on x86 systems. Force compilation of .asm files for - # ARM to fail. - assert(current_cpu == "x86" || current_cpu == "x64") - - action_name = "${target_name}_action" - source_set_name = target_name - - action_foreach(action_name) { - # Only the source set can depend on this. - visibility = [ ":$source_set_name" ] - - script = "//third_party/yasm/run_yasm.py" - sources = invoker.sources - - if (defined(invoker.inputs)) { - inputs = invoker.inputs - } - - # Executable (first in the args). The binary might be in the root build dir - # (no cross-compiling) or in a toolchain-specific subdirectory of that - # (when cross-compiling). - yasm_label = "//third_party/yasm($host_toolchain)" - args = [ "./" + # Force current dir. - rebase_path(get_label_info(yasm_label, "root_out_dir") + "/yasm", - root_build_dir) ] - - # Deps. - deps = [ - yasm_label, - ] - if (defined(invoker.deps)) { - deps += invoker.deps - } - - # Flags. - args += _yasm_flags - if (defined(invoker.yasm_flags)) { - args += invoker.yasm_flags - } - - # User defined include dirs go first. - if (defined(invoker.include_dirs)) { - foreach(include, invoker.include_dirs) { - args += [ "-I" + rebase_path(include, root_build_dir) ] - } - } - - # Default yasm include dirs. Make it match the native build (source root and - # root generated code directory). - # This goes to the end of include list. - args += [ - "-I.", - - # Using "//." will produce a relative path "../.." which looks better than - # "../../" which will result from using "//" as the base (although both - # work). This is because rebase_path will terminate the result in a - # slash if the input ends in a slash. - "-I" + rebase_path("//.", root_build_dir), - "-I" + rebase_path(root_gen_dir, root_build_dir), - ] - - # Extra defines. - if (defined(invoker.defines)) { - foreach(def, invoker.defines) { - args += [ "-D$def" ] - } - } - - # Output file. 
- # - # TODO(brettw) it might be nice if there was a source expansion for the - # path of the source file relative to the source root. Then we could - # exactly duplicate the naming and location of object files from the - # native build, which would be: - # "$root_out_dir/${target_name}.{{source_dir_part}}.$asm_obj_extension" - outputs = [ - "$target_out_dir/{{source_name_part}}.o", - ] - args += [ - "-o", - rebase_path(outputs[0], root_build_dir), - "{{source}}", - ] - - # The wrapper script run_yasm will write the depfile to the same name as - # the output but with .d appended (like gcc will). - depfile = outputs[0] + ".d" - } - - # Gather the .o files into a linkable thing. This doesn't actually link - # anything (a source set just compiles files to link later), but will pass - # the object files generated by the action up the dependency chain. - source_set(source_set_name) { - if (defined(invoker.visibility)) { - visibility = invoker.visibility - } - - sources = get_target_outputs(":$action_name") - - deps = [ - ":$action_name", - ] - } -} diff --git a/packager/third_party/yasm/yasm_compile.gypi b/packager/third_party/yasm/yasm_compile.gypi deleted file mode 100644 index 75b870520e..0000000000 --- a/packager/third_party/yasm/yasm_compile.gypi +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# This is an gyp include to use YASM for compiling assembly files. -# -# Files to be compiled with YASM should have an extension of .asm. -# -# There are three variables for this include: -# yasm_flags : Pass additional flags into YASM. -# yasm_output_path : Output directory for the compiled object files. -# yasm_includes : Includes used by .asm code. Changes to which should force -# recompilation. 
-# -# Sample usage: -# 'sources': [ -# 'ultra_optimized_awesome.asm', -# ], -# 'variables': { -# 'yasm_flags': [ -# '-I', 'assembly_include', -# ], -# 'yasm_output_path': '<(SHARED_INTERMEDIATE_DIR)/project', -# 'yasm_includes': ['ultra_optimized_awesome.inc'] -# }, -# 'includes': [ -# 'third_party/yasm/yasm_compile.gypi' -# ], - -{ - 'variables': { - 'yasm_flags': [], - 'yasm_includes': [], - - 'conditions': [ - [ 'use_system_yasm==0', { - 'yasm_path': '<(PRODUCT_DIR)/yasm<(EXECUTABLE_SUFFIX)', - }, { - 'yasm_path': '/src/tools/vim/chromium.ycm_extra_conf.py' -# -# 2. Profit -# -# -# Usage notes: -# -# * You must use ninja & clang to build. -# -# * You must have run gyp_packager and build the Debug version. -# -# -# Hacking notes: -# -# * The purpose of this script is to construct an accurate enough command line -# for YCM to pass to clang so it can build and extract the symbols. -# -# * Right now, we only pull the -I and -D flags. That seems to be sufficient -# for everything I've used it for. -# -# * That whole ninja & clang thing? We could support other configs if someone -# were willing to write the correct commands and a parser. -# -# * This has only been tested on gPrecise. - -# Slightly modified the Chromium version so that it works with the packager. - -import os -import os.path -import re -import subprocess -import sys - - -def SystemIncludeDirectoryFlags(): - """Determines compile flags to include the system include directories. - - Use as a workaround for https://github.com/Valloric/YouCompleteMe/issues/303 - - Returns: - (List of Strings) Compile flags to append. - """ - try: - with open(os.devnull, 'rb') as DEVNULL: - output = subprocess.check_output(['clang', '-v', '-E', '-x', 'c++', '-'], - stdin=DEVNULL, stderr=subprocess.STDOUT) - except: - return [] - includes_regex = r'#include <\.\.\.> search starts here:\s*' \ - r'(.*?)End of search list\.' 
- includes = re.search(includes_regex, output.decode(), re.DOTALL).group(1) - flags = [] - for path in includes.splitlines(): - path = path.strip() - if os.path.isdir(path): - flags.append('-isystem') - flags.append(path) - return flags - - -_system_include_flags = SystemIncludeDirectoryFlags() - -# Flags from YCM's default config. -flags = [ -'-DUSE_CLANG_COMPLETER', -'-std=c++11', -'-x', -'c++', -] - - -def PathExists(*args): - return os.path.exists(os.path.join(*args)) - - -def FindChromeSrcFromFilename(filename): - """Searches for the root of the Chromium checkout. - - Simply checks parent directories until it finds .gclient and src/. - - Args: - filename: (String) Path to source file being edited. - - Returns: - (String) Path of 'src/', or None if unable to find. - """ - curdir = os.path.normpath(os.path.dirname(filename)) - while not (PathExists(curdir, 'DEPS') - and PathExists(curdir, 'packager') - and (PathExists(curdir, '..', '.gclient') - or PathExists(curdir, '.git'))): - nextdir = os.path.normpath(os.path.join(curdir, '..')) - if nextdir == curdir: - return None - curdir = nextdir - return curdir - - -def GetClangCommandFromNinjaForFilename(chrome_root, filename): - """Returns the command line to build |filename|. - - Asks ninja how it would build the source file. If the specified file is a - header, tries to find its companion source file first. - - Args: - chrome_root: (String) Path to src/. - filename: (String) Path to source file being edited. - - Returns: - (List of Strings) Command line arguments for clang. - """ - if not chrome_root: - return [] - - # Generally, everyone benefits from including Chromium's src/, because all of - # Chromium's includes are relative to that. - chrome_flags = ['-I' + os.path.join(chrome_root)] - - # Version of Clang used to compile Chromium can be newer then version of - # libclang that YCM uses for completion. 
So it's possible that YCM's libclang - # doesn't know about some used warning options, which causes compilation - # warnings (and errors, because of '-Werror'); - chrome_flags.append('-Wno-unknown-warning-option') - - # Default file to get a reasonable approximation of the flags for a Blink - # file. - blink_root = os.path.join(chrome_root, 'third_party', 'WebKit') - default_blink_file = os.path.join(blink_root, 'Source', 'core', 'Init.cpp') - - # Header files can't be built. Instead, try to match a header file to its - # corresponding source file. - if filename.endswith('.h'): - alternates = ['.cc', '.cpp'] - for alt_extension in alternates: - alt_name = filename[:-2] + alt_extension - if os.path.exists(alt_name): - filename = alt_name - break - else: - # If this is a standalone .h file with no source, the best we can do is - # try to use the default flags. - return chrome_flags - - #sys.path.append(os.path.join(chrome_root, 'tools', 'vim')) - #from ninja_output import GetNinjaOutputDirectory - #out_dir = os.path.realpath(GetNinjaOutputDirectory(chrome_root)) - # TODO(rkuroiwa): The lines above can be uncommented and the line below can be - # deleted when we pull tools/vim/ninja_output.py. For now, just look at the - # Debug output directory. - out_dir = os.path.realpath(os.path.join(chrome_root, 'out', 'Debug')) - - # Ninja needs the path to the source file relative to the output build - # directory. - rel_filename = os.path.relpath(os.path.realpath(filename), out_dir) - - # Ask ninja how it would build our source file. - p = subprocess.Popen(['ninja', '-v', '-C', out_dir, '-t', - 'commands', rel_filename + '^'], - stdout=subprocess.PIPE) - stdout, stderr = p.communicate() - if p.returncode: - return chrome_flags - - # Ninja might execute several commands to build something. We want the last - # clang command. 
- clang_line = None - for line in reversed(stdout.split('\n')): - if 'clang' in line: - clang_line = line - break - else: - return chrome_flags - - # Parse flags that are important for YCM's purposes. - for flag in clang_line.split(' '): - if flag.startswith('-I'): - # Relative paths need to be resolved, because they're relative to the - # output dir, not the source. - if flag[2] == '/': - chrome_flags.append(flag) - else: - abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) - chrome_flags.append('-I' + abs_path) - elif flag.startswith('-std'): - chrome_flags.append(flag) - elif flag.startswith('-') and flag[1] in 'DWFfmO': - if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard': - # These flags causes libclang (3.3) to crash. Remove it until things - # are fixed. - continue - chrome_flags.append(flag) - - return chrome_flags - - -def FlagsForFile(filename): - """This is the main entry point for YCM. Its interface is fixed. - - Args: - filename: (String) Path to source file being edited. - - Returns: - (Dictionary) - 'flags': (List of Strings) Command line flags. - 'do_cache': (Boolean) True if the result should be cached. - """ - chrome_root = FindChromeSrcFromFilename(filename) - chrome_flags = GetClangCommandFromNinjaForFilename(chrome_root, - filename) - final_flags = flags + chrome_flags + _system_include_flags - - return { - 'flags': final_flags, - 'do_cache': True - } diff --git a/packager/version/CMakeLists.txt b/packager/version/CMakeLists.txt new file mode 100644 index 0000000000..fefc99f274 --- /dev/null +++ b/packager/version/CMakeLists.txt @@ -0,0 +1,22 @@ +# Copyright 2022 Google Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +execute_process( + COMMAND python3 generate_version_string.py + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" + RESULT_VARIABLE STATUS + OUTPUT_VARIABLE PACKAGER_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE + ENCODING UTF8) +if(NOT STATUS EQUAL 0) + message(FATAL_ERROR "Failed to generate Packager version") +endif() + +add_library(version STATIC version.cc) +target_compile_definitions(version PRIVATE + PACKAGER_VERSION="${PACKAGER_VERSION}") +target_link_libraries(version + absl::synchronization) diff --git a/packager/version/version.cc b/packager/version/version.cc index c8ec78220b..d1b3567847 100644 --- a/packager/version/version.cc +++ b/packager/version/version.cc @@ -6,7 +6,7 @@ #include "packager/version/version.h" -#include "packager/base/synchronization/read_write_lock.h" +#include "absl/synchronization/mutex.h" namespace shaka { namespace { @@ -32,11 +32,11 @@ class Version { ~Version() {} const std::string& GetVersion() { - base::subtle::AutoReadLock read_lock(lock_); + absl::ReaderMutexLock lock(&mutex_); return version_; } void SetVersion(const std::string& version) { - base::subtle::AutoWriteLock write_lock(lock_); + absl::MutexLock lock(&mutex_); version_ = version; } @@ -44,8 +44,8 @@ class Version { Version(const Version&) = delete; Version& operator=(const Version&) = delete; - base::subtle::ReadWriteLock lock_; - std::string version_; + absl::Mutex mutex_; + std::string version_ GUARDED_BY(mutex_); }; } // namespace diff --git a/packager/version/version.gyp b/packager/version/version.gyp deleted file mode 100644 index cf011f473c..0000000000 --- a/packager/version/version.gyp +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. 
-# -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file or at -# https://developers.google.com/open-source/licenses/bsd - -{ - 'variables': { - 'shaka_code': 1, - }, - 'targets': [ - { - 'target_name': 'version', - 'type': '<(component)', - 'defines': [ - 'PACKAGER_VERSION="